StanHeaders/0000755000176200001440000000000014645161272012463 5ustar liggesusersStanHeaders/tests/0000755000176200001440000000000013713160557013625 5ustar liggesusersStanHeaders/tests/rstan.R0000644000176200001440000000035613713160557015103 0ustar liggesusers#Sys.setenv("R_TESTS" = "") #library(rstan) #stancode <- 'data {real y_mean;} parameters {real y;} model {y ~ normal(y_mean,1);}' #mod <- stan_model(model_code = stancode, verbose = TRUE) #fit <- sampling(mod, data = list(y_mean = 0)) StanHeaders/MD50000644000176200001440000060115714645161272013005 0ustar liggesuserse03e0712141bf9ea80a2267e8fb0a7f0 *DESCRIPTION e9654ca8e0359948dffe0bb36f0c0f43 *LICENSE c154f76504a2c135d15e3a5a519e6034 *NAMESPACE 40e49061477c07658193faa24f3bd9b0 *R/Flags.R 57ad2cc6f62e463122064e1f71c62041 *R/stanFunction.R 69b0e3b33792be7a8f82ee3ac7412ed6 *build/vignette.rds c34ecf0a899536555d3179c8ea6b5336 *inst/CITATION ddfb7fe84a5f61eaec00c645f5a5c905 *inst/doc/sparselm_stan.hpp bd4d01db136f8c0d0fa8fd3e348359bb *inst/doc/stanmath.R 33c9e17d63127207e090c353b627fc98 *inst/doc/stanmath.Rmd c02c7987fdbb9f89ab21b5a282dc0cbc *inst/doc/stanmath.html de4af08eaf10fb0e2af7e7eb9f8b09e0 *inst/include/cvodes/cvodes.h cb59c489739b54654b08e559cf2fc55a *inst/include/cvodes/cvodes_bandpre.h 4bd02b30179012ac4c2ca5f9b9468ec8 *inst/include/cvodes/cvodes_bbdpre.h 1f5afbcfa6ffe924fb4260e7ca12fb64 *inst/include/cvodes/cvodes_diag.h 5cd37c06c64ceb85d0e094f7ea3a2d12 *inst/include/cvodes/cvodes_direct.h f21cbb49545f94c5c85a19efddb508d3 *inst/include/cvodes/cvodes_ls.h fad20a453bd0b2a24c2bd764df217ba4 *inst/include/cvodes/cvodes_spils.h 42e0688281afe767fbcd3437e1501737 *inst/include/idas/idas.h 0dc52cac4546e6fc89b5ee680b8cde1a *inst/include/idas/idas_bbdpre.h c66df452398e3b80086bfe3a302fe993 *inst/include/idas/idas_direct.h edb3ad3bc2224d588718f706b5018549 *inst/include/idas/idas_ls.h c1bcd67520dfe5833b9854d039d94e97 *inst/include/idas/idas_spils.h 7e64d22771b3fe6cb1d5f73ea92c05ae *inst/include/kinsol/kinsol.h 
efc763868e09c54c8c78dd1b1664ab38 *inst/include/kinsol/kinsol_bbdpre.h f2e63a0e14e52b0312da58cc3b8716ed *inst/include/kinsol/kinsol_direct.h cf6dd7a060df9243d9f23dfccacd083c *inst/include/kinsol/kinsol_ls.h 425e97487935aa75cc37bc51141612db *inst/include/kinsol/kinsol_spils.h bdc00981dc4ef6d3ae0881e63ac88e47 *inst/include/nvector/nvector_cuda.h 0cb546af05cfde494fbd35104858852a *inst/include/nvector/nvector_hip.h 9b50b30d330fb9dd4a849a12e83820c2 *inst/include/nvector/nvector_manyvector.h bc6fe77bee2416cf778beb08928d2c2a *inst/include/nvector/nvector_mpimanyvector.h bb487394bc2bfd4697da7b2cba0455dc *inst/include/nvector/nvector_mpiplusx.h 47ecf82d76d366f6f7ab8c57c651582d *inst/include/nvector/nvector_openmp.h 93fad479b97a75a4c837cf2fccd62341 *inst/include/nvector/nvector_openmpdev.h c5ac6e483d465b76a656ac67e3e26fd4 *inst/include/nvector/nvector_parallel.h 450ece5756f919796801d78e1e0fd27a *inst/include/nvector/nvector_parhyp.h 36b0d5f1696e37207cbbfebf4e346307 *inst/include/nvector/nvector_petsc.h bdb401256565564dc327f6f991dc9c66 *inst/include/nvector/nvector_pthreads.h e31e23b6cb209abe7d64b8ee6343d906 *inst/include/nvector/nvector_raja.h 0e27b15a3a2e1eae05e7b0d4a167c7ad *inst/include/nvector/nvector_serial.h 72a4e589d41eff6b2e376d26cd4e62f2 *inst/include/nvector/nvector_sycl.h 2f4a8067306458e6dd93b32d1546e932 *inst/include/nvector/nvector_trilinos.h 53a1bbf89328c0cc5a6be785c9af8616 *inst/include/nvector/trilinos/SundialsTpetraVectorInterface.hpp f050ca5e350ab92a3b11aa09eb5c104e *inst/include/nvector/trilinos/SundialsTpetraVectorKernels.hpp 207f5b117be93aad28c00c767baf5577 *inst/include/src/stan/analyze/mcmc/autocovariance.hpp c5794ebbab8013e0c4b42506f89b83fe *inst/include/src/stan/analyze/mcmc/compute_effective_sample_size.hpp e329d56fd9f37adbcca7812b87dc96e7 *inst/include/src/stan/analyze/mcmc/compute_potential_scale_reduction.hpp ae6b1e4854d0a2751e548533b41dc8e9 *inst/include/src/stan/analyze/mcmc/split_chains.hpp 9a87e19468b45acccecac19ff37a10cf 
*inst/include/src/stan/callbacks/interrupt.hpp 88bb6c43c26ea8a899c96d9a36124052 *inst/include/src/stan/callbacks/logger.hpp 7e734d2817851286998bab9a2cb698c9 *inst/include/src/stan/callbacks/stream_logger.hpp fdcc3684f6c6481071f716af010426f4 *inst/include/src/stan/callbacks/stream_writer.hpp 10fdeaa1d75997633d121b30e1e0eeaf *inst/include/src/stan/callbacks/tee_writer.hpp fc181dd0ba8c9be6bcc2cf037efa4eea *inst/include/src/stan/callbacks/unique_stream_writer.hpp accc3a07868fa78ddf799034122315bd *inst/include/src/stan/callbacks/writer.hpp c03fe083f690a000863a615eda5326e2 *inst/include/src/stan/io/array_var_context.hpp 26f93e9cb57b990e04fe8db44db3790f *inst/include/src/stan/io/chained_var_context.hpp 0ac7c5178f64e11d60e9e42013f41204 *inst/include/src/stan/io/deserializer.hpp caa83cce796af7a81a9e90e186b00a40 *inst/include/src/stan/io/dump.hpp ed9bd784573c46958547b935d0b392f3 *inst/include/src/stan/io/empty_var_context.hpp 40e88343a015002aa91dd090765d7cc6 *inst/include/src/stan/io/ends_with.hpp bfb8b0af4da866a42ce4229b97b6c1c6 *inst/include/src/stan/io/json/json_data.hpp 1f4493074cdc3c484e41e3e52237af73 *inst/include/src/stan/io/json/json_data_handler.hpp d72763b4b47e8e37309b1eec07f73323 *inst/include/src/stan/io/json/json_error.hpp 1bf868777629c945f976de3866a49f52 *inst/include/src/stan/io/json/json_handler.hpp 822a2715eeb3d894bc45833e031413a3 *inst/include/src/stan/io/json/rapidjson_parser.hpp 7de432f54eb4b25e0555d44c1569a894 *inst/include/src/stan/io/random_var_context.hpp aaa3e256d926c4bc422a11a8c2e4624d *inst/include/src/stan/io/serializer.hpp 900ec81fc817f079fafdecfad9970f87 *inst/include/src/stan/io/stan_csv_reader.hpp 3cf3688b0179432595efa2b5d388f3fd *inst/include/src/stan/io/validate_dims.hpp 8af324e98d1808e0d2e4adb7a88ca81f *inst/include/src/stan/io/validate_zero_buf.hpp b5837de7514d1197f636ae17888572e3 *inst/include/src/stan/io/var_context.hpp b48f3b358fba5d602c098d8de060b3d3 *inst/include/src/stan/lang/rethrow_located.hpp c10d85a634ab4c8810aba1164e4b9409 
*inst/include/src/stan/mcmc/base_adaptation.hpp b11082dc86382ac1c5a09fb3e476ab07 *inst/include/src/stan/mcmc/base_adapter.hpp 0502d5666299980aea11fab4138b34f1 *inst/include/src/stan/mcmc/base_mcmc.hpp 988f8265443d0fc55270b5251369f671 *inst/include/src/stan/mcmc/chains.hpp e85c5b52c446e86e91063254cd1ee95d *inst/include/src/stan/mcmc/covar_adaptation.hpp 433d713a915c4e5f648cc453029d02aa *inst/include/src/stan/mcmc/fixed_param_sampler.hpp 00022a7846698f9ec229f5d8479ceeaf *inst/include/src/stan/mcmc/hmc/base_hmc.hpp 2b1bd696b5133dafd96ee707501a99bb *inst/include/src/stan/mcmc/hmc/hamiltonians/base_hamiltonian.hpp a22c78ba755a0606cc5292ebcfd4ea00 *inst/include/src/stan/mcmc/hmc/hamiltonians/dense_e_metric.hpp ecd9cba40b07807eae2dddfc8bad9954 *inst/include/src/stan/mcmc/hmc/hamiltonians/dense_e_point.hpp 2bdbc330a4645987fb90b03695fe01d6 *inst/include/src/stan/mcmc/hmc/hamiltonians/diag_e_metric.hpp 35d3681be9f9a66faa8e5075661fe166 *inst/include/src/stan/mcmc/hmc/hamiltonians/diag_e_point.hpp ccf42a6fdbc9ba02a5506a2183f041d5 *inst/include/src/stan/mcmc/hmc/hamiltonians/ps_point.hpp 19e65645a5cbb316528a592b9210ddfd *inst/include/src/stan/mcmc/hmc/hamiltonians/softabs_metric.hpp 0a110456c9c654a42002212e2481e44d *inst/include/src/stan/mcmc/hmc/hamiltonians/softabs_point.hpp 4023d4ab7d1d6adae30b4009d1d723c1 *inst/include/src/stan/mcmc/hmc/hamiltonians/unit_e_metric.hpp 3f5a2f18114a5008485df95c44edb837 *inst/include/src/stan/mcmc/hmc/hamiltonians/unit_e_point.hpp fae768906314938a69e038811c1599a0 *inst/include/src/stan/mcmc/hmc/integrators/base_integrator.hpp c90cad3a1b800b02b9d9f26c645ea518 *inst/include/src/stan/mcmc/hmc/integrators/base_leapfrog.hpp 182ef84f24ad8c157d886fea29790f26 *inst/include/src/stan/mcmc/hmc/integrators/expl_leapfrog.hpp 2b2e91d914b9fadbbaa1247392813db6 *inst/include/src/stan/mcmc/hmc/integrators/impl_leapfrog.hpp 120280620e2e26d9e511bb1fc17d16ee *inst/include/src/stan/mcmc/hmc/nuts/adapt_dense_e_nuts.hpp 5757c3349d0e68c6c4ccf191a354dba8 
*inst/include/src/stan/mcmc/hmc/nuts/adapt_diag_e_nuts.hpp b764a564765ba8e6fd5aa465314b93a3 *inst/include/src/stan/mcmc/hmc/nuts/adapt_softabs_nuts.hpp 174a1ac2fb4f7ac459e49a151ed88fb4 *inst/include/src/stan/mcmc/hmc/nuts/adapt_unit_e_nuts.hpp cc40f0f5cef17f9abad613621389591a *inst/include/src/stan/mcmc/hmc/nuts/base_nuts.hpp 7c0041b1ed401cb09b1af88e5c5d825b *inst/include/src/stan/mcmc/hmc/nuts/dense_e_nuts.hpp 5758356af7355f907ac2a3be1bbe019e *inst/include/src/stan/mcmc/hmc/nuts/diag_e_nuts.hpp b3a427bb83a57c8b71b9b2a8596b20c9 *inst/include/src/stan/mcmc/hmc/nuts/softabs_nuts.hpp c108c834f3003f41fea8c2916733aef1 *inst/include/src/stan/mcmc/hmc/nuts/unit_e_nuts.hpp 9764eab397400de386bb0376353812e7 *inst/include/src/stan/mcmc/hmc/nuts_classic/adapt_dense_e_nuts_classic.hpp d0896871230ac3708c143ccef4567178 *inst/include/src/stan/mcmc/hmc/nuts_classic/adapt_diag_e_nuts_classic.hpp 7f1bb5ee3f0eb6776ec8d4b2707ee1f8 *inst/include/src/stan/mcmc/hmc/nuts_classic/adapt_unit_e_nuts_classic.hpp fd83ed5d4f1ab2ee9ffa9173492153ca *inst/include/src/stan/mcmc/hmc/nuts_classic/base_nuts_classic.hpp b00808cefbc72cebf5c38bd2fea7ca69 *inst/include/src/stan/mcmc/hmc/nuts_classic/dense_e_nuts_classic.hpp ee11d39d53250367b5260223a688a95b *inst/include/src/stan/mcmc/hmc/nuts_classic/diag_e_nuts_classic.hpp 28abcbee705ca57acf3de8093dd27061 *inst/include/src/stan/mcmc/hmc/nuts_classic/unit_e_nuts_classic.hpp fb1f3f399f8ee9b77055d6b22375f247 *inst/include/src/stan/mcmc/hmc/static/adapt_dense_e_static_hmc.hpp 4a50bc8495da52b435d0ce87934af61a *inst/include/src/stan/mcmc/hmc/static/adapt_diag_e_static_hmc.hpp 5b7ced6ccffb6009568e8fd8a87d48f4 *inst/include/src/stan/mcmc/hmc/static/adapt_softabs_static_hmc.hpp c5004000ac83cf04a05e9ef550ae450a *inst/include/src/stan/mcmc/hmc/static/adapt_unit_e_static_hmc.hpp 2cd8952fdd51092bb5979723d9e22a7b *inst/include/src/stan/mcmc/hmc/static/base_static_hmc.hpp 086dd35078d569fa6f06d00720e0e4fd *inst/include/src/stan/mcmc/hmc/static/dense_e_static_hmc.hpp 
b77bb0267e82ab56d0e1f7e0a550e39e *inst/include/src/stan/mcmc/hmc/static/diag_e_static_hmc.hpp a724a6939cd662e47d3a96fd9c13f1b7 *inst/include/src/stan/mcmc/hmc/static/softabs_static_hmc.hpp fe0e4e863469266e1fc82359a2bb4b90 *inst/include/src/stan/mcmc/hmc/static/unit_e_static_hmc.hpp a90aad32c8a8b2c929065b1e6a249faf *inst/include/src/stan/mcmc/hmc/static_uniform/adapt_dense_e_static_uniform.hpp efcda4b6b2a90f435684db0f892fe353 *inst/include/src/stan/mcmc/hmc/static_uniform/adapt_diag_e_static_uniform.hpp dbf52302d8de561e67d1ff971f778a9c *inst/include/src/stan/mcmc/hmc/static_uniform/adapt_softabs_static_uniform.hpp 560476a41ec4149478b3c442cafab71e *inst/include/src/stan/mcmc/hmc/static_uniform/adapt_unit_e_static_uniform.hpp ac6ca6b4cf2c12454688153397206cb4 *inst/include/src/stan/mcmc/hmc/static_uniform/base_static_uniform.hpp eec3499d631680ab4286371e0a0817ca *inst/include/src/stan/mcmc/hmc/static_uniform/dense_e_static_uniform.hpp 2aa43c250b956f84beda97e9b052e19c *inst/include/src/stan/mcmc/hmc/static_uniform/diag_e_static_uniform.hpp d4a50a4bb4b2d3cca42e64ee55cd8940 *inst/include/src/stan/mcmc/hmc/static_uniform/softabs_static_uniform.hpp d6de3e6776e1f21b5b8a7523ac357c02 *inst/include/src/stan/mcmc/hmc/static_uniform/unit_e_static_uniform.hpp 89e0dca338832d24bdebeb15b140f154 *inst/include/src/stan/mcmc/hmc/xhmc/adapt_dense_e_xhmc.hpp fd708e4257036ebb8114a441c8106e0d *inst/include/src/stan/mcmc/hmc/xhmc/adapt_diag_e_xhmc.hpp e8b0aa77425fc586b77253ca649ea1e2 *inst/include/src/stan/mcmc/hmc/xhmc/adapt_softabs_xhmc.hpp de4421f985542fa7411a3fedc8c0c393 *inst/include/src/stan/mcmc/hmc/xhmc/adapt_unit_e_xhmc.hpp 3a943da20c2469e1a801f207ef46697b *inst/include/src/stan/mcmc/hmc/xhmc/base_xhmc.hpp b2705f8647705756204a2d8d1b87c805 *inst/include/src/stan/mcmc/hmc/xhmc/dense_e_xhmc.hpp e1514fe43561bc8cb09fa1f123002828 *inst/include/src/stan/mcmc/hmc/xhmc/diag_e_xhmc.hpp 39c9c19ba3791969b466f9db7e5c5056 *inst/include/src/stan/mcmc/hmc/xhmc/softabs_xhmc.hpp 
5d98e4a8e1948aa7144ff07893128705 *inst/include/src/stan/mcmc/hmc/xhmc/unit_e_xhmc.hpp a6dc976a24e009c5e1cbc4aec6bb148e *inst/include/src/stan/mcmc/sample.hpp 7c322f06a9eccc412e9d8bdfac20089d *inst/include/src/stan/mcmc/stepsize_adaptation.hpp 0e99b3f3e88f5cc0afca301b026d52b9 *inst/include/src/stan/mcmc/stepsize_adapter.hpp 41a1bf89c5e12ac7a90e83c7ebc3cfed *inst/include/src/stan/mcmc/stepsize_covar_adapter.hpp c4dfe71ffafc817e659ef11dd7dfe16f *inst/include/src/stan/mcmc/stepsize_var_adapter.hpp 2fed1497e33fbdf8f9602a3d159461fe *inst/include/src/stan/mcmc/var_adaptation.hpp 55664d644978cec82aca55e366012577 *inst/include/src/stan/mcmc/windowed_adaptation.hpp 8e578b24882d76175a2db72092651308 *inst/include/src/stan/model/finite_diff_grad.hpp ca20fa31c3fab30c5dbeeed2c76e02b2 *inst/include/src/stan/model/grad_hess_log_prob.hpp 49ce2ad29a81ed0a89d8d573f41a69e0 *inst/include/src/stan/model/grad_tr_mat_times_hessian.hpp 6ed50f2dc607cf6842956edf48c11158 *inst/include/src/stan/model/gradient.hpp e447d8ef13b82b73161af4193cb8ce15 *inst/include/src/stan/model/gradient_dot_vector.hpp 8c5979b23a19965163d42ad3e8d68632 *inst/include/src/stan/model/hessian.hpp ce4d9767d3e7a5ca796631b66b7f5f2d *inst/include/src/stan/model/hessian_times_vector.hpp 6be682f18b0ef2329d66c309fbdd6495 *inst/include/src/stan/model/indexing.hpp 35b4b340199b403a23df5bf8f72c7d61 *inst/include/src/stan/model/indexing/access_helpers.hpp 2daa9c2d18db461739beeef942e8a15e *inst/include/src/stan/model/indexing/assign.hpp 03d4d4ea467fc270cb6615840d63b2e6 *inst/include/src/stan/model/indexing/assign_cl.hpp daeb301590860fa064d3bbc62e99dd8b *inst/include/src/stan/model/indexing/assign_varmat.hpp 7ebcde14a14ddf0b1d220bf63b7a36ef *inst/include/src/stan/model/indexing/deep_copy.hpp 14446b26c8b873bdd712e675cff8e6c8 *inst/include/src/stan/model/indexing/index.hpp 4a057e3e30160ee9735899bfddc5f68e *inst/include/src/stan/model/indexing/rvalue.hpp 3a3d872ae6c050f0aa5e23fb053f0c33 *inst/include/src/stan/model/indexing/rvalue_at.hpp 
559674082cb195d6180cb102078c57cb *inst/include/src/stan/model/indexing/rvalue_cl.hpp 02843ed7ac9534b125675eeaac8e1dc2 *inst/include/src/stan/model/indexing/rvalue_index_size.hpp d7e771fb1dc8ebcaca0d8187e02ad9a5 *inst/include/src/stan/model/indexing/rvalue_varmat.hpp a1f01f9bee12d07feaedfbd29822ea87 *inst/include/src/stan/model/log_prob_grad.hpp a6891a3e3d5e7c8fdf1b709ec86b077b *inst/include/src/stan/model/log_prob_propto.hpp ad6e2219fb7bbe59cdc4cd8e93d3eaa9 *inst/include/src/stan/model/model_base.hpp cc2d0f4420fbd71833b0450bf597e593 *inst/include/src/stan/model/model_base_crtp.hpp 3bb6175df3b141a208e1a35b3891a19d *inst/include/src/stan/model/model_functional.hpp 49e624954267d33bfb787d623b9aec8c *inst/include/src/stan/model/model_header.hpp 0b2e0cb8a3d92e3c18e6530220b4ee3e *inst/include/src/stan/model/prob_grad.hpp 39e71920eb23626413cfc0bb27c054e0 *inst/include/src/stan/model/rethrow_located.hpp eb0b3a812da4ca9650ac9e9cb9b31b1e *inst/include/src/stan/model/test_gradients.hpp 10255215050780f31607a6a566d0bc57 *inst/include/src/stan/optimization/bfgs.hpp 5559dea8529c2b46d85864dd1a264778 *inst/include/src/stan/optimization/bfgs_linesearch.hpp 5434d1d24742da9d75d5692ed67d89ea *inst/include/src/stan/optimization/bfgs_update.hpp a1869779928fadde6652c1f4701a17fb *inst/include/src/stan/optimization/lbfgs_update.hpp e716577d5f19fc5ad16039f4a6f51de8 *inst/include/src/stan/optimization/newton.hpp 07f58c482804180bd5a54576fc7ff4ea *inst/include/src/stan/services/diagnose/defaults.hpp 8b9876de8c5ff2c9d79edd2d170fa30b *inst/include/src/stan/services/diagnose/diagnose.hpp 679e252f18e8dd28cf327e0afd4f01e8 *inst/include/src/stan/services/error_codes.hpp 245009e774b042bb1e468a14cc39b41d *inst/include/src/stan/services/experimental/advi/defaults.hpp 3e62caa913f8ca9500e087881abace5e *inst/include/src/stan/services/experimental/advi/fullrank.hpp 8c1860460f55b715b732a62986fce9e4 *inst/include/src/stan/services/experimental/advi/meanfield.hpp 9a00f1de6f83f719a2b82fe1473cf81b 
*inst/include/src/stan/services/optimize/bfgs.hpp 540de2abdc11d7b9beac36dfeb6a7df7 *inst/include/src/stan/services/optimize/defaults.hpp 6b36e9915fc917d67c0037d7e031674f *inst/include/src/stan/services/optimize/laplace_sample.hpp ccff674a31bf31d16ac11efcb04fc185 *inst/include/src/stan/services/optimize/lbfgs.hpp 9fd3d3fb480c9ebcebc993ff8cb08b40 *inst/include/src/stan/services/optimize/newton.hpp b0d26d3f90d9bceb7f4468c25fd2e57f *inst/include/src/stan/services/sample/defaults.hpp b2d1817679052ca461d1cbb825b4eb78 *inst/include/src/stan/services/sample/fixed_param.hpp 63661683ee1fc6de3167235ac5f5d01b *inst/include/src/stan/services/sample/hmc_nuts_dense_e.hpp dc8cebeca3fe778cb4241bbfa2fe4d45 *inst/include/src/stan/services/sample/hmc_nuts_dense_e_adapt.hpp a3c621cc7f350927e0c5bf12e1f6b2e8 *inst/include/src/stan/services/sample/hmc_nuts_diag_e.hpp a1463c2825b69ee40730d96ddb108ce7 *inst/include/src/stan/services/sample/hmc_nuts_diag_e_adapt.hpp 793cb81221c7450e375e4e016a43d7ea *inst/include/src/stan/services/sample/hmc_nuts_unit_e.hpp 51a8e7b308a11abc125d85b172da0c58 *inst/include/src/stan/services/sample/hmc_nuts_unit_e_adapt.hpp a6f3ec787405dc308af7a236d0e4c392 *inst/include/src/stan/services/sample/hmc_static_dense_e.hpp 919da6fd2c91be12dbc39f02a016f5df *inst/include/src/stan/services/sample/hmc_static_dense_e_adapt.hpp fbf0eb7250decf452189b03ede758144 *inst/include/src/stan/services/sample/hmc_static_diag_e.hpp ebfd18347c30d05f06f7cbc9a2f18028 *inst/include/src/stan/services/sample/hmc_static_diag_e_adapt.hpp 6516c942f7234663c7b53b2d3b8615c0 *inst/include/src/stan/services/sample/hmc_static_unit_e.hpp ce1e0c684a83ef86aef78c36a8e653dd *inst/include/src/stan/services/sample/hmc_static_unit_e_adapt.hpp 493516313e658590ae607e28d10e589d *inst/include/src/stan/services/sample/standalone_gqs.hpp 6d2b321293a8ad241c318ef691347946 *inst/include/src/stan/services/util/create_rng.hpp 77ae5b2ee5a997d75d6e71e08c0eaa30 
*inst/include/src/stan/services/util/create_unit_e_dense_inv_metric.hpp 8ad9e62b5c30796eb5e2d79b018e303e *inst/include/src/stan/services/util/create_unit_e_diag_inv_metric.hpp 20711f9dcb58405998e370fd065b3e96 *inst/include/src/stan/services/util/experimental_message.hpp c97ba82ed1de8914a117af6a14618077 *inst/include/src/stan/services/util/generate_transitions.hpp a4133d7a828005ee695575781082be9d *inst/include/src/stan/services/util/gq_writer.hpp cf14e9698e73c4de3d9548844b18dbea *inst/include/src/stan/services/util/initialize.hpp 8d87e4604687fda54f54ab452eb0d858 *inst/include/src/stan/services/util/inv_metric.hpp cfcc693252c2041b9c154cf02e3463f7 *inst/include/src/stan/services/util/mcmc_writer.hpp 0b717e49560092c81d622e9fc259793c *inst/include/src/stan/services/util/read_dense_inv_metric.hpp 05d857325a6e6a78a72fa2afdf3cfbbf *inst/include/src/stan/services/util/read_diag_inv_metric.hpp acf9e1b16b259d0c293849ca8603951a *inst/include/src/stan/services/util/run_adaptive_sampler.hpp f12c6fd196f8805628d2216772b420b1 *inst/include/src/stan/services/util/run_sampler.hpp 990b88bd26e523d44be4364959751b1a *inst/include/src/stan/services/util/validate_dense_inv_metric.hpp eebca791e3c7b0fb8a9d13e1a569e0b5 *inst/include/src/stan/services/util/validate_diag_inv_metric.hpp b477e2d9155e0a31a35e1789bfbdef7c *inst/include/src/stan/variational/advi.hpp 30bbe658e22174370718fb10c71b57ca *inst/include/src/stan/variational/base_family.hpp f1f9c3614b4a68e2e9f59b836804867a *inst/include/src/stan/variational/families/normal_fullrank.hpp dfbc625b3bab0892237c6760d8fbb1d9 *inst/include/src/stan/variational/families/normal_meanfield.hpp be8db6a0f70074619018a332268f5916 *inst/include/src/stan/variational/print_progress.hpp 8379ecfd10ebd23d67ef198b88361be0 *inst/include/src/stan/version.hpp ca1c31becd867b050e0e37c1a5954c9a *inst/include/stan/math.hpp c1c09ce9a140539e00c9368a3c4cd13a *inst/include/stan/math/fwd.hpp 10f9c18732df8f00adf6f2c5371d8028 *inst/include/stan/math/fwd/core.hpp 
29dc293eed4a8e5f8dfc90fa21923355 *inst/include/stan/math/fwd/core/fvar.hpp 238ed927083f1385452973094e5c70ec *inst/include/stan/math/fwd/core/operator_addition.hpp c6fcad63619c405c5e41141d1897b710 *inst/include/stan/math/fwd/core/operator_division.hpp de8b4e221175c599cffee1d0fac92866 *inst/include/stan/math/fwd/core/operator_equal.hpp 6073f5cd560b66b298c906651ed9f940 *inst/include/stan/math/fwd/core/operator_greater_than.hpp 3178d1238173c1e18e1f75a3d1d99bc2 *inst/include/stan/math/fwd/core/operator_greater_than_or_equal.hpp 94d720ade2715fc43b975cade37bd5b0 *inst/include/stan/math/fwd/core/operator_less_than.hpp 081697396be9892848a9572c8b5abfdd *inst/include/stan/math/fwd/core/operator_less_than_or_equal.hpp 0b1ee8c588deb74c38255992124c50ae *inst/include/stan/math/fwd/core/operator_logical_and.hpp 9e2c4a1f1b54aa5ec467d49818029f80 *inst/include/stan/math/fwd/core/operator_logical_or.hpp 20b6423ae35b96bdd4323d63be835d09 *inst/include/stan/math/fwd/core/operator_multiplication.hpp d777c6aa41767afb3d37ac692516c15f *inst/include/stan/math/fwd/core/operator_not_equal.hpp 3a7967b4ce2bed0c0d5c9468cd010fe5 *inst/include/stan/math/fwd/core/operator_subtraction.hpp 4b3dd1ad5ea5dd83b7222df07d2ec49f *inst/include/stan/math/fwd/core/operator_unary_minus.hpp db17781dd2497d31145a31957a750799 *inst/include/stan/math/fwd/core/operator_unary_not.hpp 04d03ec668034c80189078c39293153c *inst/include/stan/math/fwd/core/operator_unary_plus.hpp 1ce524354d06a04fe5aff4161a15873b *inst/include/stan/math/fwd/core/std_complex.hpp bb70c5c876a7675c61e214246718fbff *inst/include/stan/math/fwd/core/std_iterator_traits.hpp d1d71b7e541c91d9b41cd745b468ac7c *inst/include/stan/math/fwd/core/std_numeric_limits.hpp 7ddf02ace66900b9f3646d3dbe85349f *inst/include/stan/math/fwd/fun.hpp 0721c562859372087df85ea88c95d112 *inst/include/stan/math/fwd/fun/Eigen_NumTraits.hpp f8e73449d3a0798831fe1a877e0ab335 *inst/include/stan/math/fwd/fun/Phi.hpp 10e87587f7638fddf6adf01db0db2ffc 
*inst/include/stan/math/fwd/fun/Phi_approx.hpp 7d96242151e0ac3e5d2501f63cfee110 *inst/include/stan/math/fwd/fun/abs.hpp 442ca052ceaa3923adade05c6ae53a53 *inst/include/stan/math/fwd/fun/accumulator.hpp b0f2d8252f4bedd2dd9b1daa52c014c9 *inst/include/stan/math/fwd/fun/acos.hpp d6ce79d946c57b89edec3c76f68dd47f *inst/include/stan/math/fwd/fun/acosh.hpp b037c92eb1033408d0d3d2f84dd2adce *inst/include/stan/math/fwd/fun/arg.hpp fdc386cb16853296146f51368485f6d6 *inst/include/stan/math/fwd/fun/asin.hpp 2e62c5b6a33521f9974397f347b80c81 *inst/include/stan/math/fwd/fun/asinh.hpp 2b0efcb74794bb48541edd5a03c86674 *inst/include/stan/math/fwd/fun/atan.hpp 6946800312458de12e47a1514696001c *inst/include/stan/math/fwd/fun/atan2.hpp e864c6094875abd0873662ded504dbaf *inst/include/stan/math/fwd/fun/atanh.hpp aa950622a4c053630426e9f72b9f5c98 *inst/include/stan/math/fwd/fun/bessel_first_kind.hpp f16c7ffe419c92e0064c53bb7086932b *inst/include/stan/math/fwd/fun/bessel_second_kind.hpp 225522619ce72b401c290834c6be170d *inst/include/stan/math/fwd/fun/beta.hpp 524b37eb1fe9305e64791340bd8842e2 *inst/include/stan/math/fwd/fun/binary_log_loss.hpp e9ce214101bef25708480c332159b6d2 *inst/include/stan/math/fwd/fun/cbrt.hpp f92e86856316ee8e7157b0739007cebb *inst/include/stan/math/fwd/fun/ceil.hpp e7c4e045636d805bc4db79b91e0be181 *inst/include/stan/math/fwd/fun/conj.hpp dbaa93c4deb94a5d22de6b20c1d0a7bd *inst/include/stan/math/fwd/fun/cos.hpp d7162e3068ca2174cff95cffd169bdb2 *inst/include/stan/math/fwd/fun/cosh.hpp 0ece45db784b8eb62907b15bd663128c *inst/include/stan/math/fwd/fun/determinant.hpp b4a90c12008e9cee5d0d83c8085ad7e8 *inst/include/stan/math/fwd/fun/digamma.hpp 9bc15c8a175eb57c1bfc5e1d4b192c44 *inst/include/stan/math/fwd/fun/erf.hpp 7b7d40f2052a0254cc5622d0bdae6b9f *inst/include/stan/math/fwd/fun/erfc.hpp c950cb0ddf491804cf13957421fed6db *inst/include/stan/math/fwd/fun/exp.hpp 7c4e92a0d6f6e5c4940b100a0db0974c *inst/include/stan/math/fwd/fun/exp2.hpp 8b6ee7b0dc746292f44cffc9a1b4475e 
*inst/include/stan/math/fwd/fun/expm1.hpp afbfbc40d32b22537937c3e4e2c8a50d *inst/include/stan/math/fwd/fun/fabs.hpp 3d54a5987afd3af076a5abf822d9a9f1 *inst/include/stan/math/fwd/fun/falling_factorial.hpp 85e596ad2f0ceb5a48521cd79f0e8cf6 *inst/include/stan/math/fwd/fun/fdim.hpp 0589b25646c69adacdfa5438da342bbb *inst/include/stan/math/fwd/fun/floor.hpp b437abe9806601c375b282c9f55212b0 *inst/include/stan/math/fwd/fun/fma.hpp ae28c5d7a38c581d45a402230260b716 *inst/include/stan/math/fwd/fun/fmax.hpp be226c9d26b13a24afc6c9f47d45983e *inst/include/stan/math/fwd/fun/fmin.hpp 3932a0925df6c7d60cc56879a478aff3 *inst/include/stan/math/fwd/fun/fmod.hpp 979e45e02f196cbbaebea1f8134ff007 *inst/include/stan/math/fwd/fun/gamma_p.hpp fa19eb3829fffc35d8a663e48653fb47 *inst/include/stan/math/fwd/fun/gamma_q.hpp 8ceb87be2ac16627971bbdabcb9fa9f7 *inst/include/stan/math/fwd/fun/grad_inc_beta.hpp 6fbc01aa9e10b26009b7d01e67f186ba *inst/include/stan/math/fwd/fun/hypergeometric_2F1.hpp 68930c6470c80df17a15ae9dd51736b3 *inst/include/stan/math/fwd/fun/hypergeometric_pFq.hpp 7e94054c169aa97ecedc8bcffbe6e488 *inst/include/stan/math/fwd/fun/hypot.hpp a7200606e6b7a9e7b1e328a42341358c *inst/include/stan/math/fwd/fun/inc_beta.hpp 2189653b204c5507ef308a0997ea61bc *inst/include/stan/math/fwd/fun/inv.hpp 054db8ecda46d2bf16677e7b7a320911 *inst/include/stan/math/fwd/fun/inv_Phi.hpp 3c09135cacb3c8f7543b0ffb50640258 *inst/include/stan/math/fwd/fun/inv_cloglog.hpp b2aa34f33f48ddfe22212f0df20d7ce9 *inst/include/stan/math/fwd/fun/inv_erfc.hpp 57ce880b637bd73f436ec7aefb15e257 *inst/include/stan/math/fwd/fun/inv_inc_beta.hpp e5a5adf1663cdc7896ec4195ecded8d2 *inst/include/stan/math/fwd/fun/inv_logit.hpp d60c9b8dbf1b02d1e4a546e58eb39493 *inst/include/stan/math/fwd/fun/inv_sqrt.hpp 88aa0545b185030f09169290c8832757 *inst/include/stan/math/fwd/fun/inv_square.hpp 1e4b7dfbb3710ca8b6d3dc10312efe69 *inst/include/stan/math/fwd/fun/inverse.hpp 128c6a0e4c8072021a45121ce8d6ca8e *inst/include/stan/math/fwd/fun/is_inf.hpp 
9c79bd5f1e3c8cd4d1033237b75b894b *inst/include/stan/math/fwd/fun/is_nan.hpp d5b76aa8cc22f116f59b4cca328364d6 *inst/include/stan/math/fwd/fun/lambert_w.hpp 622faa0257c1fcbee41b962126549c02 *inst/include/stan/math/fwd/fun/lbeta.hpp 4cbbfe4230f8baac56f1b82155e5b93a *inst/include/stan/math/fwd/fun/ldexp.hpp 9c0d3c465e18892e5ff96d7bad4eee02 *inst/include/stan/math/fwd/fun/lgamma.hpp 45a5f29e8412e0b8c2891524fb57ac5c *inst/include/stan/math/fwd/fun/lmgamma.hpp 3b4fb32e43f80b413ac1dab7c67d5ae6 *inst/include/stan/math/fwd/fun/lmultiply.hpp 41051a8c7ae3fb909d164d7b218d0b1c *inst/include/stan/math/fwd/fun/log.hpp b6ebe99e293f37de2436fcd12df03557 *inst/include/stan/math/fwd/fun/log10.hpp d7838b96517678a50299749de959a72e *inst/include/stan/math/fwd/fun/log1m.hpp 17689432f8217ff84b0582429806d989 *inst/include/stan/math/fwd/fun/log1m_exp.hpp 495fc740195e33237ad6dbb3d783fc7c *inst/include/stan/math/fwd/fun/log1m_inv_logit.hpp b9135c509f2f689376be88989f08b529 *inst/include/stan/math/fwd/fun/log1p.hpp 10e64c50fa44126532abc4653dd4b245 *inst/include/stan/math/fwd/fun/log1p_exp.hpp f2dbc7914dcd5a2aee5aa8e1a5133b84 *inst/include/stan/math/fwd/fun/log2.hpp f4ab78498d47caa39985b7ba3f08098b *inst/include/stan/math/fwd/fun/log_determinant.hpp 4d99b51f67c101eeeefeeab3e2c09815 *inst/include/stan/math/fwd/fun/log_diff_exp.hpp ce76514a620863b903dc267ca5b8fe5d *inst/include/stan/math/fwd/fun/log_falling_factorial.hpp a60885f4b178308f8d11bc25c3b0d1e0 *inst/include/stan/math/fwd/fun/log_inv_logit.hpp ee20f7d5f654888c7d400d4dd14894e8 *inst/include/stan/math/fwd/fun/log_inv_logit_diff.hpp 965e32b17b084ec6c1f3a250b3046412 *inst/include/stan/math/fwd/fun/log_mix.hpp 221c377b84feb18162af390e059882bf *inst/include/stan/math/fwd/fun/log_rising_factorial.hpp ca60267321f0a026580229dfb63333b1 *inst/include/stan/math/fwd/fun/log_softmax.hpp d23fc08bb5689dc4e87b230f10b9a049 *inst/include/stan/math/fwd/fun/log_sum_exp.hpp 91aec4bb5c86609fcf8315cb88433d9c *inst/include/stan/math/fwd/fun/logit.hpp 
9a3a8e2400e606f0ea697d1849e0ae0c *inst/include/stan/math/fwd/fun/mdivide_left.hpp d84958f00470848bdf9ca1eed88730cf *inst/include/stan/math/fwd/fun/mdivide_left_ldlt.hpp cf1a39882e663ac670a223092e3a328a *inst/include/stan/math/fwd/fun/mdivide_left_tri_low.hpp dddec54dcea5b781e9bea76dfc72b33a *inst/include/stan/math/fwd/fun/mdivide_right.hpp 3df2a8712ce838fb53cc7fbd4565186a *inst/include/stan/math/fwd/fun/mdivide_right_tri_low.hpp 151617dcd862f0366b0fc9e33c05d01f *inst/include/stan/math/fwd/fun/modified_bessel_first_kind.hpp add367411ea90d05d47a4027f4696d51 *inst/include/stan/math/fwd/fun/modified_bessel_second_kind.hpp a88a36e3feb405ee5a563b88831fe71d *inst/include/stan/math/fwd/fun/multiply.hpp 075e27961bd4cc9a2b667bad383381bc *inst/include/stan/math/fwd/fun/multiply_log.hpp d0a508ddc3be401204dce2eaaabbb94e *inst/include/stan/math/fwd/fun/multiply_lower_tri_self_transpose.hpp d293fb4de0f4cdc4d798768f936549fa *inst/include/stan/math/fwd/fun/norm.hpp 00fed1e0953568f51e689a7343ae5249 *inst/include/stan/math/fwd/fun/norm1.hpp 95c88d548eae36e1e8fb242925519f11 *inst/include/stan/math/fwd/fun/norm2.hpp 3c6b1686137c2c16d224cf6d4f0376e4 *inst/include/stan/math/fwd/fun/owens_t.hpp 3083d1418ff9d0c40e70d8687f6fc85d *inst/include/stan/math/fwd/fun/polar.hpp 85e1e40f88e0226b1a0e8aa3db84c21b *inst/include/stan/math/fwd/fun/pow.hpp d0ba06a5ae7f8b77bf430f2624b3801a *inst/include/stan/math/fwd/fun/primitive_value.hpp d434d1a0b88d9defe5bcdf4f478f5ec2 *inst/include/stan/math/fwd/fun/proj.hpp a8460c93e665bf24cf9dc5da492ca2e5 *inst/include/stan/math/fwd/fun/quad_form.hpp 2e2368fe61e7728adc336f32ed109b44 *inst/include/stan/math/fwd/fun/quad_form_sym.hpp e7ae3264ea6da37cc156d5ec8883acfb *inst/include/stan/math/fwd/fun/read_fvar.hpp 8e6b038493e36f8bd6105354a3dcabad *inst/include/stan/math/fwd/fun/rising_factorial.hpp f5fe6b876bfaa9dd2a023fc13f12b8b1 *inst/include/stan/math/fwd/fun/round.hpp a307fcf0e8b769251a979b4477999d50 *inst/include/stan/math/fwd/fun/sin.hpp 
26b6ab4caa99cbb1e41588e943eb653f *inst/include/stan/math/fwd/fun/sinh.hpp 0cb8336fc29af75430f303d51b99ea01 *inst/include/stan/math/fwd/fun/softmax.hpp 7ba95550ae1b38b20164bdca7ed2948e *inst/include/stan/math/fwd/fun/sqrt.hpp 6c089718c503df987455d1ac443b8198 *inst/include/stan/math/fwd/fun/square.hpp 6681f71fa70cca47615b19f171479c29 *inst/include/stan/math/fwd/fun/sum.hpp f590685d4bf3ea8c5abc61129a3c2a5d *inst/include/stan/math/fwd/fun/tan.hpp 1a409609438a94e3bb70204f70bd398b *inst/include/stan/math/fwd/fun/tanh.hpp 550e7d157e6a1ef9e6a71fdc60f44de4 *inst/include/stan/math/fwd/fun/tcrossprod.hpp 4b7a01cec3c7a8b38fccc0940a56b1a0 *inst/include/stan/math/fwd/fun/tgamma.hpp ae052691748572b27ca339b5f50fa88e *inst/include/stan/math/fwd/fun/to_fvar.hpp d15a62be6aa645652437c025842d9d7d *inst/include/stan/math/fwd/fun/trace_quad_form.hpp 1af16ab4b419a125dd3446b001d0e678 *inst/include/stan/math/fwd/fun/trigamma.hpp 07a290c8c011a263ca1aaa8a38fed4c1 *inst/include/stan/math/fwd/fun/trunc.hpp e9e80466c1d6d13e9ee472e87b887e09 *inst/include/stan/math/fwd/fun/typedefs.hpp 4a3793ec819a50976fa632fba757e326 *inst/include/stan/math/fwd/fun/unit_vector_constrain.hpp e0ebd4c42a7762293865e2748acde96f *inst/include/stan/math/fwd/fun/value_of.hpp 62d8129994da1f259894894b881a0cb9 *inst/include/stan/math/fwd/fun/value_of_rec.hpp ddca5689869a12b5298414d25e2c37ea *inst/include/stan/math/fwd/functor.hpp 5cdcb271fe1b5785fff5b73309acba66 *inst/include/stan/math/fwd/functor/apply_scalar_unary.hpp f160286c0b63d77c8647dda91fe2a29d *inst/include/stan/math/fwd/functor/gradient.hpp 6502875879b14200d085bf3e08fc85d0 *inst/include/stan/math/fwd/functor/hessian.hpp eceeb88ea5ff16ccdc1eeeaa247ceee6 *inst/include/stan/math/fwd/functor/jacobian.hpp 77d4f20a5ffb3616727668d0913cd423 *inst/include/stan/math/fwd/functor/operands_and_partials.hpp da38652412afb4bc49a0405aa32b6cf4 *inst/include/stan/math/fwd/functor/partials_propagator.hpp 8c9b8e89aefc0a9a1046c6a21d0660dd 
*inst/include/stan/math/fwd/functor/reduce_sum.hpp e649c62ad21ccbf1a0e7166158056096 *inst/include/stan/math/fwd/meta.hpp 0afa836fca55065268b6052832d4e671 *inst/include/stan/math/fwd/meta/is_fvar.hpp 4993e53d3ef8c626c1f36493684f9da3 *inst/include/stan/math/fwd/meta/partials_type.hpp 31fec36c4f29130f4bbb26bfe26c0c6e *inst/include/stan/math/fwd/prob.hpp 8d375113b8accedc4654e258adc59396 *inst/include/stan/math/fwd/prob/std_normal_log_qf.hpp cada628bdc07c81db92e97913c0ed44c *inst/include/stan/math/memory/stack_alloc.hpp 4929dce845e6e24ad684aef44deaff48 *inst/include/stan/math/mix.hpp 1da6bdda1bde9133efb775496a118a91 *inst/include/stan/math/mix/fun.hpp b7dd6209dda769cff2ac9269a69509fb *inst/include/stan/math/mix/fun/typedefs.hpp 5042bf124909628733fcb28cc54ddbef *inst/include/stan/math/mix/functor.hpp 685e4f0f9dfd1d909286784681253b03 *inst/include/stan/math/mix/functor/derivative.hpp 8e01b9693ab4f87bdf4b0e9351092947 *inst/include/stan/math/mix/functor/finite_diff_grad_hessian.hpp c05d2c20c312c908a7df5f7cbf709326 *inst/include/stan/math/mix/functor/finite_diff_grad_hessian_auto.hpp 2d8958b96cc7c8583de3ae0a03eef723 *inst/include/stan/math/mix/functor/grad_hessian.hpp 37ccb329d5bb5041d0dc19079517fc9a *inst/include/stan/math/mix/functor/grad_tr_mat_times_hessian.hpp 2d5ffa2d40cee802a2a473fba17d4df5 *inst/include/stan/math/mix/functor/gradient_dot_vector.hpp 79214802983bd9af1df26ee3ee0227f2 *inst/include/stan/math/mix/functor/hessian.hpp efcce67c226c13be2bc65363269bc669 *inst/include/stan/math/mix/functor/hessian_times_vector.hpp 21a86e7ee301fc020d62c909c6598b62 *inst/include/stan/math/mix/functor/partial_derivative.hpp ee3e7f002e5fd62eaf43c1fd4b8f0fb5 *inst/include/stan/math/mix/meta.hpp 0f037ec6c19f3e8e49647b23b2001005 *inst/include/stan/math/opencl/buffer_types.hpp 20a31e5bef0fbba2a9a08110efe9c8b0 *inst/include/stan/math/opencl/cholesky_decompose.hpp 4640456190e6385285b6e68c31964995 *inst/include/stan/math/opencl/copy.hpp 11a5ddccabbbe439d9e7fe51a29a8d2e 
*inst/include/stan/math/opencl/double_d.hpp 97dfbee3356d0ea4e8eeb5a9ae87a8ee *inst/include/stan/math/opencl/err.hpp b40469150625d05af4987d0af298f081 *inst/include/stan/math/opencl/err/check_mat_size_one.hpp d6ab44078c7d66da97532574a085da51 *inst/include/stan/math/opencl/err/check_opencl.hpp aa00d4ce062f145a2774525565b8a9d7 *inst/include/stan/math/opencl/err/check_symmetric.hpp 2f7de7eb86aff999a7cf7736c12acee8 *inst/include/stan/math/opencl/err/check_triangular.hpp c13dd7561313a70e23c2aca9b6027218 *inst/include/stan/math/opencl/indexing_rev.hpp 9a638c0b36048b4f249d435300f73d94 *inst/include/stan/math/opencl/is_constant.hpp e9b53341e0217fb74016b7bbb2122785 *inst/include/stan/math/opencl/kernel_cl.hpp 0dac56781413a44b72c65828021c2b58 *inst/include/stan/math/opencl/kernel_generator.hpp 3ff171ea66aa499ad6c36397790d4d61 *inst/include/stan/math/opencl/kernel_generator/append.hpp dbf80d303a9b00b719e99cb4c2859134 *inst/include/stan/math/opencl/kernel_generator/as_column_vector_or_scalar.hpp 023569ed2c60dcc3cc9dbb65f0c338d9 *inst/include/stan/math/opencl/kernel_generator/as_operation_cl.hpp 84116b3fdb0ecb4300e31dbb65e852e3 *inst/include/stan/math/opencl/kernel_generator/binary_operation.hpp d2b91a2df0fb67717eaa1a675703c4b1 *inst/include/stan/math/opencl/kernel_generator/block_zero_based.hpp 477df705d59db169dd1dc9565f110132 *inst/include/stan/math/opencl/kernel_generator/broadcast.hpp 5144fa12d8dc1296988ff2d0911ef516 *inst/include/stan/math/opencl/kernel_generator/calc_if.hpp 4f157c1f611606b0ae85e1c6779d689d *inst/include/stan/math/opencl/kernel_generator/cast.hpp 7ffe61497afd4be62d8bc458e9b18019 *inst/include/stan/math/opencl/kernel_generator/check_cl.hpp b98bd685077896c966acf96f23d7eca0 *inst/include/stan/math/opencl/kernel_generator/colwise_reduction.hpp c27c0c0771ac25813b3c16d18641a1fc *inst/include/stan/math/opencl/kernel_generator/common_return_scalar.hpp 3e0c61980fcdac171ce585eb46ebdcaf *inst/include/stan/math/opencl/kernel_generator/compound_assignments.hpp 
b0a48489821511397ac9bb79e73bc16c *inst/include/stan/math/opencl/kernel_generator/constant.hpp 2805199e4cb5895dd1fc62499bcc654d *inst/include/stan/math/opencl/kernel_generator/diagonal.hpp 951bc18467ed60eae2168b8b07189e4b *inst/include/stan/math/opencl/kernel_generator/elt_function_cl.hpp 012e55f60f8decddd32a1c7f6750d7d2 *inst/include/stan/math/opencl/kernel_generator/evaluate_into.hpp 8eb6ca6e52b53e31d5264664dafbe015 *inst/include/stan/math/opencl/kernel_generator/get_kernel_source_for_evaluating_into.hpp d883e6632b34adcf45ae48bc9ec9ef2b *inst/include/stan/math/opencl/kernel_generator/holder_cl.hpp 0929116e1c038d3e3ee5a22ca6097385 *inst/include/stan/math/opencl/kernel_generator/index.hpp b824f367eb278b79d620fade1c599215 *inst/include/stan/math/opencl/kernel_generator/indexing.hpp d830a88d3aa1c4b714f5e47fc851e716 *inst/include/stan/math/opencl/kernel_generator/load.hpp a266b316c886c6e05df465b0c3ba1d2c *inst/include/stan/math/opencl/kernel_generator/matrix_cl_conversion.hpp fc4f6afcfb1fa98ce44c6b197dc81538 *inst/include/stan/math/opencl/kernel_generator/matrix_vector_multiply.hpp c3d811321a5a983bc99e6af3bd2701b0 *inst/include/stan/math/opencl/kernel_generator/multi_result_kernel.hpp 8cfcd4196c6787724897d9b4ac15ec5d *inst/include/stan/math/opencl/kernel_generator/name_generator.hpp d18e9fb215a531b313552bfe28c64158 *inst/include/stan/math/opencl/kernel_generator/opencl_code.hpp 97d4e36b60bb46f08bc539192fe30f57 *inst/include/stan/math/opencl/kernel_generator/operation_cl.hpp e5284f8cfd2cdbb17cb59c502e33ebab *inst/include/stan/math/opencl/kernel_generator/operation_cl_lhs.hpp 9d9ca8d1a86beb4bfe0caa25669d5fca *inst/include/stan/math/opencl/kernel_generator/optional_broadcast.hpp b51f33e9490f546de01faec12e78b1f3 *inst/include/stan/math/opencl/kernel_generator/reduction_2d.hpp 8eb63f9f78e1ad47ede1fe3ee6a8340f *inst/include/stan/math/opencl/kernel_generator/rowwise_reduction.hpp 877565687c09b1491f5aa105292c23d5 *inst/include/stan/math/opencl/kernel_generator/scalar.hpp 
d505556d4a9aab537014f4739ae6aca0 *inst/include/stan/math/opencl/kernel_generator/select.hpp 86a7c40a00e66b75f17353c3ae75aae6 *inst/include/stan/math/opencl/kernel_generator/transpose.hpp d2e94a2f17293f0f8169cf6db3497624 *inst/include/stan/math/opencl/kernel_generator/type_str.hpp 328df994b2db7660ee854b5aa278b4c6 *inst/include/stan/math/opencl/kernel_generator/unary_operation_cl.hpp dd3d8e4ca2f58ba1b907943cbe2413ac *inst/include/stan/math/opencl/kernels/add.hpp 75fefc2f1ecf9d4fc4d6392477a65221 *inst/include/stan/math/opencl/kernels/batch_identity.hpp 7b2004d2668153bdbbca0fb5677308aa *inst/include/stan/math/opencl/kernels/categorical_logit_glm_lpmf.hpp f3067ec6b6480cff5b4707b7a7b8009f *inst/include/stan/math/opencl/kernels/check_symmetric.hpp c07859df72dcc86fc138da456edd386d *inst/include/stan/math/opencl/kernels/cholesky_decompose.hpp 3838b277bd6c9c7e1d91189b8bbde292 *inst/include/stan/math/opencl/kernels/cumulative_sum.hpp 155e727688e5fdc6cb0b163ad400cc0d *inst/include/stan/math/opencl/kernels/device_functions/Phi.hpp 2bcd230593ad9aa474753ddb0ef3e7ab *inst/include/stan/math/opencl/kernels/device_functions/Phi_approx.hpp f921eb06bacdbf8e963d31baf8c6508d *inst/include/stan/math/opencl/kernels/device_functions/atomic_add_double.hpp e42713b67bcce9b052bb53ac7b9a6eac *inst/include/stan/math/opencl/kernels/device_functions/beta.hpp 6a64f1d87c160ecdd8201b237d8c835b *inst/include/stan/math/opencl/kernels/device_functions/binomial_coefficient_log.hpp 037054888e1abb54e5ac085d779d7063 *inst/include/stan/math/opencl/kernels/device_functions/digamma.hpp 6b836658f7e33617191bab10d921fc48 *inst/include/stan/math/opencl/kernels/device_functions/inv_Phi.hpp cb934c2cd8603521fc3522d9a076c191 *inst/include/stan/math/opencl/kernels/device_functions/inv_logit.hpp 9e97990e66de51ce6582e59675eb34fd *inst/include/stan/math/opencl/kernels/device_functions/inv_square.hpp 6123bb3ab363b78f6baf22427148bbe5 *inst/include/stan/math/opencl/kernels/device_functions/lbeta.hpp 
3d5c45486e5a68fa3aba3ef37574666f *inst/include/stan/math/opencl/kernels/device_functions/lgamma_stirling.hpp a1d34e7f9dcfd8a5c084372a7a9b2bdd *inst/include/stan/math/opencl/kernels/device_functions/lgamma_stirling_diff.hpp b96edd89ba9007b842f7b34b25355133 *inst/include/stan/math/opencl/kernels/device_functions/lmultiply.hpp 2c3c451fac1e3c8fc580d00942ec2101 *inst/include/stan/math/opencl/kernels/device_functions/log1m.hpp 8e65b9d22a6daf02ba9b1283643f8d75 *inst/include/stan/math/opencl/kernels/device_functions/log1m_exp.hpp 66263dbf2cdeee129977e11424bacde1 *inst/include/stan/math/opencl/kernels/device_functions/log1m_inv_logit.hpp fbb75a1721058de0040ae44f9a68a21e *inst/include/stan/math/opencl/kernels/device_functions/log1p_exp.hpp 3e54b342a170f72af8150d14cddd1e9b *inst/include/stan/math/opencl/kernels/device_functions/log_diff_exp.hpp 0c951e05ceb942c8e19c0684519f8545 *inst/include/stan/math/opencl/kernels/device_functions/log_inv_logit.hpp 4ff2fb914621b67802d4ce886f240b99 *inst/include/stan/math/opencl/kernels/device_functions/log_inv_logit_diff.hpp 7a0bdb7adaf13bfd674ba1a8b951f531 *inst/include/stan/math/opencl/kernels/device_functions/logit.hpp 717045e5ac569c5dab587ec0d51564a8 *inst/include/stan/math/opencl/kernels/device_functions/multiply_log.hpp e9c0c0095f1af59d8934088979dedd12 *inst/include/stan/math/opencl/kernels/device_functions/trigamma.hpp 8bba841fbeeb9dda858f87432c776f82 *inst/include/stan/math/opencl/kernels/diag_inv.hpp 509c3bd24b44001134477af5bb971054 *inst/include/stan/math/opencl/kernels/divide_columns.hpp 4e018333f7b6c5e1a6e09d923029b143 *inst/include/stan/math/opencl/kernels/fill_strict_tri.hpp 121d24a0913ff2962a22e129c28c498a *inst/include/stan/math/opencl/kernels/gp_exp_quad_cov.hpp 2f3ea6796ace8268f6dcf059ac460b2f *inst/include/stan/math/opencl/kernels/gp_exponential_cov.hpp b801ba68c5a9e97a26f30cce80a484d8 *inst/include/stan/math/opencl/kernels/gp_matern32_cov.hpp 001c05f61c3c5991d093a53dee5d0819 
*inst/include/stan/math/opencl/kernels/gp_matern52_cov.hpp 6a696ae91f1ea2d9fa9ad793ce00fdee *inst/include/stan/math/opencl/kernels/helpers.hpp 9689679fbe42d61f6f0ed63b14c7a674 *inst/include/stan/math/opencl/kernels/indexing_rev.hpp 971075bd9773cc874bd308dd03b7a0c1 *inst/include/stan/math/opencl/kernels/inv_lower_tri_multiply.hpp a24acc43436efcefb5586924fceeec26 *inst/include/stan/math/opencl/kernels/matrix_multiply.hpp 0b2e71f2c5ac3b2331cdc685b3d606a9 *inst/include/stan/math/opencl/kernels/mergesort.hpp 626e81ff9c18d67cf29dae2c444898a7 *inst/include/stan/math/opencl/kernels/mrrr.hpp 87dc729c54d9a19706eb74688f750886 *inst/include/stan/math/opencl/kernels/multiply_transpose.hpp cfbf56114d22e4cf044b2252653b9840 *inst/include/stan/math/opencl/kernels/neg_binomial_2_log_glm_lpmf.hpp 2494cc7a7efe60ec5d9b8ac60a087a53 *inst/include/stan/math/opencl/kernels/neg_rect_lower_tri_multiply.hpp 7d778948dca6000c43b6345a129b2691 *inst/include/stan/math/opencl/kernels/ordered_logistic_glm_lpmf.hpp ee506eef7b11cc649865ccb4bf11de5f *inst/include/stan/math/opencl/kernels/ordered_logistic_lpmf.hpp 4a363d85fc7321badc3204c0153a9773 *inst/include/stan/math/opencl/kernels/pack.hpp 087f8cb3813059331322a9dee1781c71 *inst/include/stan/math/opencl/kernels/rep_matrix.hpp c3b7aaa189a7c568da9190fbba71fd04 *inst/include/stan/math/opencl/kernels/tridiagonalization.hpp a022d3112669b2b4eea5b945d2cb86b4 *inst/include/stan/math/opencl/kernels/unpack.hpp 6fde28f857fbd4f4eaa90b6b180b8125 *inst/include/stan/math/opencl/matrix_cl.hpp 9c9db68671c99d632223fce94b759bd7 *inst/include/stan/math/opencl/matrix_cl_view.hpp 9715769389e426a83609f506392b2a4c *inst/include/stan/math/opencl/mrrr.hpp 99ebe9aef3d976d399500caaef505571 *inst/include/stan/math/opencl/multiply_transpose.hpp 43ce08f75406fbcdd864b48aa49b413b *inst/include/stan/math/opencl/opencl_context.hpp 558059c74c8c87eabd4625f50ed35884 *inst/include/stan/math/opencl/pinned_matrix.hpp 6d8c610c02ec2d4565c548942057594f 
*inst/include/stan/math/opencl/plain_type.hpp ffd3391456d6d3547ef98208e8670a64 *inst/include/stan/math/opencl/prim.hpp a511536738338888da66f2350de34a02 *inst/include/stan/math/opencl/prim/add_diag.hpp 6c17700adfb542ed50e1b5fb2b336d0f *inst/include/stan/math/opencl/prim/append_array.hpp 26388b771733e869adde639f60d0463d *inst/include/stan/math/opencl/prim/bernoulli_cdf.hpp 9e0955a5f1a9dfd0c2d6fa6d56366e91 *inst/include/stan/math/opencl/prim/bernoulli_lccdf.hpp 7119100f35727dd8a04b6a15cf7d9704 *inst/include/stan/math/opencl/prim/bernoulli_lcdf.hpp 624f3d26479f101e5c26a1badf97de84 *inst/include/stan/math/opencl/prim/bernoulli_logit_glm_lpmf.hpp 0fb6bc4fc0e667124704bdf008dce41b *inst/include/stan/math/opencl/prim/bernoulli_logit_lpmf.hpp 8cbd28e00f6a9ff376a70f0050fa1c14 *inst/include/stan/math/opencl/prim/bernoulli_lpmf.hpp fec7e68a86a12f8fae62685c9a2a3bd5 *inst/include/stan/math/opencl/prim/beta_binomial_lpmf.hpp cc3a0aa725bc3b3db6688c7aa5d403e2 *inst/include/stan/math/opencl/prim/beta_lpdf.hpp d4343edded1a27aee96c89db497d9559 *inst/include/stan/math/opencl/prim/beta_proportion_lpdf.hpp 8ead6285f1a89e33d2c116daeb316e5e *inst/include/stan/math/opencl/prim/binomial_logit_lpmf.hpp df48b00e136c16603a703b798e5cbc23 *inst/include/stan/math/opencl/prim/binomial_lpmf.hpp faf1683f87d11604f30bf20a01496190 *inst/include/stan/math/opencl/prim/block.hpp 667a48e7eb9533c80594528430359210 *inst/include/stan/math/opencl/prim/categorical_logit_glm_lpmf.hpp 5e7aff34d4b33ea95f0f7a24147a9893 *inst/include/stan/math/opencl/prim/cauchy_cdf.hpp e58bdc7900e4f4868568166587ef3d37 *inst/include/stan/math/opencl/prim/cauchy_lccdf.hpp dc818af77c854149d9b6304e17831b73 *inst/include/stan/math/opencl/prim/cauchy_lcdf.hpp b4a556460bb09581beea38f7cf87b46b *inst/include/stan/math/opencl/prim/cauchy_lpdf.hpp 13a13cb4f7955b2c548c0fe6b6e8b613 *inst/include/stan/math/opencl/prim/chi_square_lpdf.hpp c813e1508c4815cd3ca44d4912254023 *inst/include/stan/math/opencl/prim/cholesky_decompose.hpp 
ed2a029c0a0d5413103dbfe0f9813e37 *inst/include/stan/math/opencl/prim/col.hpp 6c3939979a3b594435587b026fdce3a3 *inst/include/stan/math/opencl/prim/cols.hpp dcf8dab42275d330ead12a27b67441af *inst/include/stan/math/opencl/prim/columns_dot_product.hpp 6c6a1e6e07f878e368f2886676d01449 *inst/include/stan/math/opencl/prim/columns_dot_self.hpp f3dce9239737ddcf4072b93c3928b5f8 *inst/include/stan/math/opencl/prim/crossprod.hpp 3d107b37ba0bcb2a55c22e87a94297a8 *inst/include/stan/math/opencl/prim/cumulative_sum.hpp 4be10afb3fb21f497241fc88e214c671 *inst/include/stan/math/opencl/prim/diag_matrix.hpp c72a9217ff89c89b26ab8d302242dddd *inst/include/stan/math/opencl/prim/diag_post_multiply.hpp 7c03ba23b077c8d282a8522feef59a74 *inst/include/stan/math/opencl/prim/diag_pre_multiply.hpp 60959340b5cacaa5a66e3e4cde17b10e *inst/include/stan/math/opencl/prim/dims.hpp f309566e9f8d6b04f7deff97c0a00f83 *inst/include/stan/math/opencl/prim/dirichlet_lpdf.hpp c705f1b0ca1f2f04b7689c980bcd8bae *inst/include/stan/math/opencl/prim/distance.hpp 25d05ac155998ad9ee94429b63b09b43 *inst/include/stan/math/opencl/prim/divide.hpp 4017a3085f22959bcb796399ca4d8c27 *inst/include/stan/math/opencl/prim/divide_columns.hpp ec502cd6a8cfbc28f97b321e63e20c23 *inst/include/stan/math/opencl/prim/dot_product.hpp b4c4d4f3ae414b8c8c2126eb8871f743 *inst/include/stan/math/opencl/prim/dot_self.hpp 2af0b79bfd8c620f42451c7ceac546b1 *inst/include/stan/math/opencl/prim/double_exponential_cdf.hpp 913ce5654020856d0bd3866c5bdb8bfb *inst/include/stan/math/opencl/prim/double_exponential_lccdf.hpp c693a7af83602cd1675b6b14d0b99a6e *inst/include/stan/math/opencl/prim/double_exponential_lcdf.hpp 9361a0348251e0bbdc66c3d7bcae3d6b *inst/include/stan/math/opencl/prim/double_exponential_lpdf.hpp ef78324ec2e6f741adc00b7bce2122d9 *inst/include/stan/math/opencl/prim/eigenvalues_sym.hpp d81ba31564631d3cb66dbe8822f6a346 *inst/include/stan/math/opencl/prim/eigenvectors_sym.hpp 018d455934287796709bb194390c7330 
*inst/include/stan/math/opencl/prim/exp_mod_normal_cdf.hpp ecf67bc47cfa820e162880208e88d08e *inst/include/stan/math/opencl/prim/exp_mod_normal_lccdf.hpp 88ab861847f7c3adc9bb0186c8c483aa *inst/include/stan/math/opencl/prim/exp_mod_normal_lcdf.hpp 27fdfadf73f88359afe4e997b25531a3 *inst/include/stan/math/opencl/prim/exp_mod_normal_lpdf.hpp eef56962ababb3a6d953f19c4d7bbf65 *inst/include/stan/math/opencl/prim/exponential_cdf.hpp 8735c5aeb6f2f33b0ecff63ede9de9f4 *inst/include/stan/math/opencl/prim/exponential_lccdf.hpp 070e6f2c82226d7b968e8f52241fdea3 *inst/include/stan/math/opencl/prim/exponential_lcdf.hpp cc46827dc9ba2425e4939f92ecf3b40f *inst/include/stan/math/opencl/prim/exponential_lpdf.hpp 1f3e32dd7823211cb99d3ecea1d769f1 *inst/include/stan/math/opencl/prim/frechet_cdf.hpp 4e93c2ef234ea23ece246fb528413846 *inst/include/stan/math/opencl/prim/frechet_lccdf.hpp 2c9758cf9460ed23c48e09e3899147e8 *inst/include/stan/math/opencl/prim/frechet_lcdf.hpp 7a869d62b9bcbfe7409c4b939bfde74a *inst/include/stan/math/opencl/prim/frechet_lpdf.hpp 6b870586df1b7ba308c579613879e06c *inst/include/stan/math/opencl/prim/gamma_lpdf.hpp c7c64d488d8981dd7f31fa4278db416b *inst/include/stan/math/opencl/prim/gp_dot_prod_cov.hpp 4ff823af467fe4eee1b69912945943d3 *inst/include/stan/math/opencl/prim/gp_exp_quad_cov.hpp a66a252ca66afe72cf77f7f2e91b2a93 *inst/include/stan/math/opencl/prim/gp_exponential_cov.hpp b4cab9e82bf80c97bb9d52f46c9d5c39 *inst/include/stan/math/opencl/prim/gp_matern32_cov.hpp c6e752cedbc35f0fb709afbdd4f879f7 *inst/include/stan/math/opencl/prim/gp_matern52_cov.hpp e7943781440e6742c618b4e312722259 *inst/include/stan/math/opencl/prim/gumbel_cdf.hpp e7084f587061d8dad56c17ca6594ecdd *inst/include/stan/math/opencl/prim/gumbel_lccdf.hpp a274bb67978e903e598ee07f95fd54fc *inst/include/stan/math/opencl/prim/gumbel_lcdf.hpp 02fe015a16e0a29f990a1e6d58a8c79e *inst/include/stan/math/opencl/prim/gumbel_lpdf.hpp 7d1da5f23abca26ac7d8fd2abf82a3f5 *inst/include/stan/math/opencl/prim/head.hpp 
8ff4ca346c9e2db48db19bcf8b6bbac2 *inst/include/stan/math/opencl/prim/identity_matrix.hpp b5c7d5a9de4c9123f781a8c1f837e827 *inst/include/stan/math/opencl/prim/inv.hpp eed4d6116b3cf1bf37a8375024510608 *inst/include/stan/math/opencl/prim/inv_chi_square_lpdf.hpp abab87f227afb75e87afb1b74364dec3 *inst/include/stan/math/opencl/prim/inv_cloglog.hpp 6b919135790d3fb50f7c81ec18dc0444 *inst/include/stan/math/opencl/prim/inv_gamma_lpdf.hpp 994d8778e35ed961d01df96916120226 *inst/include/stan/math/opencl/prim/inv_sqrt.hpp 7f44b506ac3ee916fb53e805f3dd8f11 *inst/include/stan/math/opencl/prim/lb_constrain.hpp 4530504f614ec11432227d83b24d46b0 *inst/include/stan/math/opencl/prim/log_mix.hpp 0ff378087c0ac43bbc0cd299e1873264 *inst/include/stan/math/opencl/prim/log_softmax.hpp 1e61500533dbf180bc22ec09f958aeae *inst/include/stan/math/opencl/prim/log_sum_exp.hpp a30c32d68aebb337d50ac613b0b41c06 *inst/include/stan/math/opencl/prim/logistic_cdf.hpp c48048401728c384a946d02595d939db *inst/include/stan/math/opencl/prim/logistic_lccdf.hpp af2fbb6cced13c0a8d4713e072a46eae *inst/include/stan/math/opencl/prim/logistic_lcdf.hpp f436d86c327809432c217609b5d4f746 *inst/include/stan/math/opencl/prim/logistic_lpdf.hpp 80538d13fcf9dabb97bb7e9f7bd2759a *inst/include/stan/math/opencl/prim/lognormal_cdf.hpp d063448f070b1ad87c72dba39afbabf7 *inst/include/stan/math/opencl/prim/lognormal_lccdf.hpp 5fd0d42b2eaa1ff95484360fdc59b536 *inst/include/stan/math/opencl/prim/lognormal_lcdf.hpp e4f800b359909337c2fa44c6a28ca848 *inst/include/stan/math/opencl/prim/lognormal_lpdf.hpp eeb62003965a16c4b3d73a12caf59f5e *inst/include/stan/math/opencl/prim/lub_constrain.hpp b9dc7639a50b094fc3cf39f1624673c1 *inst/include/stan/math/opencl/prim/matrix_power.hpp 25dddd4feb7c90d6c930f9d961b67f85 *inst/include/stan/math/opencl/prim/mdivide_left_tri_low.hpp 68446285af7c66f0d9f682a6679959f7 *inst/include/stan/math/opencl/prim/mdivide_right_tri_low.hpp 28070fa98b65006bd8961efdf74a0b96 *inst/include/stan/math/opencl/prim/mean.hpp 
2d3b4836ab9def5a64677b945714ae44 *inst/include/stan/math/opencl/prim/multi_normal_cholesky_lpdf.hpp 41dbbcb7f7e8288fbc32d96ea9905a4b *inst/include/stan/math/opencl/prim/multiply.hpp 5c4d126e9576bab2fc0399d294430e7a *inst/include/stan/math/opencl/prim/multiply_lower_tri_self_transpose.hpp 8026927ee15ae38c47838701adcb67be *inst/include/stan/math/opencl/prim/neg_binomial_2_log_glm_lpmf.hpp 8e47d869d00ff37fa55b2a24d9958554 *inst/include/stan/math/opencl/prim/neg_binomial_2_log_lpmf.hpp e82ccad672468e0ac38ce4b00f1a4ed9 *inst/include/stan/math/opencl/prim/neg_binomial_2_lpmf.hpp 45aedf4e5f75707653af228ee9e57dd5 *inst/include/stan/math/opencl/prim/neg_binomial_lpmf.hpp 99fc76a9435411799b015adf6eb2b00c *inst/include/stan/math/opencl/prim/normal_cdf.hpp 1faeaa98e3253aa073e1c677c0b7a321 *inst/include/stan/math/opencl/prim/normal_id_glm_lpdf.hpp 9d4a3fcff04317143c2fcbe53af246d8 *inst/include/stan/math/opencl/prim/normal_lccdf.hpp 9d2209d16a88f0451e137f3fd8068273 *inst/include/stan/math/opencl/prim/normal_lcdf.hpp 1d3752268e80ad5cdc27b3e3e3295f60 *inst/include/stan/math/opencl/prim/normal_lpdf.hpp 3829659544fe251d02ea3c446e86a02f *inst/include/stan/math/opencl/prim/num_elements.hpp c2f65330bea96f2bbf1f79ee2d32f2e2 *inst/include/stan/math/opencl/prim/offset_multiplier_constrain.hpp ccd942cc28742618a66fa1136a49be73 *inst/include/stan/math/opencl/prim/ordered_logistic_glm_lpmf.hpp ac8398bc1c467c39a68afc5f83d52dc6 *inst/include/stan/math/opencl/prim/ordered_logistic_lpmf.hpp e7b598049a4e74e6d83cb8e9bf015968 *inst/include/stan/math/opencl/prim/pareto_cdf.hpp eb7856fb5e839dc6a1663ca059babe15 *inst/include/stan/math/opencl/prim/pareto_lccdf.hpp d5d4ef844d26975431beedab44e69e22 *inst/include/stan/math/opencl/prim/pareto_lcdf.hpp dacc9d1fe72461a0eff28454623b53e2 *inst/include/stan/math/opencl/prim/pareto_lpdf.hpp bcb8387893a81243daf6ad6dcb57c15d *inst/include/stan/math/opencl/prim/pareto_type_2_cdf.hpp 50552b9cf62d14aa179e41eb9b0951f5 
*inst/include/stan/math/opencl/prim/pareto_type_2_lccdf.hpp bd55827bff4e653f758d275d1f70e113 *inst/include/stan/math/opencl/prim/pareto_type_2_lcdf.hpp 03b55898172d692acf33ff12d7f3a0fe *inst/include/stan/math/opencl/prim/pareto_type_2_lpdf.hpp 502edcc696622291d171138a974d05e5 *inst/include/stan/math/opencl/prim/poisson_log_glm_lpmf.hpp ea6d4af07ac7721db0c7eea43184097f *inst/include/stan/math/opencl/prim/poisson_log_lpmf.hpp c0e55d157f4440928d35ff53f70de61a *inst/include/stan/math/opencl/prim/poisson_lpmf.hpp 4c8058f68e066f82732f0f3c979d90f2 *inst/include/stan/math/opencl/prim/prod.hpp b368713ba31cc6be3e253dc6fc440832 *inst/include/stan/math/opencl/prim/qr_Q.hpp ad67d62dc3c6f11d0ff2458852c29beb *inst/include/stan/math/opencl/prim/qr_R.hpp c763986bd7b0143d16d30784c0b61e88 *inst/include/stan/math/opencl/prim/qr_thin_Q.hpp 39a86748cb8c18b8caa27d678d81978c *inst/include/stan/math/opencl/prim/qr_thin_R.hpp 2b44f05e84c219d9a33a443fd476f34c *inst/include/stan/math/opencl/prim/rank.hpp 329b5424cec96925c77989953893fa37 *inst/include/stan/math/opencl/prim/rayleigh_cdf.hpp dcdc35aa0d6ff762102faaff1b2f184f *inst/include/stan/math/opencl/prim/rayleigh_lccdf.hpp 503384aa946d71111db3cb97199a1ab1 *inst/include/stan/math/opencl/prim/rayleigh_lcdf.hpp fd510ea5dfa06416cd0ca76b42522393 *inst/include/stan/math/opencl/prim/rayleigh_lpdf.hpp 6226773d06e283b9bd1ca0150ea98879 *inst/include/stan/math/opencl/prim/rep_array.hpp 5d27ec9ae5620f1b1eac06a8baee356a *inst/include/stan/math/opencl/prim/rep_matrix.hpp 53534d9d11a517a34a355b235767e42d *inst/include/stan/math/opencl/prim/rep_row_vector.hpp daa1b2f88d0344ccef76da379a88ee76 *inst/include/stan/math/opencl/prim/rep_vector.hpp 4a9faf024b8c7e4926a9e9fae61f2c3f *inst/include/stan/math/opencl/prim/reverse.hpp 65b1015c9d145801a066a1f6bb65181e *inst/include/stan/math/opencl/prim/row.hpp fb5815c110d4310211d0e6ad80228623 *inst/include/stan/math/opencl/prim/rows.hpp c1475a23f4c695b862fb1cfd4e1b50e3 
*inst/include/stan/math/opencl/prim/rows_dot_product.hpp 4d1c342275320fbcfb6c1681293fe91f *inst/include/stan/math/opencl/prim/rows_dot_self.hpp 01c586d3718f62d296de8479144bab6a *inst/include/stan/math/opencl/prim/scaled_inv_chi_square_lpdf.hpp 8862aa3549b9da1bb39513e0e18670b6 *inst/include/stan/math/opencl/prim/sd.hpp 8bd4a53e32b013466e5c60065a96e2ec *inst/include/stan/math/opencl/prim/segment.hpp 3a448f909b100846ab6a4f3dc33f79d9 *inst/include/stan/math/opencl/prim/sign.hpp 0209a7056a11ae9784960f3a271a066e *inst/include/stan/math/opencl/prim/size.hpp 47e3a3b52c1e263a657bdc10e923c208 *inst/include/stan/math/opencl/prim/skew_double_exponential_cdf.hpp 06c84206699602feeabbcdef6d0b044c *inst/include/stan/math/opencl/prim/skew_double_exponential_lccdf.hpp 861b3ed1b1faf4df30b9711a3241d8bc *inst/include/stan/math/opencl/prim/skew_double_exponential_lcdf.hpp 157ed18153acdff4c464883be5fdb836 *inst/include/stan/math/opencl/prim/skew_double_exponential_lpdf.hpp 49cfdfa59da1c517aba0ee8db8c50146 *inst/include/stan/math/opencl/prim/skew_normal_lpdf.hpp e846bed0dda2c265c133bdff4a30ac19 *inst/include/stan/math/opencl/prim/softmax.hpp 40b94903f88349f95d22c9f4edb178f8 *inst/include/stan/math/opencl/prim/sort_asc.hpp e22edbb317602d905adb0a13b648b804 *inst/include/stan/math/opencl/prim/sort_desc.hpp 940b6cda57f11f526503eaf7d5c3ccc9 *inst/include/stan/math/opencl/prim/squared_distance.hpp 764c748932e6c61e8ee27316807af79e *inst/include/stan/math/opencl/prim/std_normal_cdf.hpp dbd3fdb92199192e240b29cf2c2455e5 *inst/include/stan/math/opencl/prim/std_normal_lccdf.hpp 43a0936d65951c989129a90f07ff0e77 *inst/include/stan/math/opencl/prim/std_normal_lcdf.hpp 7bfddf1376b9c1388f00145d6f946455 *inst/include/stan/math/opencl/prim/std_normal_lpdf.hpp 888d5d3c891055b8aaf84bea4c42729c *inst/include/stan/math/opencl/prim/student_t_lpdf.hpp df88e34d5c4678b0f241328541f097ef *inst/include/stan/math/opencl/prim/sub_col.hpp dfa16f1c5e61c7ad37fe0b505aafaee6 *inst/include/stan/math/opencl/prim/sub_row.hpp 
4ae458856498943229a46102fca46a2f *inst/include/stan/math/opencl/prim/sum.hpp ea194139056b90d89e237ef8b8125f71 *inst/include/stan/math/opencl/prim/symmetrize_from_lower_tri.hpp 958e52d30965f1a82d8b031987193c02 *inst/include/stan/math/opencl/prim/symmetrize_from_upper_tri.hpp 25e55c4c8dea8edaf1c4e60a26766a9c *inst/include/stan/math/opencl/prim/tail.hpp ff4171d613de82f4cf210e09d0d6be31 *inst/include/stan/math/opencl/prim/tcrossprod.hpp 4b06b0bc4f3f1b6c29a2115d11ad7ec9 *inst/include/stan/math/opencl/prim/to_array_1d.hpp 7f5fef849e9ec402f623151b355fb469 *inst/include/stan/math/opencl/prim/to_array_2d.hpp 98a4cdbdf82d0fcf0f7a90b0943579b8 *inst/include/stan/math/opencl/prim/to_matrix.hpp d7302f7b18c07a74a76bb06c05eeee16 *inst/include/stan/math/opencl/prim/to_row_vector.hpp 3ef2ccc98cda4e651144fd9fce6c3e59 *inst/include/stan/math/opencl/prim/to_vector.hpp 68a4cd8e2148311a1d6cda5d27fc2ff6 *inst/include/stan/math/opencl/prim/trace.hpp ada9b290524ccce0ad2cf817460b3253 *inst/include/stan/math/opencl/prim/ub_constrain.hpp dd095d2df6d088a9f9f92dbcf43d5c99 *inst/include/stan/math/opencl/prim/uniform_cdf.hpp 923c9e58ebc689c0c0bb3c434e7f0da9 *inst/include/stan/math/opencl/prim/uniform_lccdf.hpp 27e8fd86f0f3de6e173eaf5966cd26b3 *inst/include/stan/math/opencl/prim/uniform_lcdf.hpp ca935ed329980c24a57a15bcae4ab291 *inst/include/stan/math/opencl/prim/uniform_lpdf.hpp 2cf64669cf67a10f17a6eb499b7a46c6 *inst/include/stan/math/opencl/prim/unit_vector_constrain.hpp 556abc37cf30e22524a5e8b588e45807 *inst/include/stan/math/opencl/prim/variance.hpp f67b1447718a15a0e3057affd799d4ca *inst/include/stan/math/opencl/prim/weibull_cdf.hpp 6407793b2ff874706c3714f7df0ec8e7 *inst/include/stan/math/opencl/prim/weibull_lccdf.hpp 6879722f1e1bdf7b330d4ad55df1e217 *inst/include/stan/math/opencl/prim/weibull_lcdf.hpp 0913679daa8d5cae1fef47e11b2fc118 *inst/include/stan/math/opencl/prim/weibull_lpdf.hpp 419e6541a01e03acc248a0f742dfe3dd *inst/include/stan/math/opencl/qr_decomposition.hpp 
242d49702d438d3d04c1081b3712e0f2 *inst/include/stan/math/opencl/ref_type.hpp f626a7f36f067789abbf20c8443a92f4 *inst/include/stan/math/opencl/ref_type_for_opencl.hpp c98ed245228e17c33afccda477ca8e47 *inst/include/stan/math/opencl/rev.hpp 532d166955ad9cddb611bf81b8a7b7a2 *inst/include/stan/math/opencl/rev/Phi.hpp a7ffa95dfda806a930f554270566505e *inst/include/stan/math/opencl/rev/Phi_approx.hpp c14dfdd0a6fa4b481d209504c2f81134 *inst/include/stan/math/opencl/rev/acos.hpp 16dc2205a40dbf3d4cbd54196baa7ae2 *inst/include/stan/math/opencl/rev/acosh.hpp 02bc0371c3431eca49a9a1393afb5ed8 *inst/include/stan/math/opencl/rev/add.hpp f6bd59b43e114d8eb8d954d83aecc843 *inst/include/stan/math/opencl/rev/add_diag.hpp 31916660ec7fb3e192b9a8936df2d2da *inst/include/stan/math/opencl/rev/adjoint_results.hpp dcb880091378436078741b3b0d78e3d1 *inst/include/stan/math/opencl/rev/append_col.hpp 5cab38db3315ee8d9358543597c7722e *inst/include/stan/math/opencl/rev/append_row.hpp c1c7278cb8aef9d83304cd355dd80aa2 *inst/include/stan/math/opencl/rev/arena_matrix_cl.hpp 11e9d8cfac6e7f00b0f09d94969e9323 *inst/include/stan/math/opencl/rev/arena_type.hpp 5a46a343e00549e20aebb9b9873444cf *inst/include/stan/math/opencl/rev/as_column_vector_or_scalar.hpp 0a4faa104867dfa07a46a25e334995c5 *inst/include/stan/math/opencl/rev/asin.hpp 636d5dda9d870959750cc7302a21da0b *inst/include/stan/math/opencl/rev/asinh.hpp 50da989fe829f90459b8e432575537e7 *inst/include/stan/math/opencl/rev/atan.hpp b0f5fa2ec1e4f3436bb0e1dd7839e2dd *inst/include/stan/math/opencl/rev/atanh.hpp 166a9a16bd388069823e59fde3ae9935 *inst/include/stan/math/opencl/rev/beta.hpp 6137c58daf8ba179530475bafbb2c456 *inst/include/stan/math/opencl/rev/block.hpp e573e0a1672433d0dd18b9aee0cd86bc *inst/include/stan/math/opencl/rev/cbrt.hpp 275c390a1894c1fd6c05a8ead9defe62 *inst/include/stan/math/opencl/rev/ceil.hpp 8ac810801bad22350e7ad03b03ddb235 *inst/include/stan/math/opencl/rev/cholesky_decompose.hpp abe3f56bdfdc2be99a63eb7c1702bfb6 
*inst/include/stan/math/opencl/rev/columns_dot_product.hpp 4917285f34a6cd1dc15e8be06ae53ee9 *inst/include/stan/math/opencl/rev/columns_dot_self.hpp 6957d8c836dac5620e50af2df7c3d9a7 *inst/include/stan/math/opencl/rev/copy.hpp 6befdc4de165acc3c2e77690d5110061 *inst/include/stan/math/opencl/rev/cos.hpp 135d3d6785f34b2ac4e244f247c435e9 *inst/include/stan/math/opencl/rev/cosh.hpp dd9be5df64ffd2bfa4a162bcc9dbc595 *inst/include/stan/math/opencl/rev/crossprod.hpp ef00166dec6d95d4ca6b6a31d5e1d458 *inst/include/stan/math/opencl/rev/cumulative_sum.hpp 8cd7162873d331ce8fd5017dd1c4089e *inst/include/stan/math/opencl/rev/diag_matrix.hpp 4f49928700b5ccc4f8563c86ffe54502 *inst/include/stan/math/opencl/rev/diag_post_multiply.hpp b733640ebdd55ef8030589c532181883 *inst/include/stan/math/opencl/rev/diag_pre_multiply.hpp e984b40ed82885f494d7269c6a69d47a *inst/include/stan/math/opencl/rev/diagonal.hpp 8cbd152a6e6166030b2a1be0003ad285 *inst/include/stan/math/opencl/rev/digamma.hpp d4e4cd6d8e44b67e87d3ceb2ff8cc6c1 *inst/include/stan/math/opencl/rev/divide.hpp 2de44070291a7540453d6a141426b214 *inst/include/stan/math/opencl/rev/dot_product.hpp e6f99f05431c3ea596db4a8de8dab421 *inst/include/stan/math/opencl/rev/dot_self.hpp 3d2e9571b0a82f0043455491cb247342 *inst/include/stan/math/opencl/rev/elt_divide.hpp 00981cd44354fdad761ddd9c1066f78f *inst/include/stan/math/opencl/rev/elt_multiply.hpp 81119202967f000ce3f130884051e009 *inst/include/stan/math/opencl/rev/erf.hpp 32f89168b16d5e8f3ed26d7d915226c1 *inst/include/stan/math/opencl/rev/erfc.hpp a5ccbc8802f09415beae2e09d53871fd *inst/include/stan/math/opencl/rev/exp.hpp 54823fefc68878328e3011e68bf68ad3 *inst/include/stan/math/opencl/rev/exp2.hpp bdf0e946003e6768db87d178599f2c36 *inst/include/stan/math/opencl/rev/expm1.hpp de9a0bd36a9800437c34425cadfd860b *inst/include/stan/math/opencl/rev/fabs.hpp d7478c40c41b24386a06589075d89208 *inst/include/stan/math/opencl/rev/fdim.hpp 13ab54496a7e32d2df3f6ca37ddb7e5d 
*inst/include/stan/math/opencl/rev/floor.hpp 673e499bd6f614dba6bce09e597e3ec0 *inst/include/stan/math/opencl/rev/fmax.hpp 50c09f335942bfd2f5d55aee9235b31a *inst/include/stan/math/opencl/rev/fmin.hpp 57b7de4f5a4ce6b60c2e9531fdbf6782 *inst/include/stan/math/opencl/rev/fmod.hpp 97fde0fd64bb0e7dcc064364ddc9ccdb *inst/include/stan/math/opencl/rev/hypot.hpp e7ba19b9d2d9550a0f6e71c9d7a8b88d *inst/include/stan/math/opencl/rev/inv.hpp 4eed2a5ec70b150da5660825bc8f4858 *inst/include/stan/math/opencl/rev/inv_Phi.hpp 9ef7f3db79d69324a3989bec69d1e0c4 *inst/include/stan/math/opencl/rev/inv_cloglog.hpp 6a32c8c2a1a41d6c34390a91f08cb1fa *inst/include/stan/math/opencl/rev/inv_logit.hpp a462951237e6db123306c735d935c26e *inst/include/stan/math/opencl/rev/inv_sqrt.hpp 775888074853484c689848ba90913a9b *inst/include/stan/math/opencl/rev/inv_square.hpp 4a91a29f911bd667b07b012a36b8e877 *inst/include/stan/math/opencl/rev/lb_constrain.hpp 5fc943760f3dd8f6f5d0a1abf53fb641 *inst/include/stan/math/opencl/rev/lbeta.hpp 39bca8142e338a2522aceb09dd5a91df *inst/include/stan/math/opencl/rev/ldexp.hpp f880e75b1acd355cbebad9a5ef804912 *inst/include/stan/math/opencl/rev/lgamma.hpp 4dfd59e07f0e820f9d65f4f5ee0e6e3e *inst/include/stan/math/opencl/rev/lmultiply.hpp 079e446861dab5f3c03194e5bb45a9a8 *inst/include/stan/math/opencl/rev/log.hpp 9ac18383fffeee917632bbe19b3d8fbc *inst/include/stan/math/opencl/rev/log10.hpp 4c0f186359260b2c44f3ab8a0a9353a3 *inst/include/stan/math/opencl/rev/log1m.hpp cb95dca0118849b6b8f37e47186ffce8 *inst/include/stan/math/opencl/rev/log1m_exp.hpp 6b8eca3c4be86ac8b517f75d018121b9 *inst/include/stan/math/opencl/rev/log1m_inv_logit.hpp 36bdd291d6edc55421d99bd90e01f3cb *inst/include/stan/math/opencl/rev/log1p.hpp 32d94e2d3261771cf688dccd7e536655 *inst/include/stan/math/opencl/rev/log1p_exp.hpp 7b33ced1d7d1ca206642b189b3ded102 *inst/include/stan/math/opencl/rev/log2.hpp 5674686607830d7c0a9025dde47b8fd7 *inst/include/stan/math/opencl/rev/log_diff_exp.hpp 987d003effbeb57e288c4105881c573c 
*inst/include/stan/math/opencl/rev/log_inv_logit.hpp fb5abbb3f85963cde54d30ffae737ae5 *inst/include/stan/math/opencl/rev/log_inv_logit_diff.hpp 5b81ea12bcce3a186cbdf4ebb01ba797 *inst/include/stan/math/opencl/rev/log_softmax.hpp b93dbb36ccbd6fb38413e95cb3df6ac4 *inst/include/stan/math/opencl/rev/log_sum_exp.hpp 68f409b892100f629dd91ba5f6c31b61 *inst/include/stan/math/opencl/rev/logit.hpp 8385183a190a87fd441c51ed5ca67fc4 *inst/include/stan/math/opencl/rev/lub_constrain.hpp a878434b0ea9a72293f461db5bf69be8 *inst/include/stan/math/opencl/rev/matrix_power.hpp d1fbecdd39627a49b93ff9aa3da6b5ce *inst/include/stan/math/opencl/rev/mdivide_left_tri_low.hpp 8647bb0223aa319aee2531f1cb3c0c42 *inst/include/stan/math/opencl/rev/mdivide_right_tri_low.hpp 75036b1cb09f3d8145bbc45438f8a2fb *inst/include/stan/math/opencl/rev/multiply.hpp 8223d0ab08b81fd823431a016ddb29ac *inst/include/stan/math/opencl/rev/multiply_log.hpp a8dcb9db4b428a1b6ec277b7c35bfc5f *inst/include/stan/math/opencl/rev/multiply_lower_tri_self_transpose.hpp 25dfe77768aacfd0044a2d289ffc773b *inst/include/stan/math/opencl/rev/offset_multiplier_constrain.hpp 47ced5dfb9cd23f2c8f614756d79b75f *inst/include/stan/math/opencl/rev/operands_and_partials.hpp 6c0e660d2e815941c1322fc3feaf5764 *inst/include/stan/math/opencl/rev/operator_unary_minus.hpp 069960bad85c4d8f6e96d6289a37bf52 *inst/include/stan/math/opencl/rev/operator_unary_plus.hpp 1c9102481a89f1f207f12e237312a697 *inst/include/stan/math/opencl/rev/pow.hpp 4e2321787d60db1e7f6a681136c50030 *inst/include/stan/math/opencl/rev/prod.hpp 8d8d8cc3bd15a2f24693f59a3443032d *inst/include/stan/math/opencl/rev/rep_matrix.hpp a21a68432cef5d9ec1a4002179f245bf *inst/include/stan/math/opencl/rev/reverse.hpp 8413b8c2fa716a75fd5831bd030cafbe *inst/include/stan/math/opencl/rev/round.hpp bd6dd9d4a9cfd209f8441a120d2dadc0 *inst/include/stan/math/opencl/rev/rows_dot_product.hpp 99c6fef3c5ed6d6addf1af56eb771579 *inst/include/stan/math/opencl/rev/rows_dot_self.hpp 
53e37a599eefa2883f07262576ddc1e9 *inst/include/stan/math/opencl/rev/sd.hpp 3003cb3afcc2a0ebfb16e5c01579da3d *inst/include/stan/math/opencl/rev/sin.hpp e797cb925401e77df0bb720ec48c6b23 *inst/include/stan/math/opencl/rev/sinh.hpp f13a0afcd5a8e8e67594ddf89ea519d0 *inst/include/stan/math/opencl/rev/softmax.hpp 565b7938a87373f263be06e1fc8f50b9 *inst/include/stan/math/opencl/rev/sqrt.hpp be80bf297535fbd4321527a467e8cecc *inst/include/stan/math/opencl/rev/square.hpp 411c04787cb3644bd25645de06c88542 *inst/include/stan/math/opencl/rev/squared_distance.hpp bee5c906f01382bc8341aa9e5ebaa996 *inst/include/stan/math/opencl/rev/subtract.hpp 4322d34be4400ab83b98dbf15541fb2d *inst/include/stan/math/opencl/rev/sum.hpp 908812a5532378ebac5dc8bae0b74ef2 *inst/include/stan/math/opencl/rev/symmetrize_from_lower_tri.hpp 5403c2c2e84f9801eb5d43da23a94ec7 *inst/include/stan/math/opencl/rev/symmetrize_from_upper_tri.hpp 92b56e251106ebc598a3c301a14fb693 *inst/include/stan/math/opencl/rev/tan.hpp 394a43a0aa19116845b7c9092321ebc2 *inst/include/stan/math/opencl/rev/tanh.hpp 2c91416d3845a13899f6db2b46311574 *inst/include/stan/math/opencl/rev/tcrossprod.hpp 62fec1b0beb536fcbd93bf31ba7a2774 *inst/include/stan/math/opencl/rev/tgamma.hpp 7ae6e28f3fd8b6e20e7692b72ac3daf5 *inst/include/stan/math/opencl/rev/to_arena.hpp 4c2849caf9fa4fb43b0b04e2e4fa353f *inst/include/stan/math/opencl/rev/to_matrix.hpp 669c735ac918735a632d4ba6ffed5a19 *inst/include/stan/math/opencl/rev/trace.hpp b4e77303209b46a291edce966ecbc55b *inst/include/stan/math/opencl/rev/transpose.hpp 7fae7cdacac85168c80a2b360656045b *inst/include/stan/math/opencl/rev/trunc.hpp 9eb584167100fedfe5d6f09e415df457 *inst/include/stan/math/opencl/rev/ub_constrain.hpp 92b880b60f5bcb72d826c610c5e18448 *inst/include/stan/math/opencl/rev/unit_vector_constrain.hpp 36b9f751f5bd7f59547b0867b86acf61 *inst/include/stan/math/opencl/rev/vari.hpp 727adb63c9fa69a9c394e5c023957505 *inst/include/stan/math/opencl/rev/variance.hpp 9e04b95f29f466a7f1e7f70969340949 
*inst/include/stan/math/opencl/scalar_type.hpp da12936da4b3cc00d079228cc0b7613f *inst/include/stan/math/opencl/stringify.hpp b521178ac388ed5cd23eefc9c0da02d9 *inst/include/stan/math/opencl/symmetric_eigensolver.hpp 0577edfe769d869e02221e33c5a1aa64 *inst/include/stan/math/opencl/to_ref_for_opencl.hpp 41c05af8fc885830c2aec302825f67eb *inst/include/stan/math/opencl/tri_inverse.hpp 01070d9dbf9980800f734115b02eed8d *inst/include/stan/math/opencl/tridiagonalization.hpp fad17976e5c9cfed13753d3b246d27d0 *inst/include/stan/math/opencl/value_type.hpp 6d05b823b7fd99f679a08bd8a20f122c *inst/include/stan/math/opencl/zeros_strict_tri.hpp b0a035e2aa9f97bb9552488d1facb91a *inst/include/stan/math/prim.hpp 7c2bbf26e12eb14080fbf26f75483c6c *inst/include/stan/math/prim/core.hpp 468a020c395c22c9213495c4c3038eeb *inst/include/stan/math/prim/core/complex_base.hpp 257d68b02a34ce25b7001997142a8e2a *inst/include/stan/math/prim/core/init_threadpool_tbb.hpp 38c764c8cea501eb2151de6aac4d3501 *inst/include/stan/math/prim/core/operator_addition.hpp 5b65bfb1cdc4d60df1c781f827a6ea57 *inst/include/stan/math/prim/core/operator_division.hpp c053893366b2e9fe75190b8a0d6bafba *inst/include/stan/math/prim/core/operator_equal_equal.hpp 9061b1841f785517bc6a203d44aa5104 *inst/include/stan/math/prim/core/operator_minus.hpp 5332a4b76555c9156829c3d301ab92d8 *inst/include/stan/math/prim/core/operator_multiplication.hpp 21ad5606eca05df6f46c8cce9a2a64a2 *inst/include/stan/math/prim/core/operator_not_equal.hpp 23f5b979767b0fa842296b0f9b1c5fea *inst/include/stan/math/prim/core/operator_plus.hpp f08d0931cdcec557c386bf49c9810e2f *inst/include/stan/math/prim/core/operator_subtraction.hpp 13f51d1dc6272d3f873e5004f30dfffa *inst/include/stan/math/prim/eigen_plugins.h 0757d20a06ae683f6b7fb3e5a35cc678 *inst/include/stan/math/prim/err.hpp 6b0df760e0117c656af5a9608ee38ef1 *inst/include/stan/math/prim/err/check_2F1_converges.hpp 64160c46690ecdda4c47a8bdfdc79725 *inst/include/stan/math/prim/err/check_3F2_converges.hpp 
f90d34a62c26845c04a77873e6f433e4 *inst/include/stan/math/prim/err/check_bounded.hpp a3b0c32d893a365c5b809d148a6d9ebf *inst/include/stan/math/prim/err/check_cholesky_factor.hpp 0196c74557196e63a49980a226468780 *inst/include/stan/math/prim/err/check_cholesky_factor_corr.hpp 85ac91fac586f2e7580d3160691877d8 *inst/include/stan/math/prim/err/check_column_index.hpp 33c3140964e6820110a695d726e59dc2 *inst/include/stan/math/prim/err/check_consistent_size.hpp 9fe732bdf529e473c20dd7dc92d4a4d3 *inst/include/stan/math/prim/err/check_consistent_sizes.hpp 97801ed1fb2640ec1dd95e81c64e8f5a *inst/include/stan/math/prim/err/check_consistent_sizes_mvt.hpp 3b34dc143ff284a77278674e8b4aaaff *inst/include/stan/math/prim/err/check_corr_matrix.hpp 0b697448226197e80fea0491d1d88398 *inst/include/stan/math/prim/err/check_cov_matrix.hpp 820e57ce345ecd6a2fab7dae8e82a86e *inst/include/stan/math/prim/err/check_finite.hpp 8d3dfcb2c5b10e3b41103e0f94ccbbc7 *inst/include/stan/math/prim/err/check_flag_sundials.hpp f163a2144e578e52ada0e68d165fa07c *inst/include/stan/math/prim/err/check_greater.hpp b6b716d2c2079b2d55a3751777e93916 *inst/include/stan/math/prim/err/check_greater_or_equal.hpp 1e72d07fb5855bee7fc20a4e9b3404cd *inst/include/stan/math/prim/err/check_ldlt_factor.hpp 76b6b513f8902dadab1ca614eda0ecc2 *inst/include/stan/math/prim/err/check_less.hpp 26e368172d0a1cab0280282a3c8b3884 *inst/include/stan/math/prim/err/check_less_or_equal.hpp 31ac90b30b711191a41d0a0672d904e5 *inst/include/stan/math/prim/err/check_lower_triangular.hpp faa0afc2f96e52759f266c06095a8ad3 *inst/include/stan/math/prim/err/check_matching_dims.hpp be44653cf1397d8741d9648b6be08763 *inst/include/stan/math/prim/err/check_matching_sizes.hpp 4115abd23f17d400fa025bf0e6695ff4 *inst/include/stan/math/prim/err/check_multiplicable.hpp bb4bc538ab0094a622dccd6d4cc4a8e0 *inst/include/stan/math/prim/err/check_nonnegative.hpp d118ae5f11a4befd7b7bd2198b7ad4b5 *inst/include/stan/math/prim/err/check_nonzero_size.hpp 
26547091a51a6b9a6e1b73176319b704 *inst/include/stan/math/prim/err/check_not_nan.hpp b707af785c847dfc125272a22b5dea4c *inst/include/stan/math/prim/err/check_ordered.hpp 0e46ed8c6953b478b09bfdf03ebfd7f5 *inst/include/stan/math/prim/err/check_pos_definite.hpp 8a6a767b6b1ad0051e7da0e188420a37 *inst/include/stan/math/prim/err/check_pos_semidefinite.hpp 3d305b2d7f9dd3cce50fb2f7449ef894 *inst/include/stan/math/prim/err/check_positive.hpp 8a9f70bae598e76fb1df90d973a07f6b *inst/include/stan/math/prim/err/check_positive_finite.hpp 8526d123b973f0ac8404ec8175652cf5 *inst/include/stan/math/prim/err/check_positive_ordered.hpp da950fe9370d813d8702145bbf179663 *inst/include/stan/math/prim/err/check_range.hpp 606fb2100aacc7e8a52083bbbd5629ee *inst/include/stan/math/prim/err/check_row_index.hpp ad249e4923ce433766eb6dd40d2901f7 *inst/include/stan/math/prim/err/check_simplex.hpp b0c01bdbad81d32d7a9fb0fe65aa160d *inst/include/stan/math/prim/err/check_size_match.hpp 8ecea4de010e3e3cb7e1bcd55a73071a *inst/include/stan/math/prim/err/check_sorted.hpp b969755e1cdd515460070f188cff8994 *inst/include/stan/math/prim/err/check_square.hpp 6d1ff0adc6db5790ebde9f9538665b6f *inst/include/stan/math/prim/err/check_std_vector_index.hpp 6a19247f2b281decd85f19d709698639 *inst/include/stan/math/prim/err/check_symmetric.hpp 4876c6518f22c25731de4844f4f1a198 *inst/include/stan/math/prim/err/check_unit_vector.hpp 7b8c84f2a50493a10b7f59a5db8174ab *inst/include/stan/math/prim/err/check_vector.hpp 8217fab425bcce92159fda448f40821f *inst/include/stan/math/prim/err/check_vector_index.hpp 3ddc7c82440ac9603560fffff116130f *inst/include/stan/math/prim/err/constraint_tolerance.hpp 7b6a0b6134fb1e81fe3dce0c64c4d3b2 *inst/include/stan/math/prim/err/domain_error.hpp 7998cd0f09fbd3beecfaec77d02d9a19 *inst/include/stan/math/prim/err/domain_error_vec.hpp b1a7f254876831fb907269989fdabb54 *inst/include/stan/math/prim/err/elementwise_check.hpp 9b2b72dbba32630339b3a2ebaddfa738 *inst/include/stan/math/prim/err/hmm_check.hpp 
6b2c6c3b56bfeaa355b1bc09208939cf *inst/include/stan/math/prim/err/invalid_argument.hpp ca1996ec266dce6b20800035ecee3a12 *inst/include/stan/math/prim/err/invalid_argument_vec.hpp f35f0d513edb34d25bf3c8a1c0bbe0af *inst/include/stan/math/prim/err/is_cholesky_factor.hpp 723ecd77fa536b3e9ec9db4a8e7a376e *inst/include/stan/math/prim/err/is_cholesky_factor_corr.hpp 23a0fd7c7646dd34414eb2474b1bcb50 *inst/include/stan/math/prim/err/is_column_index.hpp f4ada91c49d6d672b07a843ce6da06b8 *inst/include/stan/math/prim/err/is_corr_matrix.hpp 7133c463d117c07d186fd181b31b67bd *inst/include/stan/math/prim/err/is_ldlt_factor.hpp f883886387ea6c5ca66d9990d7071883 *inst/include/stan/math/prim/err/is_less_or_equal.hpp b35efa47214b90164838763df65b2b1e *inst/include/stan/math/prim/err/is_lower_triangular.hpp e460e6036583a16d02202452cbdb2950 *inst/include/stan/math/prim/err/is_mat_finite.hpp d84f9024b66af47f21cd584ec134373f *inst/include/stan/math/prim/err/is_matching_dims.hpp 65cb6a545f358157d620ebcaa4ef2ddc *inst/include/stan/math/prim/err/is_matching_size.hpp dffd43db52c491116c6da49d14525f72 *inst/include/stan/math/prim/err/is_nonzero_size.hpp 67e109b30fadac9b39f8fd60e24839bf *inst/include/stan/math/prim/err/is_not_nan.hpp 9a2862b80d4286501bf5cae26e5e828d *inst/include/stan/math/prim/err/is_ordered.hpp 71aa25eea6c6c8888cbdc29e6c228c8e *inst/include/stan/math/prim/err/is_pos_definite.hpp a1f3c23d705deb527df0e818da97614a *inst/include/stan/math/prim/err/is_positive.hpp 88cdff030aa3a268a336a2f0ec208180 *inst/include/stan/math/prim/err/is_scal_finite.hpp 04617fd3f2489792d6491517b5c48810 *inst/include/stan/math/prim/err/is_size_match.hpp b83fbfc06e94e01d7c7649bfa6a55966 *inst/include/stan/math/prim/err/is_square.hpp d8f1e47db8f578a81703ab399c07cffd *inst/include/stan/math/prim/err/is_symmetric.hpp 6f0337029e9b9a33b9448c92257dbecf *inst/include/stan/math/prim/err/is_unit_vector.hpp 292aabdb5b0a999cd32902c924b03280 *inst/include/stan/math/prim/err/make_iter_name.hpp 
4cd5e21ade51f8dbbf53930b796aae0f *inst/include/stan/math/prim/err/out_of_range.hpp 909dea3bb3142a5b6c8b448cfd978174 *inst/include/stan/math/prim/err/system_error.hpp e7a9cae76798710369d463f86f9db1ee *inst/include/stan/math/prim/err/throw_domain_error.hpp 283a67ead5f45201e092a6ed36976226 *inst/include/stan/math/prim/err/throw_domain_error_mat.hpp 6481bfc3123e5efaf2c9c1fdb0779499 *inst/include/stan/math/prim/err/throw_domain_error_vec.hpp d71435a8b4427244b0607039d3607a95 *inst/include/stan/math/prim/err/validate_non_negative_index.hpp 51363ab3e9b66d7336e3b4de135ba896 *inst/include/stan/math/prim/err/validate_positive_index.hpp d8242a86389b34e9abe0205cba708b66 *inst/include/stan/math/prim/err/validate_unit_vector_index.hpp 08341fd07f4439d0a3f564fe153f427c *inst/include/stan/math/prim/fun.hpp 59908215d31774a49949596ba9fe89d0 *inst/include/stan/math/prim/fun/Eigen.hpp c260753e38d22e3e9a879c491eea990f *inst/include/stan/math/prim/fun/LDLT_factor.hpp cc95a79cb45c59745ddc3a7ac74d3396 *inst/include/stan/math/prim/fun/MatrixExponential.h 88727a093dedfe974836b52d9ef8802d *inst/include/stan/math/prim/fun/Phi.hpp 09b92bf5b9d5e9df5f9593a412c52f86 *inst/include/stan/math/prim/fun/Phi_approx.hpp 3895bdad605b20d389a7a34f205ae61a *inst/include/stan/math/prim/fun/abs.hpp d00d7a026bfec64759b36ebdb5d4070f *inst/include/stan/math/prim/fun/accumulator.hpp ae231114288be6391b0e3bb40702daf0 *inst/include/stan/math/prim/fun/acos.hpp 283f0bf6b8e1ea5c7074a639dcafb768 *inst/include/stan/math/prim/fun/acosh.hpp 1e0c707f5dedf80f9f8dddd0eb0f42a0 *inst/include/stan/math/prim/fun/add.hpp 664e7ce34cefaa6be184d17f6767aca2 *inst/include/stan/math/prim/fun/add_diag.hpp d8af8ec09eccbf92946ed7148370f0ec *inst/include/stan/math/prim/fun/all.hpp 9a16a69adc6be27d8c4c1a489713ae60 *inst/include/stan/math/prim/fun/any.hpp d04d9a068d08c3c82584cc429171da97 *inst/include/stan/math/prim/fun/append_array.hpp 4299ffafdb363bed836b08a9ae622b05 *inst/include/stan/math/prim/fun/append_col.hpp 
6045944a57caa9529bcbdd9f1e42e31d *inst/include/stan/math/prim/fun/append_row.hpp 3e09854f455e3ff4e01f4ca50b525bcc *inst/include/stan/math/prim/fun/arg.hpp 749b6324cbc2ac3e9d651e0d7e392abb *inst/include/stan/math/prim/fun/array_builder.hpp 421bbf9f9568925ba92df6f3b8b05b43 *inst/include/stan/math/prim/fun/as_array_or_scalar.hpp 4609762b68c5d6fbf8555f73fa8c76dd *inst/include/stan/math/prim/fun/as_bool.hpp 18f05f8faf9f500a792b1d28389782ad *inst/include/stan/math/prim/fun/as_column_vector_or_scalar.hpp 3d9907486443dd53cb9a9d86077c6060 *inst/include/stan/math/prim/fun/as_value_array_or_scalar.hpp bcfa4da88baaf1f4a6add68b602b6f95 *inst/include/stan/math/prim/fun/as_value_column_array_or_scalar.hpp 6d744da39d0a4efdcf7c0b73cd47cb55 *inst/include/stan/math/prim/fun/as_value_column_vector_or_scalar.hpp 9ad2a0c819e88866bad01dce08cadb43 *inst/include/stan/math/prim/fun/asin.hpp a5b9c00446e05c9d77a1ce70e4e7c16c *inst/include/stan/math/prim/fun/asinh.hpp 6b5d7564658a310c79b487323ba5f869 *inst/include/stan/math/prim/fun/assign.hpp 4099a49dbd8d94a54675e642b3a4027c *inst/include/stan/math/prim/fun/atan.hpp 81246c897a0a7224587c06e21484888f *inst/include/stan/math/prim/fun/atan2.hpp d494cc8159b5f141bf0f73f66f5dbfde *inst/include/stan/math/prim/fun/atanh.hpp cead643b1eb47510df1eb39c89b8bfab *inst/include/stan/math/prim/fun/autocorrelation.hpp 0ae4bc0ac468fd704b5dc7080871a2b1 *inst/include/stan/math/prim/fun/autocovariance.hpp 67f14d7980cd108799fd775a95fa0344 *inst/include/stan/math/prim/fun/bessel_first_kind.hpp 8fa45274532a2a512e28fd79c2d3a190 *inst/include/stan/math/prim/fun/bessel_second_kind.hpp 33c9ab3fdc528868549b75416f411e03 *inst/include/stan/math/prim/fun/beta.hpp 4d044aefc6beb8dd0454ed5c2df3cd23 *inst/include/stan/math/prim/fun/binary_log_loss.hpp df8ff39a428835c5cdaddd096f7850b2 *inst/include/stan/math/prim/fun/binomial_coefficient_log.hpp 1934d38ad50cf77201a01f136cfc5120 *inst/include/stan/math/prim/fun/block.hpp 4c71a06fa715d7235b81176bb05a917f 
*inst/include/stan/math/prim/fun/boost_policy.hpp 6343d6183c31c6efb70c70409e754727 *inst/include/stan/math/prim/fun/cbrt.hpp 51038726b7bac666d4b04bc8c6125c03 *inst/include/stan/math/prim/fun/ceil.hpp 22381ed7fc14ce7fa117d793cbe14af5 *inst/include/stan/math/prim/fun/chol2inv.hpp 754f54218a1bbd986b4572841666ec8d *inst/include/stan/math/prim/fun/cholesky_corr_constrain.hpp 84693937a89400590253d672541f067e *inst/include/stan/math/prim/fun/cholesky_corr_free.hpp 0ab7b4abdfe91eb1f3aee0cb6fc26798 *inst/include/stan/math/prim/fun/cholesky_decompose.hpp b9a59d7ee465c609994b84e3d6885e5a *inst/include/stan/math/prim/fun/cholesky_factor_constrain.hpp bce2359e19af5620ec32e47a63822c63 *inst/include/stan/math/prim/fun/cholesky_factor_free.hpp aef8f490dc475ee59e47b6237118711f *inst/include/stan/math/prim/fun/choose.hpp 2e51fe2582d2cab94d79f25a7733b11d *inst/include/stan/math/prim/fun/col.hpp 71aead509d414da8b713725737db12d5 *inst/include/stan/math/prim/fun/cols.hpp d0a9ee2275ae77ba711848bd9cc0d7c2 *inst/include/stan/math/prim/fun/columns_dot_product.hpp 32262ca74ca5e4e696f007d524a3fc0a *inst/include/stan/math/prim/fun/columns_dot_self.hpp 6b3b3b7284be1e9174dd0a6c320a35e9 *inst/include/stan/math/prim/fun/complex_schur_decompose.hpp d3091dd4bfeba8597c0d79d6ee7779ca *inst/include/stan/math/prim/fun/conj.hpp 72d48aded08d077afaff0b28bcc892aa *inst/include/stan/math/prim/fun/constants.hpp f5d775b1ab4fe9cf045f46c5ca2b996f *inst/include/stan/math/prim/fun/copysign.hpp 00336a680f7d4117a5f42c1e30682293 *inst/include/stan/math/prim/fun/corr_constrain.hpp 60bb9344838712c6d050392fe24c076e *inst/include/stan/math/prim/fun/corr_free.hpp a9fe32ced2f72e9af158dcd5e459602c *inst/include/stan/math/prim/fun/corr_matrix_constrain.hpp 23f468f7faf003cfe772f65171ebf5bc *inst/include/stan/math/prim/fun/corr_matrix_free.hpp 57f263089aaa30917182315e8db3bac7 *inst/include/stan/math/prim/fun/cos.hpp 3aafc52be76e5083f6b2c5f9093f10b0 *inst/include/stan/math/prim/fun/cosh.hpp d7d706aec32a35e8305861f02cdf1bcd 
*inst/include/stan/math/prim/fun/cov_exp_quad.hpp db698bba3c2b066f11b4bebba25bf9c7 *inst/include/stan/math/prim/fun/cov_matrix_constrain.hpp 51072b87f9a4cb95e8042febb5486ba6 *inst/include/stan/math/prim/fun/cov_matrix_constrain_lkj.hpp fbf6376f6ba54fd593d3e68a67682bbc *inst/include/stan/math/prim/fun/cov_matrix_free.hpp 03181239ae6b1793b0b3b6d79f02e80f *inst/include/stan/math/prim/fun/cov_matrix_free_lkj.hpp e38654affa92893bb16f33ff783acfb3 *inst/include/stan/math/prim/fun/crossprod.hpp 2c404995fd6282bc941f53184b7c00a0 *inst/include/stan/math/prim/fun/csr_extract.hpp 9203cdabe677b0585104b1bab4b740ef *inst/include/stan/math/prim/fun/csr_extract_u.hpp 38c5cb4c3c5eb7631a8852d8b22a06c1 *inst/include/stan/math/prim/fun/csr_extract_v.hpp c04d1f9e1e700b7c84fa8a44a9f63076 *inst/include/stan/math/prim/fun/csr_extract_w.hpp d22e48187ffdb6224685a0a944eee223 *inst/include/stan/math/prim/fun/csr_matrix_times_vector.hpp 6a5d67d4ae33d528d2f381a258b32cb2 *inst/include/stan/math/prim/fun/csr_to_dense_matrix.hpp 459bc74df87b5c31d70be4aabe8aa4a4 *inst/include/stan/math/prim/fun/csr_u_to_z.hpp e08316ec75264f4ce549d584790dca21 *inst/include/stan/math/prim/fun/cumulative_sum.hpp 40827a84132af931bf6ccb78878acf3c *inst/include/stan/math/prim/fun/determinant.hpp 87a3dbb8da863ea59765d1a43d4a5ab6 *inst/include/stan/math/prim/fun/diag_matrix.hpp de20c5756d0e1f51b7d1e83ad9311b32 *inst/include/stan/math/prim/fun/diag_post_multiply.hpp fb92afbad33443062a4d3d10877b8abf *inst/include/stan/math/prim/fun/diag_pre_multiply.hpp ce1d8d160dd9860f1457c41380e69650 *inst/include/stan/math/prim/fun/diagonal.hpp bd398445a5ca125ebace1e886808c35e *inst/include/stan/math/prim/fun/digamma.hpp 0566f677675f9e9f111ea3530da1d10c *inst/include/stan/math/prim/fun/dims.hpp 540301502e399353f7239c5783db1e3b *inst/include/stan/math/prim/fun/distance.hpp 4df1ed05b953cc101913bb18f1b4ca31 *inst/include/stan/math/prim/fun/divide.hpp c6e07c076304f4e427e503fa368787bb *inst/include/stan/math/prim/fun/divide_columns.hpp 
12c5837bb101211aac615d2854b0b640 *inst/include/stan/math/prim/fun/dot.hpp 269a191442d07eef4a03598339111455 *inst/include/stan/math/prim/fun/dot_product.hpp 309e7c4beac550487f28c7afbc35666d *inst/include/stan/math/prim/fun/dot_self.hpp df97f9e1aa9ee8813a15b0ac16ffa469 *inst/include/stan/math/prim/fun/eigen_comparisons.hpp 1a936a2b255340f9f935c8f9fa51e25a *inst/include/stan/math/prim/fun/eigendecompose.hpp 2c65a0e3f22de2fcc367aa55d6a22566 *inst/include/stan/math/prim/fun/eigendecompose_sym.hpp 4b020f40de2056bb17bd6dd2c8a14e7f *inst/include/stan/math/prim/fun/eigenvalues.hpp 88ebebacdcec1e1e980acab4a4ad649e *inst/include/stan/math/prim/fun/eigenvalues_sym.hpp 3ea26a63c30a8a3ade1d69514f55dc71 *inst/include/stan/math/prim/fun/eigenvectors.hpp 2e6a83e24ff3c0dc32be33ec56eea051 *inst/include/stan/math/prim/fun/eigenvectors_sym.hpp b53c19acebbf66eb30c64b40657a113f *inst/include/stan/math/prim/fun/elt_divide.hpp 5dc13b5515f8994457cd30e3ffce2ed6 *inst/include/stan/math/prim/fun/elt_multiply.hpp 336fc64ebce86cead89f205433dc5f2a *inst/include/stan/math/prim/fun/erf.hpp f0698e50d9d5d991f5345aba3cd9b8e7 *inst/include/stan/math/prim/fun/erfc.hpp 5fc9b3397432f97b2e91c8837e54fad6 *inst/include/stan/math/prim/fun/eval.hpp ea242370b2756a48d1a5df9f1e4ea909 *inst/include/stan/math/prim/fun/exp.hpp d1e4a6f05c9016f7847c916500540b3a *inst/include/stan/math/prim/fun/exp2.hpp fe9556917d6c2fbd8033d10322a1dec4 *inst/include/stan/math/prim/fun/expm1.hpp 0c2687861079ca8c1005fe0f96fc07f8 *inst/include/stan/math/prim/fun/fabs.hpp da44687831f1b003da6a366cc04c53c8 *inst/include/stan/math/prim/fun/factor_U.hpp e67af8ef08a4272c9950eb63f2008d12 *inst/include/stan/math/prim/fun/factor_cov_matrix.hpp f56d1d4d04f7bfe6a6047ed7a703ea0d *inst/include/stan/math/prim/fun/falling_factorial.hpp a14d788205689eb6ca1e55b3f26fcf88 *inst/include/stan/math/prim/fun/fdim.hpp b161775c4833492d284cde0c1ed5ed39 *inst/include/stan/math/prim/fun/fft.hpp fa9fb3daef4ba521aca18db4743d393b 
*inst/include/stan/math/prim/fun/fill.hpp 7445675c7fae3b9419e456f05702a680 *inst/include/stan/math/prim/fun/finite_diff_stepsize.hpp 6c3fc931446230f0b221a89d20af5d4a *inst/include/stan/math/prim/fun/floor.hpp 6514a8a6f46608ac623e9e3d2470afb3 *inst/include/stan/math/prim/fun/fma.hpp e67617a4aad31fc489f975fb2f3a2e2d *inst/include/stan/math/prim/fun/fmax.hpp 5e097de5975cb6e36f544dafd1839d80 *inst/include/stan/math/prim/fun/fmin.hpp 4d45f17436aa2ab4e7ea98b9152e46f1 *inst/include/stan/math/prim/fun/fmod.hpp f295b34aa5b59c75003f893a9bf05a32 *inst/include/stan/math/prim/fun/gamma_p.hpp 5feb87e394710a205d0f92ebc311f8ef *inst/include/stan/math/prim/fun/gamma_q.hpp 7a9f41fad84229f703cfa224f825d2bc *inst/include/stan/math/prim/fun/generalized_inverse.hpp 62ec3edd1e2c1b9ddf86a97c027dfe76 *inst/include/stan/math/prim/fun/get.hpp 5a2a897d7923779c788019863dbce851 *inst/include/stan/math/prim/fun/get_base1.hpp e9a407cd5cdd38093af297d4b62861ee *inst/include/stan/math/prim/fun/get_base1_lhs.hpp 8c4a02d57973d8f9a0c9113ab3831232 *inst/include/stan/math/prim/fun/get_imag.hpp 79dc26721243f5863de6e48131150282 *inst/include/stan/math/prim/fun/get_lp.hpp dda9c7b53ff6146dcae6d2aa653b31de *inst/include/stan/math/prim/fun/get_real.hpp 24e6d37f6009868d14de504353adeb18 *inst/include/stan/math/prim/fun/gp_dot_prod_cov.hpp 9a82bc01d1ce21dad922700c274035a3 *inst/include/stan/math/prim/fun/gp_exp_quad_cov.hpp 4858d904ef9cc3d10fc70fd740836317 *inst/include/stan/math/prim/fun/gp_exponential_cov.hpp 06416c177bef8d1523efc575cece015e *inst/include/stan/math/prim/fun/gp_matern32_cov.hpp eb53e5e0d431f5f9abd7d4de3968598c *inst/include/stan/math/prim/fun/gp_matern52_cov.hpp 67e2f7ced490a982237755833f143a94 *inst/include/stan/math/prim/fun/gp_periodic_cov.hpp eb55cc95a1e2b3d3d8c1d65172c20627 *inst/include/stan/math/prim/fun/grad_2F1.hpp b7d80154b2ced4261ba709c665614182 *inst/include/stan/math/prim/fun/grad_F32.hpp 72ad56cba6cb4bfbef56ebf4dbd18470 *inst/include/stan/math/prim/fun/grad_inc_beta.hpp 
6a5ef670939ec6423ce56d71d7266cdf *inst/include/stan/math/prim/fun/grad_pFq.hpp 71c29db4dec8d2addba9eb2da757fce9 *inst/include/stan/math/prim/fun/grad_reg_inc_beta.hpp c74df8e997dd13fdf471e4f62d4e3874 *inst/include/stan/math/prim/fun/grad_reg_inc_gamma.hpp 4fe54b11d4695eba846b1518b0750dab *inst/include/stan/math/prim/fun/grad_reg_lower_inc_gamma.hpp 099c0d94edd68ded95067b518e1c74ba *inst/include/stan/math/prim/fun/head.hpp 7ff6c88e53afcf72bcefd8e7f98afc3a *inst/include/stan/math/prim/fun/hypergeometric_2F1.hpp 09391a6a9e249d5ddc3cc2a7a7ff8b6d *inst/include/stan/math/prim/fun/hypergeometric_2F2.hpp 30047111dec0dce3fd1ce497283242eb *inst/include/stan/math/prim/fun/hypergeometric_3F2.hpp 040548910302bc21457b8d9cf3c12490 *inst/include/stan/math/prim/fun/hypergeometric_pFq.hpp 2ebae33739fcceedcb9c3e66eec30e0a *inst/include/stan/math/prim/fun/hypot.hpp 54b194dd0f8dc2a11b31eac0ea2d66cf *inst/include/stan/math/prim/fun/i_times.hpp 74a59550c09d2c95b693434f8c626572 *inst/include/stan/math/prim/fun/identity_constrain.hpp 3ae8fd519a893d789010631387355e0b *inst/include/stan/math/prim/fun/identity_free.hpp 2d6beb1eb551be41c183281b4c0c2bd6 *inst/include/stan/math/prim/fun/identity_matrix.hpp 240a2d07202552778420b69b0ee6d324 *inst/include/stan/math/prim/fun/if_else.hpp a7cb162b197e173a2849b66d0d9bd2c1 *inst/include/stan/math/prim/fun/imag.hpp be5729fa9f4476a30902ed1e5ff4f03d *inst/include/stan/math/prim/fun/inc_beta.hpp c082a0a9d63388a3453a795eb16dcdbc *inst/include/stan/math/prim/fun/inc_beta_dda.hpp 2da62e383bfb70b27da91c90719f622c *inst/include/stan/math/prim/fun/inc_beta_ddb.hpp ab314cf831764ba0505f518baabcf1d8 *inst/include/stan/math/prim/fun/inc_beta_ddz.hpp ca20adfe3516eb9daac4e2181975fc50 *inst/include/stan/math/prim/fun/initialize.hpp 4f4c38ee2f24d48eef4b2c21223140f2 *inst/include/stan/math/prim/fun/initialize_fill.hpp cc486a7b0aa179625f5ec219e34bf152 *inst/include/stan/math/prim/fun/int_step.hpp f0d5eec590014b20437936a3640dbdc2 *inst/include/stan/math/prim/fun/inv.hpp 
1965c7823e97c4eb1c97cad454327f35 *inst/include/stan/math/prim/fun/inv_Phi.hpp 86e81331ee38a91770dd0702e95095c1 *inst/include/stan/math/prim/fun/inv_cloglog.hpp 7d7a9ce5e2ecaa1afd682448e82d4ac8 *inst/include/stan/math/prim/fun/inv_erfc.hpp cd894be7344245000d37071ef4450617 *inst/include/stan/math/prim/fun/inv_inc_beta.hpp be3bf51d03c70a77032769bc6db0fa1c *inst/include/stan/math/prim/fun/inv_logit.hpp 5f29556a0dc7cccf23c38f7186a65e6d *inst/include/stan/math/prim/fun/inv_sqrt.hpp 35ae67c656b96c94eb0111130a494c49 *inst/include/stan/math/prim/fun/inv_square.hpp c365380e476ddd8c5201c0a74de22ff2 *inst/include/stan/math/prim/fun/inverse.hpp 588415f9e3601162694cf966089fbc2c *inst/include/stan/math/prim/fun/inverse_softmax.hpp 0bc048e112efc46f7fcb78124ce5ef1d *inst/include/stan/math/prim/fun/inverse_spd.hpp eacdeb6090c0bd9e28825ed9b336d163 *inst/include/stan/math/prim/fun/is_any_nan.hpp 6176d3e63829ab4a063f335eb6c852fe *inst/include/stan/math/prim/fun/is_inf.hpp f6cbc0bbe88786c2261c507b5d168705 *inst/include/stan/math/prim/fun/is_integer.hpp 27405814832e662fd5615859eae418b0 *inst/include/stan/math/prim/fun/is_nan.hpp c2d88743d1bbff958a80fd3489e85ee3 *inst/include/stan/math/prim/fun/is_nonpositive_integer.hpp 1f06866b11c9d22fe857e75b014d0539 *inst/include/stan/math/prim/fun/is_uninitialized.hpp e102b75603a2df149adabe3abc672071 *inst/include/stan/math/prim/fun/isfinite.hpp c867ba559f03534c44782f777a18594d *inst/include/stan/math/prim/fun/isinf.hpp 6b1f94295d135434d37bacce7724ace4 *inst/include/stan/math/prim/fun/isnan.hpp f28f5dfde6ec6038a98eef075797ca42 *inst/include/stan/math/prim/fun/isnormal.hpp 128aa36788e3bedcba1de3c63bcbc884 *inst/include/stan/math/prim/fun/lambert_w.hpp 3db36474115d1058dfb820adab3e14c5 *inst/include/stan/math/prim/fun/lb_constrain.hpp 88c010d3923539a29481a8c1bc4c56b4 *inst/include/stan/math/prim/fun/lb_free.hpp 03c94c6c428d5fb75c1717b97a15bd18 *inst/include/stan/math/prim/fun/lbeta.hpp 84f744cf0ad3657f636624e137367ea0 
*inst/include/stan/math/prim/fun/ldexp.hpp a91263a1bef5a7d129e8fdc460de2bbe *inst/include/stan/math/prim/fun/lgamma.hpp 0c316433a0beaeeb3318f1efdbb45f4b *inst/include/stan/math/prim/fun/lgamma_stirling.hpp d8fb8161ec416c3a6fe6dca6e1e41993 *inst/include/stan/math/prim/fun/lgamma_stirling_diff.hpp 1fef09de9201159f79168907b0dbc391 *inst/include/stan/math/prim/fun/linspaced_array.hpp 62e306d3dd4b2a846fb10747331bf8af *inst/include/stan/math/prim/fun/linspaced_int_array.hpp 0e6db11ce5cc501cba1c8e05ae5b442c *inst/include/stan/math/prim/fun/linspaced_row_vector.hpp 22f9cda9d301bc769d92510d4d18b505 *inst/include/stan/math/prim/fun/linspaced_vector.hpp c700556349f65b5557ed0fa846ae86f5 *inst/include/stan/math/prim/fun/lmgamma.hpp 26c119c714ee6f0124f519d33bc67604 *inst/include/stan/math/prim/fun/lmultiply.hpp 73845632b85730731424eaf29b674d94 *inst/include/stan/math/prim/fun/log.hpp 02299806d3f078562fcee9402a68dd6e *inst/include/stan/math/prim/fun/log10.hpp ba1bcb84ed5310399fe1e076dfb359bc *inst/include/stan/math/prim/fun/log1m.hpp a98eb4c24ba3861e72cafaa20edf056f *inst/include/stan/math/prim/fun/log1m_exp.hpp 4eabc18768d331fc430e040d77f82fa1 *inst/include/stan/math/prim/fun/log1m_inv_logit.hpp 9f8d12ee870fe0bf07cb1af555ec89a4 *inst/include/stan/math/prim/fun/log1p.hpp 7ad9e8e9aa362b2fa0506edab605dc9a *inst/include/stan/math/prim/fun/log1p_exp.hpp 3d8bfeb277fcec922345d2d9a3d13d21 *inst/include/stan/math/prim/fun/log2.hpp aac356076cc89c0894b31bd04ac85669 *inst/include/stan/math/prim/fun/log_determinant.hpp 51b8f930a54796c9a14b17831eb13f9a *inst/include/stan/math/prim/fun/log_determinant_ldlt.hpp 543035957ddc9d737ef386b757a837bc *inst/include/stan/math/prim/fun/log_determinant_spd.hpp 70681350999f349d977f96683adfdad8 *inst/include/stan/math/prim/fun/log_diff_exp.hpp fca0c6f3d43ca802000b7e7eef68df46 *inst/include/stan/math/prim/fun/log_falling_factorial.hpp b29908668044b99f63b4fc9474d15a06 *inst/include/stan/math/prim/fun/log_inv_logit.hpp 93c1360eae53f9d7dd71c275e352f067 
*inst/include/stan/math/prim/fun/log_inv_logit_diff.hpp ae4a8eb5fd1730e6bf8ab61e552467d4 *inst/include/stan/math/prim/fun/log_mix.hpp b25d5ab503d84cb04259e5e556f0b1cc *inst/include/stan/math/prim/fun/log_modified_bessel_first_kind.hpp a313181b782890c5fc2c6bba61135b1c *inst/include/stan/math/prim/fun/log_rising_factorial.hpp a523853cb9e4fe9f8470a5fa8c702dde *inst/include/stan/math/prim/fun/log_softmax.hpp b8c28882a8cca852c15f2bbd4618a4c2 *inst/include/stan/math/prim/fun/log_sum_exp.hpp 50540111b8855afccfd3802f0f08746d *inst/include/stan/math/prim/fun/log_sum_exp_signed.hpp 142ead080741ac7d5c3435c10216c4ef *inst/include/stan/math/prim/fun/logb.hpp a6edb3b7aba1f9cf1567b4fed2d89403 *inst/include/stan/math/prim/fun/logical_and.hpp 3272c716c498ebec5a6ff56d64ee91ea *inst/include/stan/math/prim/fun/logical_eq.hpp 81f115909de2e8e4ca2aac0b1950ec2b *inst/include/stan/math/prim/fun/logical_gt.hpp 526dfd88c396ed37000d7c8c1d481905 *inst/include/stan/math/prim/fun/logical_gte.hpp d5f7043f9b8d449444966c06a0523ca8 *inst/include/stan/math/prim/fun/logical_lt.hpp 67b4b2e407504eba74150f68241fe0b7 *inst/include/stan/math/prim/fun/logical_lte.hpp a3b53ae18721c00d67e160a98b6ae870 *inst/include/stan/math/prim/fun/logical_negation.hpp e6c4cc3e40df731021c3686f6c904774 *inst/include/stan/math/prim/fun/logical_neq.hpp e3d64bdf1ba81abe99a202867d1f1db0 *inst/include/stan/math/prim/fun/logical_or.hpp 01cd508abcf9f99cdb210069a07251d6 *inst/include/stan/math/prim/fun/logit.hpp 42153e1262516928dad66a4c7fa52e49 *inst/include/stan/math/prim/fun/lub_constrain.hpp 94e0b0d8c13937feb33aff8eda5f8ec9 *inst/include/stan/math/prim/fun/lub_free.hpp 67db0f12840407b238cc0083a59084c0 *inst/include/stan/math/prim/fun/make_nu.hpp ebfec60485636a3830aff1008afb89b3 *inst/include/stan/math/prim/fun/matrix_exp.hpp 404bcfdbb62a2d391c9e94cf099ddb25 *inst/include/stan/math/prim/fun/matrix_exp_2x2.hpp f850dc88a8785cb6b3626baa4bf32537 *inst/include/stan/math/prim/fun/matrix_exp_action_handler.hpp 
130115e7a2fbc7e83ffc71a803a8d50e *inst/include/stan/math/prim/fun/matrix_exp_multiply.hpp 6086fff5c77a4ef0a30cf665bd6bf30e *inst/include/stan/math/prim/fun/matrix_exp_pade.hpp 2fa3b6d0d06addf347c18268b9744ca6 *inst/include/stan/math/prim/fun/matrix_power.hpp 173695549d95984cef31e348e7ccf528 *inst/include/stan/math/prim/fun/max.hpp 59e95c1f044f20ed0b69ec3ddc5e468c *inst/include/stan/math/prim/fun/max_size.hpp 4d2bba8359d78a2e1825b98d1550bbc5 *inst/include/stan/math/prim/fun/max_size_mvt.hpp cfa82512b8227a00430f9d5a3a43c1d9 *inst/include/stan/math/prim/fun/mdivide_left.hpp d98bf9859515b1a61600c2f8db8ffad8 *inst/include/stan/math/prim/fun/mdivide_left_ldlt.hpp 42cfb877b6815695e5d3332e723800fc *inst/include/stan/math/prim/fun/mdivide_left_spd.hpp 751793b276ff7191783fdd64ccfa6357 *inst/include/stan/math/prim/fun/mdivide_left_tri.hpp b9fd92ec06066ce6a354b4fc9ccafbd9 *inst/include/stan/math/prim/fun/mdivide_left_tri_low.hpp 13e65363c3c5286f013a9bfae8a27e1f *inst/include/stan/math/prim/fun/mdivide_right.hpp a5d6e81cf6bf56c0291691e9a4ddfe0f *inst/include/stan/math/prim/fun/mdivide_right_ldlt.hpp 6ed7ce4c844b3c782a35b9492ffb568f *inst/include/stan/math/prim/fun/mdivide_right_spd.hpp 8c4afeb6f993c2b3d08ba14eb85d8ba9 *inst/include/stan/math/prim/fun/mdivide_right_tri.hpp b21fcc9f596ee074a642a304aeb1e268 *inst/include/stan/math/prim/fun/mdivide_right_tri_low.hpp 926c3fe10a8a2f205db7ef2867c3dfda *inst/include/stan/math/prim/fun/mean.hpp eac16680a64a5b7b32d9520faa643a4f *inst/include/stan/math/prim/fun/min.hpp 2a2e0bb82b204e1073ec7afb398b89eb *inst/include/stan/math/prim/fun/minus.hpp 085475b1e83156ca5afacd145dbd045f *inst/include/stan/math/prim/fun/modified_bessel_first_kind.hpp ab0be77631e9fa159317ad94d99e4a9f *inst/include/stan/math/prim/fun/modified_bessel_second_kind.hpp 4a6fa4ad9a112cc84da8a3acb5e5ee5a *inst/include/stan/math/prim/fun/modulus.hpp 62f1584c65a79edd132ea0d5953db28c *inst/include/stan/math/prim/fun/multiply.hpp c9ff7b82cc24f0efae5a1503776f626e 
*inst/include/stan/math/prim/fun/multiply_log.hpp 09eb40e42935a52519853c3b2319cdb7 *inst/include/stan/math/prim/fun/multiply_lower_tri_self_transpose.hpp 1a9bf756310a0da50354d6ce80464f4c *inst/include/stan/math/prim/fun/norm.hpp 98701b5dc09a2117483ad2fed93fb2e8 *inst/include/stan/math/prim/fun/norm1.hpp 8aae4649df0ce899728f1d44b76c4630 *inst/include/stan/math/prim/fun/norm2.hpp 69ab7b48adcbe2fa7ae8739f08f412d4 *inst/include/stan/math/prim/fun/num_elements.hpp bc8385fc7755a0616e87d8810935f52c *inst/include/stan/math/prim/fun/offset_multiplier_constrain.hpp ed99f7c6e0429b7ba237783e34c275fc *inst/include/stan/math/prim/fun/offset_multiplier_free.hpp f8515c5d4cf783ce0f14870bb0429d89 *inst/include/stan/math/prim/fun/one_hot_array.hpp 45d0f472922ef06137287dd235f37596 *inst/include/stan/math/prim/fun/one_hot_int_array.hpp 96563578169c59da569b29d1483e17b7 *inst/include/stan/math/prim/fun/one_hot_row_vector.hpp d387951c49d27523a862dfa185f940a2 *inst/include/stan/math/prim/fun/one_hot_vector.hpp 723da42a01f2e5b0699d8e73489dfe87 *inst/include/stan/math/prim/fun/ones_array.hpp 896680820fa7b4f9c55e79c3358f5da4 *inst/include/stan/math/prim/fun/ones_int_array.hpp 0aa42bb56dd184a83d098e64262f2921 *inst/include/stan/math/prim/fun/ones_row_vector.hpp 9e9b26bb304fdf219a34730b48f70d67 *inst/include/stan/math/prim/fun/ones_vector.hpp 2ab5a389e3e56fb0df46a96e6377d1f5 *inst/include/stan/math/prim/fun/ordered_constrain.hpp cf2a9ec19ebfd562b72f9036f8b4aecd *inst/include/stan/math/prim/fun/ordered_free.hpp bae81742b5e733a6a184d440ddee5388 *inst/include/stan/math/prim/fun/owens_t.hpp 3d2156cef8c13a027b3423698bf886d1 *inst/include/stan/math/prim/fun/plus.hpp 5dd05f296cbf3ddf0f108f9a7b60bcf0 *inst/include/stan/math/prim/fun/poisson_binomial_log_probs.hpp e63baf3100766f90705a63eca2623424 *inst/include/stan/math/prim/fun/polar.hpp ffdb89ab6134bfb84ea6323f15a10cd3 *inst/include/stan/math/prim/fun/positive_constrain.hpp 85f5de2e92ee80f32259121c46cba61a 
*inst/include/stan/math/prim/fun/positive_free.hpp 515f4a9a472919f23bf64af36c31cc46 *inst/include/stan/math/prim/fun/positive_ordered_constrain.hpp 2df303559318ff05ba28e3e37636864a *inst/include/stan/math/prim/fun/positive_ordered_free.hpp d3028945f7b16471f441409ee7bd2afe *inst/include/stan/math/prim/fun/pow.hpp 7c2e940c9dcb941504ba30c7ebdd701c *inst/include/stan/math/prim/fun/primitive_value.hpp 93adc66cee905ad09b5f976924a1d949 *inst/include/stan/math/prim/fun/prob_constrain.hpp 78e9fe3b685deecf8458f1f53b878794 *inst/include/stan/math/prim/fun/prob_free.hpp 201d7d031e95091f251a70048570a4b7 *inst/include/stan/math/prim/fun/prod.hpp e22f52a86a1b131d8a8582e049d5cebe *inst/include/stan/math/prim/fun/proj.hpp 1fccb6df49fbbf63a00b99a5ac5f473e *inst/include/stan/math/prim/fun/promote_elements.hpp 745492f70106415a9b7a65a6f64ef74f *inst/include/stan/math/prim/fun/promote_scalar.hpp 9189b2c61eff7731166a053c7a028b1e *inst/include/stan/math/prim/fun/pseudo_eigenvalues.hpp 65454184bb9ba3c35797bbd55305c7e2 *inst/include/stan/math/prim/fun/pseudo_eigenvectors.hpp ae5cb6c5c3397b5c6bb6b4b94e10447a *inst/include/stan/math/prim/fun/qr.hpp 4d3326775b4ceed95cf1df99ebc9f3e2 *inst/include/stan/math/prim/fun/qr_Q.hpp 81373994829a4d72d7549f593c73354c *inst/include/stan/math/prim/fun/qr_R.hpp 11d947ebbc67c901abb81fcbbb8e12e0 *inst/include/stan/math/prim/fun/qr_thin.hpp b64bba872962eebb9adf29137173ebe4 *inst/include/stan/math/prim/fun/qr_thin_Q.hpp c4a57c25e467faaf0eeb795932122778 *inst/include/stan/math/prim/fun/qr_thin_R.hpp 571121ea7406819df37c4fdeb06340c8 *inst/include/stan/math/prim/fun/quad_form.hpp c1f246ee5482f8dfb668516feeb66865 *inst/include/stan/math/prim/fun/quad_form_diag.hpp 377fdb9be308987ce78cc57356701e82 *inst/include/stan/math/prim/fun/quad_form_sym.hpp 3bdd714538069f6234371c73eb418c13 *inst/include/stan/math/prim/fun/quantile.hpp 82b4714b76b65509a1b25b48e8bed821 *inst/include/stan/math/prim/fun/rank.hpp d4226a336fcda262c947c3fc5e6ae76b 
*inst/include/stan/math/prim/fun/read_corr_L.hpp 895b785accbe37bed34646b1ba0123a3 *inst/include/stan/math/prim/fun/read_corr_matrix.hpp 8c9faf268d9c905bd552629cdd58a3e6 *inst/include/stan/math/prim/fun/read_cov_L.hpp d6e286dad727e20dd21dd4637e1ac623 *inst/include/stan/math/prim/fun/read_cov_matrix.hpp 7452feaa2a012436c8e452360259801f *inst/include/stan/math/prim/fun/real.hpp 256a6da8119f9fc6a1e849f34d276745 *inst/include/stan/math/prim/fun/rep_array.hpp 75525bdf7244b1015e76eb1b0a05fad9 *inst/include/stan/math/prim/fun/rep_matrix.hpp 2f7d545adaf43e0f6e02557646266281 *inst/include/stan/math/prim/fun/rep_row_vector.hpp d7b35386bfbcf61437c8ac3225570893 *inst/include/stan/math/prim/fun/rep_vector.hpp 0c60b06b031602802cbb4735b26322b8 *inst/include/stan/math/prim/fun/resize.hpp 22833c82e5c27f31ed4ccd73e53a6c62 *inst/include/stan/math/prim/fun/reverse.hpp 75ac6decb3d86ac7bb19f569b0827dc9 *inst/include/stan/math/prim/fun/rising_factorial.hpp d11a259fc81531af558e129c7e315a83 *inst/include/stan/math/prim/fun/round.hpp 786f7299d504333954c3ddbfeb2a873f *inst/include/stan/math/prim/fun/row.hpp b3440168e038896f134f857fd55ffaaa *inst/include/stan/math/prim/fun/rows.hpp 0121e59b09717435de097cde2062f9eb *inst/include/stan/math/prim/fun/rows_dot_product.hpp e70fb0217c649e74684a26508d5020e9 *inst/include/stan/math/prim/fun/rows_dot_self.hpp 1899e270269eec1a16c74284cb20051b *inst/include/stan/math/prim/fun/scalar_seq_view.hpp 68cae61cea9ff6ac101145e97048cedd *inst/include/stan/math/prim/fun/scalbn.hpp ac5bdb6f6b50bc66f557f2a233daeea4 *inst/include/stan/math/prim/fun/scale_matrix_exp_multiply.hpp 62eea15b0bc87995484ef6a83a670f47 *inst/include/stan/math/prim/fun/scaled_add.hpp 4a7f3b62c2cc29433c002750f84b4495 *inst/include/stan/math/prim/fun/sd.hpp b1a80c5229206009d8a8af5627f9d98a *inst/include/stan/math/prim/fun/segment.hpp a9654ec8e29c0e854f52e76ea34fa079 *inst/include/stan/math/prim/fun/select.hpp ce1c3cb1228615bcb7394c2c1503363c *inst/include/stan/math/prim/fun/sign.hpp 
3c210a27187ac798a06dfe69539a5dbd *inst/include/stan/math/prim/fun/signbit.hpp d00a6e242647fbf4e5c0d631e6be4c71 *inst/include/stan/math/prim/fun/simplex_constrain.hpp 5d6854bf80d39f4ac0da4bc675533509 *inst/include/stan/math/prim/fun/simplex_free.hpp bbd3fafd9305513a3a5bf957610fdc94 *inst/include/stan/math/prim/fun/sin.hpp e52d54fbe997c3a8aee20e14e9bb297f *inst/include/stan/math/prim/fun/singular_values.hpp 39710d3fc30e378bbee35c3c2dfe9306 *inst/include/stan/math/prim/fun/sinh.hpp fa37010938fecfe4245a751f48a6b7b1 *inst/include/stan/math/prim/fun/size.hpp 4a2da8279dd07faf78f000d6aed6d78a *inst/include/stan/math/prim/fun/size_mvt.hpp e8bfd80200ed03b13cc53ef15ed373c5 *inst/include/stan/math/prim/fun/size_zero.hpp eb57e622d91d6f8c099476721069c56e *inst/include/stan/math/prim/fun/softmax.hpp 672453ca894a870ac1e1124e40f396c4 *inst/include/stan/math/prim/fun/sort_asc.hpp 0f74cbc524c9b59628658269126c684f *inst/include/stan/math/prim/fun/sort_desc.hpp f8504a0c6962a7d2b5685dcd1653ef1b *inst/include/stan/math/prim/fun/sort_indices.hpp 84431afa818cff259249043695aef7d3 *inst/include/stan/math/prim/fun/sort_indices_asc.hpp eea3e4f7090eab96852555273ee1377f *inst/include/stan/math/prim/fun/sort_indices_desc.hpp 30cad1a393a173db4a44954d0c10481b *inst/include/stan/math/prim/fun/sqrt.hpp 0427f9b356ea31e7ac31bc61625e535a *inst/include/stan/math/prim/fun/square.hpp 510ff20c612cdc6a6697bf73d75bf679 *inst/include/stan/math/prim/fun/squared_distance.hpp cf090721470f64ae99b41a90086fcee8 *inst/include/stan/math/prim/fun/stan_print.hpp 35129ac5687c4f3825129b6dc2bead8a *inst/include/stan/math/prim/fun/step.hpp 7503d4fde665d5ae9fa684990503fb19 *inst/include/stan/math/prim/fun/sub_col.hpp 5fb8daa3ad27af8fef77eef995927855 *inst/include/stan/math/prim/fun/sub_row.hpp b0b7e1f83720082bba180319b523857e *inst/include/stan/math/prim/fun/subtract.hpp 48c745f1907bbf527e87503ff84c5687 *inst/include/stan/math/prim/fun/sum.hpp 08438d0500ee157964a07bee83d82bdc *inst/include/stan/math/prim/fun/svd.hpp 
1df4dd68ed9b53609d0e866e97a9d8c5 *inst/include/stan/math/prim/fun/svd_U.hpp a19fefa5a737291c2f7de21f8a9fce71 *inst/include/stan/math/prim/fun/svd_V.hpp 54b354afcfad336cb490d63b940c305e *inst/include/stan/math/prim/fun/symmetrize_from_lower_tri.hpp c9648fc6dc56b55e4320da8f891c50b2 *inst/include/stan/math/prim/fun/symmetrize_from_upper_tri.hpp fd4f8f3038afadd62614db22b832c7f7 *inst/include/stan/math/prim/fun/tail.hpp 6bdf2565da2b6653b6203614bce19687 *inst/include/stan/math/prim/fun/tan.hpp 0dc767656fd1140190471c728887d35f *inst/include/stan/math/prim/fun/tanh.hpp 76e748a988fae82d79fa4eb130fd7b87 *inst/include/stan/math/prim/fun/tcrossprod.hpp e89d94b5b2ca774856dfeb3eac07e820 *inst/include/stan/math/prim/fun/tgamma.hpp 15539f26bda765bddce1e4af7fb21575 *inst/include/stan/math/prim/fun/to_array_1d.hpp 154ae9dc212d4e2e7c747d03fa982eee *inst/include/stan/math/prim/fun/to_array_2d.hpp 5ae3f1a5d9fafeaef98aa5a772a29a75 *inst/include/stan/math/prim/fun/to_complex.hpp 6b309fc721316b7a2ea647ec0a3f6a02 *inst/include/stan/math/prim/fun/to_int.hpp ebe49a74e1b45645b81fb89f230690b5 *inst/include/stan/math/prim/fun/to_matrix.hpp 5e5858432a9dd6003058a8100f006f6c *inst/include/stan/math/prim/fun/to_ref.hpp 859196f909d62b1241636ca01c27acaa *inst/include/stan/math/prim/fun/to_row_vector.hpp edc104ef158af3d5c8ef8dfe80d7757e *inst/include/stan/math/prim/fun/to_vector.hpp b9bbd845d838010879eaafd2b513e987 *inst/include/stan/math/prim/fun/trace.hpp 75f24682423e029b1be0fbc8ef059b2f *inst/include/stan/math/prim/fun/trace_gen_inv_quad_form_ldlt.hpp 47135d09ba0955b2825b27a8eb5a3fe5 *inst/include/stan/math/prim/fun/trace_gen_quad_form.hpp 3d419ee8b0e935398554d3664e29f6d8 *inst/include/stan/math/prim/fun/trace_inv_quad_form_ldlt.hpp 471787e325c7a01297d9331a5c983021 *inst/include/stan/math/prim/fun/trace_quad_form.hpp 0914ae5d1e5b924f818eb8b692b2dbae *inst/include/stan/math/prim/fun/transpose.hpp 8a80829b8b2b494380f3ea4e39ec044c *inst/include/stan/math/prim/fun/trigamma.hpp 
9875f87462fd12502e086db003f2ad44 *inst/include/stan/math/prim/fun/trunc.hpp e84bd249f3b3bb15aab4de866e4dde0c *inst/include/stan/math/prim/fun/typedefs.hpp f46df05a874ed23f33e2751aac2c8314 *inst/include/stan/math/prim/fun/ub_constrain.hpp 522fdc182445a040226d847626e9975b *inst/include/stan/math/prim/fun/ub_free.hpp 9f2acd4ebd40d151bc549c2e162b5ee7 *inst/include/stan/math/prim/fun/uniform_simplex.hpp 0defe43648e7448173e715a110cd0c5b *inst/include/stan/math/prim/fun/unit_vector_constrain.hpp 963d059d66e175deb22bf4742dc396c8 *inst/include/stan/math/prim/fun/unit_vector_free.hpp 0c20c0d40cabf4b0804f630694a0dc7c *inst/include/stan/math/prim/fun/unitspaced_array.hpp 4cfab6826db41a2b85aabe3ce455ada1 *inst/include/stan/math/prim/fun/value_of.hpp cf783938b57a613c7fed8ab8559baf31 *inst/include/stan/math/prim/fun/value_of_rec.hpp 911ce93b0623296a900aecf09500cc17 *inst/include/stan/math/prim/fun/variance.hpp 656bd609774439c2b1ffadb07889f16c *inst/include/stan/math/prim/fun/vec_concat.hpp ac56e149d920192200bb624038faec2b *inst/include/stan/math/prim/fun/vector_seq_view.hpp fde247e3c342714ba627e7c521988fb5 *inst/include/stan/math/prim/fun/welford_covar_estimator.hpp d3e2ae264ef208e509ba4858afc31a37 *inst/include/stan/math/prim/fun/welford_var_estimator.hpp ff20b088b5e7baca4b240186f41f7353 *inst/include/stan/math/prim/fun/zeros_array.hpp 0daa78ce098e661250dc2c7d80cdd4f4 *inst/include/stan/math/prim/fun/zeros_int_array.hpp ea3ee2fe7aec1ad56c5ede3249fd355a *inst/include/stan/math/prim/fun/zeros_row_vector.hpp ad71b9e381dc2069b202e88a4a13d90f *inst/include/stan/math/prim/fun/zeros_vector.hpp 5e5aef31300b0b9e9519d96f283a7cb2 *inst/include/stan/math/prim/functor.hpp f1c6d2e6df7ddf44f72f8dba89d5975f *inst/include/stan/math/prim/functor/algebra_solver_adapter.hpp 2bb9913b3ffe86f12b9ddf2a94fde4d9 *inst/include/stan/math/prim/functor/apply.hpp 13f40ca674a24e9ff0096e3a8d83c37f *inst/include/stan/math/prim/functor/apply_scalar_binary.hpp 6f2064cc0bd1df648190dfd878ae259a 
*inst/include/stan/math/prim/functor/apply_scalar_ternary.hpp 5b487e0bd5b9cc2b9873c6c5f8411b48 *inst/include/stan/math/prim/functor/apply_scalar_unary.hpp 5dbd76558d16c58c530a19c19ff2f475 *inst/include/stan/math/prim/functor/apply_vector_unary.hpp 31b6a41a4cb9c35668968d3abaa4eabb *inst/include/stan/math/prim/functor/broadcast_array.hpp 543154c2189166b5744a571bd2b08d13 *inst/include/stan/math/prim/functor/coupled_ode_system.hpp 4b8e68322a1dd6aeebb3724395b00f44 *inst/include/stan/math/prim/functor/finite_diff_gradient.hpp 458702552da777f02073beecff8c6ef5 *inst/include/stan/math/prim/functor/finite_diff_gradient_auto.hpp d0abfefae154add7ff98013fb45a0cd1 *inst/include/stan/math/prim/functor/for_each.hpp 5afe5e92b65b4daf9fd6157c9fd0c9c5 *inst/include/stan/math/prim/functor/hcubature.hpp 942362a14cc0cbc4dcbdd5e0121724a0 *inst/include/stan/math/prim/functor/integrate_1d.hpp 77de6b6028a5ae75afa52b8e8afdbf71 *inst/include/stan/math/prim/functor/integrate_1d_adapter.hpp 99a7928a103de64f26b1a201535a6995 *inst/include/stan/math/prim/functor/integrate_ode_rk45.hpp 7590d01fe78b60164b007f109c26f672 *inst/include/stan/math/prim/functor/integrate_ode_std_vector_interface_adapter.hpp 304f3e9a520f18b9edd14a3de39a473f *inst/include/stan/math/prim/functor/map_rect.hpp 74962beb567e41074b788474a1e77731 *inst/include/stan/math/prim/functor/map_rect_combine.hpp 35238f14d69425591ae4d47312b3d2d5 *inst/include/stan/math/prim/functor/map_rect_concurrent.hpp 8d9020de42a08bb68c2d8ed2a9d9d571 *inst/include/stan/math/prim/functor/map_rect_mpi.hpp 823a4d72c02c9553a92989727fe273cf *inst/include/stan/math/prim/functor/map_rect_reduce.hpp 2447b097b1b93ecd1b2d5a99c2376976 *inst/include/stan/math/prim/functor/mpi_cluster.hpp ae5285a9854322c7dc99320e80514c5d *inst/include/stan/math/prim/functor/mpi_cluster_inst.cpp e0df27269ab0ccee2d9bc8a10c80699b *inst/include/stan/math/prim/functor/mpi_command.hpp 41d8964efb0c07267b2870cbbd8a8a6e *inst/include/stan/math/prim/functor/mpi_distributed_apply.hpp 
fd209041f89b41226506604372935057 *inst/include/stan/math/prim/functor/mpi_parallel_call.hpp 048aae3adafe466d416ea79b8fd24ebe *inst/include/stan/math/prim/functor/ode_ckrk.hpp d6c12dacc98bce44c3ad64eb90051d8b *inst/include/stan/math/prim/functor/ode_rk45.hpp d283be9a748a24384d9ef0259a926490 *inst/include/stan/math/prim/functor/ode_store_sensitivities.hpp d366ffa97bbc5c4727fbefe6e7d53ea2 *inst/include/stan/math/prim/functor/operands_and_partials.hpp 359c14115dc27d709ce3729f0a49d42d *inst/include/stan/math/prim/functor/partials_propagator.hpp 5c25711961dc64f840d775cb37bc947c *inst/include/stan/math/prim/functor/reduce_sum.hpp 3e244b3031db5bcb5a9f584e0cac13eb *inst/include/stan/math/prim/functor/reduce_sum_static.hpp 8445e95ecef0d0e1ab48e173fb494d7e *inst/include/stan/math/prim/meta.hpp 627d81225d17693185dc0b186e3ccc86 *inst/include/stan/math/prim/meta/StdVectorBuilder.hpp 63162d9f7464fe7fa436325707e26771 *inst/include/stan/math/prim/meta/VectorBuilder.hpp 686abdabc034e7f2762799a04ad1d5bd *inst/include/stan/math/prim/meta/VectorBuilderHelper.hpp f66dc0fd406ea6b131c1b5dc6349e6e2 *inst/include/stan/math/prim/meta/ad_promotable.hpp 3be3429955dcf17832f694ed6b67bf6c *inst/include/stan/math/prim/meta/append_return_type.hpp c8484977466f405c4ed326e49a1d7479 *inst/include/stan/math/prim/meta/base_type.hpp 3f80fb2730652e57a3d26cf79d128fd9 *inst/include/stan/math/prim/meta/bool_constant.hpp 70cc77177abbb183aa960a0478d42f2f *inst/include/stan/math/prim/meta/child_type.hpp c4a9c3b4abc15db59073ff45b08367b5 *inst/include/stan/math/prim/meta/compiler_attributes.hpp 47b16068bb02d5c1e669fbbab30dab59 *inst/include/stan/math/prim/meta/conjunction.hpp 1561aa5b2002f8aa200ea051bcbc6869 *inst/include/stan/math/prim/meta/contains_fvar.hpp 8c8a8d0f25b6af2d9810ad2b39d30792 *inst/include/stan/math/prim/meta/contains_std_vector.hpp 882dddb92310a09bf4172485aea8a081 *inst/include/stan/math/prim/meta/disjunction.hpp 2b397bb9bf713a74c14e129265e3f37d *inst/include/stan/math/prim/meta/error_index.hpp 
01060a5e623319720aa4b9560a44f65d *inst/include/stan/math/prim/meta/forward_as.hpp 61a5b30cfaddfa3c7c61461f6b16e0fa *inst/include/stan/math/prim/meta/holder.hpp beebb050afea7bc651ae9b6e0a3641d2 *inst/include/stan/math/prim/meta/include_summand.hpp a62137f5133d038255bef86901f983e5 *inst/include/stan/math/prim/meta/index_apply.hpp 93682e31ba07766aa3ec59805e882829 *inst/include/stan/math/prim/meta/index_type.hpp 46256adf079162f5267816876c57eaf1 *inst/include/stan/math/prim/meta/is_arena_matrix.hpp 6ed4fdebff78b46d24f5c9eb01c7e5a8 *inst/include/stan/math/prim/meta/is_autodiff.hpp 08dc048f5cb8ee09cf0214f9953bc893 *inst/include/stan/math/prim/meta/is_base_pointer_convertible.hpp 72b931ca3234e7c9e1a7cc33f2b3ce2d *inst/include/stan/math/prim/meta/is_complex.hpp c24274165f6958ffe7cfc58c4daf5b77 *inst/include/stan/math/prim/meta/is_constant.hpp ff8ff4b0ac2177af765fdae4732c1f5d *inst/include/stan/math/prim/meta/is_container.hpp 361d63224908989ea21a3f17bf38b67b *inst/include/stan/math/prim/meta/is_container_or_var_matrix.hpp 8927df03d9ca097b16db7e675227a58e *inst/include/stan/math/prim/meta/is_dense_dynamic.hpp cc46663f63f17f3345f1b0187347ae09 *inst/include/stan/math/prim/meta/is_detected.hpp c271b9415494f059df9f6264d1cb0eea *inst/include/stan/math/prim/meta/is_double_or_int.hpp 5fd285dd6bd969c43a2ce0d2aeae4a79 *inst/include/stan/math/prim/meta/is_eigen.hpp 5a70c2ea85b10d3cabe230bc592a310d *inst/include/stan/math/prim/meta/is_eigen_dense_base.hpp 1f3bd6d6c98bcfe264ae9ac9f71560d5 *inst/include/stan/math/prim/meta/is_eigen_dense_dynamic.hpp 1587ab0807c5b003ad76b2094593cfb6 *inst/include/stan/math/prim/meta/is_eigen_matrix.hpp a5e85d9551b37b87b856fd679ed0cd50 *inst/include/stan/math/prim/meta/is_eigen_matrix_base.hpp b0b6e56deb608995c60162715f908517 *inst/include/stan/math/prim/meta/is_eigen_sparse_base.hpp b37f2aeea71682666fdb859f38156d1e *inst/include/stan/math/prim/meta/is_fvar.hpp 00b8a9a3639ecd8ebce67e859aad370c *inst/include/stan/math/prim/meta/is_kernel_expression.hpp 
75bdecb216249391deb5eaffa6faf7d0 *inst/include/stan/math/prim/meta/is_matrix.hpp 09cae744d58860ddd5578a0505265de7 *inst/include/stan/math/prim/meta/is_matrix_cl.hpp 63205db46b5ae90ae066ac92b583aede *inst/include/stan/math/prim/meta/is_plain_type.hpp b747fb38b0f73fce87b943de0f1c9372 *inst/include/stan/math/prim/meta/is_rev_matrix.hpp 22455259495876575743a7e6efae8fee *inst/include/stan/math/prim/meta/is_stan_scalar.hpp 3f7d3f3ff219dcb831883e32e967da80 *inst/include/stan/math/prim/meta/is_stan_scalar_or_eigen.hpp 4bc67a5ab842e512f2de4aa26a1dfd0c *inst/include/stan/math/prim/meta/is_string_convertible.hpp 8dba1f02fc519005eb4a3b5c56bc5825 *inst/include/stan/math/prim/meta/is_tuple.hpp d78a2c5a8068cc75188288a6dbd5ad5f *inst/include/stan/math/prim/meta/is_var.hpp bf2927560d159207d2259c5377ea460a *inst/include/stan/math/prim/meta/is_var_and_matrix_types.hpp 5d33c928265c7a7096f8297b9768b740 *inst/include/stan/math/prim/meta/is_var_dense_dynamic.hpp 5273dcca0958d68e70883de19a62cc8c *inst/include/stan/math/prim/meta/is_var_eigen.hpp 3b419d329c63cc6bf62a6167defb0e4b *inst/include/stan/math/prim/meta/is_var_matrix.hpp 67d324bb256a04d93231de4ae9036e05 *inst/include/stan/math/prim/meta/is_var_or_arithmetic.hpp be65474b4da389802832de23d15df85e *inst/include/stan/math/prim/meta/is_vari.hpp 4bb27af932aa8a75843c4c0bec3d3029 *inst/include/stan/math/prim/meta/is_vector.hpp e7da865829a134331ad5bba0571c1a57 *inst/include/stan/math/prim/meta/is_vector_like.hpp 05d4226ac619a81b3e4a8d822c7d88cd *inst/include/stan/math/prim/meta/partials_return_type.hpp 644077f4126facc66cd968b2012ecdd7 *inst/include/stan/math/prim/meta/partials_type.hpp 075fd934c347d02802be944fe7713f59 *inst/include/stan/math/prim/meta/plain_type.hpp 68b43a4368512612ec80efc8dc8fee98 *inst/include/stan/math/prim/meta/possibly_sum.hpp 273dec0324918b0857fb30b929c26307 *inst/include/stan/math/prim/meta/promote_args.hpp fd3f50ea1041e5ee26bdf44846aadfaf *inst/include/stan/math/prim/meta/promote_scalar_type.hpp 
65df52477c731dfe86b1c650d2b11e31 *inst/include/stan/math/prim/meta/ref_type.hpp 4991157382a14353e2c0635e667b4d18 *inst/include/stan/math/prim/meta/require_generics.hpp 6028ce2c207e9a010c703b970df4a3d7 *inst/include/stan/math/prim/meta/require_helpers.hpp 2ce31810b7d583090922cdd413446b0a *inst/include/stan/math/prim/meta/return_type.hpp 314eba5ae46ec7782cb5eb7d846e3958 *inst/include/stan/math/prim/meta/scalar_type.hpp c95f86926f000c98062d2e9783d3f30a *inst/include/stan/math/prim/meta/scalar_type_pre.hpp fbdd091f9ecff467aee8abb1fa15d470 *inst/include/stan/math/prim/meta/seq_view.hpp fbec2c5a9b5cf68a7141b9ade30d4965 *inst/include/stan/math/prim/meta/static_select.hpp 4672dc23cbfcc1dfca8d569101cb3fc5 *inst/include/stan/math/prim/meta/value_type.hpp 541b6b7aead413a9bbe11de7d434d773 *inst/include/stan/math/prim/meta/void_t.hpp 552c6314f29674bb2da86ad528691ae8 *inst/include/stan/math/prim/prob.hpp 76eaac22d8d1912308bbfd33d08e00f1 *inst/include/stan/math/prim/prob/bernoulli_ccdf_log.hpp ba80bc24b603f1bc58fba5d7cef02200 *inst/include/stan/math/prim/prob/bernoulli_cdf.hpp d194be1f10f50ba3bfdaab8b99ebb0c0 *inst/include/stan/math/prim/prob/bernoulli_cdf_log.hpp d567eba174e6327bd8888175edba7781 *inst/include/stan/math/prim/prob/bernoulli_lccdf.hpp 12f26a9a453db36abf13d6539a1f04c2 *inst/include/stan/math/prim/prob/bernoulli_lcdf.hpp f9f1c1147bda53007c512c97992e6083 *inst/include/stan/math/prim/prob/bernoulli_log.hpp 3bd64ad74af970d646b35c0919d3492f *inst/include/stan/math/prim/prob/bernoulli_logit_glm_log.hpp b6e6a937ecbf8214037b54ad0b005f86 *inst/include/stan/math/prim/prob/bernoulli_logit_glm_lpmf.hpp f32d0a2083240553973453a13fb552ca *inst/include/stan/math/prim/prob/bernoulli_logit_glm_rng.hpp e4ca47a1e24be270512c7092367efd2e *inst/include/stan/math/prim/prob/bernoulli_logit_log.hpp e8cd6a3a2ec3a8cf21b1ba1587bb847c *inst/include/stan/math/prim/prob/bernoulli_logit_lpmf.hpp 018056aec88aa639c920d4d356a6f2e4 *inst/include/stan/math/prim/prob/bernoulli_logit_rng.hpp 
3784e7af2394a49ec2c320869258d45a *inst/include/stan/math/prim/prob/bernoulli_lpmf.hpp 512b3a4d8a41f2061bffaf1b58685f6c *inst/include/stan/math/prim/prob/bernoulli_rng.hpp 9f0288fb25835a46bdb09c5f4f1e359b *inst/include/stan/math/prim/prob/beta_binomial_ccdf_log.hpp 447417078e8e620409b5a98a124cbccb *inst/include/stan/math/prim/prob/beta_binomial_cdf.hpp 8dc8682f4edd9c0c1ee36e24c4312e7b *inst/include/stan/math/prim/prob/beta_binomial_cdf_log.hpp 6988be9bf256edc21f692289431bfb99 *inst/include/stan/math/prim/prob/beta_binomial_lccdf.hpp d81d65b49c92c569ddf915c691d509de *inst/include/stan/math/prim/prob/beta_binomial_lcdf.hpp a59410041b904c16cad0283377dae3a1 *inst/include/stan/math/prim/prob/beta_binomial_log.hpp decfb687e0fd9937c290f12a0bea41fb *inst/include/stan/math/prim/prob/beta_binomial_lpmf.hpp dff9bfb324c320c4dc38f16bf56b98db *inst/include/stan/math/prim/prob/beta_binomial_rng.hpp c2df0b0b3c192dab0673ff68fa5622ca *inst/include/stan/math/prim/prob/beta_ccdf_log.hpp 9d186735187c2516c84b322cdac545f4 *inst/include/stan/math/prim/prob/beta_cdf.hpp c04fbe77a44efb79bdc8c1f4487278ef *inst/include/stan/math/prim/prob/beta_cdf_log.hpp 22ccbe418c16fdc6fca8aa7522870220 *inst/include/stan/math/prim/prob/beta_lccdf.hpp 7d2588cdc68cefbe4c53f0528aaa9b24 *inst/include/stan/math/prim/prob/beta_lcdf.hpp f98b048e116733e7247cc4ca4074952d *inst/include/stan/math/prim/prob/beta_log.hpp 9fcd424ea27801dba7379512c951b7ef *inst/include/stan/math/prim/prob/beta_lpdf.hpp 6e700fe94d6fecca3cf05db3037a67c9 *inst/include/stan/math/prim/prob/beta_proportion_ccdf_log.hpp 8abf8c729f280945acf7b34cdbf72383 *inst/include/stan/math/prim/prob/beta_proportion_cdf_log.hpp 57b0f979a71d93bed7e13f629fbf5a7a *inst/include/stan/math/prim/prob/beta_proportion_lccdf.hpp bfb2bb742fe321b2c1ba4c83c9c5dd8c *inst/include/stan/math/prim/prob/beta_proportion_lcdf.hpp bc3cb581b182ee5821144a7baed5e1d6 *inst/include/stan/math/prim/prob/beta_proportion_log.hpp 4345bf370f1d0b8ad01c463021f311fd 
*inst/include/stan/math/prim/prob/beta_proportion_lpdf.hpp 90183f70618a6135214f723e96b2013f *inst/include/stan/math/prim/prob/beta_proportion_rng.hpp 6f2084ad6ee55b9a0b50fb151d38a27a *inst/include/stan/math/prim/prob/beta_rng.hpp 649eafcdca5fd35863ca8b2ad996d136 *inst/include/stan/math/prim/prob/binomial_ccdf_log.hpp 9cdea62131caa227259e554d75df6e65 *inst/include/stan/math/prim/prob/binomial_cdf.hpp be38e1f0ef25dcc942b7cd9334ff9f03 *inst/include/stan/math/prim/prob/binomial_cdf_log.hpp 0c79ef88ed807ec4ab4503bc104c9852 *inst/include/stan/math/prim/prob/binomial_lccdf.hpp dab3f9868e2151288d80da03a12b39bd *inst/include/stan/math/prim/prob/binomial_lcdf.hpp f66eef7566d99d8ad96b8c925084ea03 *inst/include/stan/math/prim/prob/binomial_log.hpp 33fd408dfc74254f4a176b7ef9b33d99 *inst/include/stan/math/prim/prob/binomial_logit_log.hpp cba5d99c4c4d075041049c72a48d41d4 *inst/include/stan/math/prim/prob/binomial_logit_lpmf.hpp eccb6c59bff48034357f9a305e1348a1 *inst/include/stan/math/prim/prob/binomial_lpmf.hpp 6a856442907098655e842bfece33c2b0 *inst/include/stan/math/prim/prob/binomial_rng.hpp 9d40680e7bee7692dfc413e9ffb25e26 *inst/include/stan/math/prim/prob/categorical_log.hpp 96796c9d4bfee94bf832393c01a1db00 *inst/include/stan/math/prim/prob/categorical_logit_glm_lpmf.hpp caff3da49e787710ada92b5c4d82a588 *inst/include/stan/math/prim/prob/categorical_logit_log.hpp 6c1f0f606491ea1ec6c3a3831be44d39 *inst/include/stan/math/prim/prob/categorical_logit_lpmf.hpp dd4ce19ce75a2bef1c294f2ffd531f8a *inst/include/stan/math/prim/prob/categorical_logit_rng.hpp a3f5756008168270797636c6bc93131d *inst/include/stan/math/prim/prob/categorical_lpmf.hpp 6a55afa8ccd817baf88eede330c201ed *inst/include/stan/math/prim/prob/categorical_rng.hpp 4f9bf1146bbc9466747f79910436f3e4 *inst/include/stan/math/prim/prob/cauchy_ccdf_log.hpp a6bcb931521dc0e6e2d244ac03b37e9c *inst/include/stan/math/prim/prob/cauchy_cdf.hpp d370763ee1a6311f089849abc6d03fdc *inst/include/stan/math/prim/prob/cauchy_cdf_log.hpp 
39f0b8d0b3ca93b1463523b138248d5e *inst/include/stan/math/prim/prob/cauchy_lccdf.hpp 67486ee165cfbd55832847cb6bacbe50 *inst/include/stan/math/prim/prob/cauchy_lcdf.hpp 97ab629dbf9ef59a7c899987f128f6d0 *inst/include/stan/math/prim/prob/cauchy_log.hpp 8bd0da028747c62494301beb3b3cce74 *inst/include/stan/math/prim/prob/cauchy_lpdf.hpp 1739592eeaea79a6951b173c470d11c5 *inst/include/stan/math/prim/prob/cauchy_rng.hpp f27d7d2d3b47659c957d3cf7d21b216e *inst/include/stan/math/prim/prob/chi_square_ccdf_log.hpp f3a6870079a9b502c2f4bb0643e52b8e *inst/include/stan/math/prim/prob/chi_square_cdf.hpp 07f95d13f17d2867699bca37b4be69c1 *inst/include/stan/math/prim/prob/chi_square_cdf_log.hpp a166bc69a7e491bb891b85f58daecffc *inst/include/stan/math/prim/prob/chi_square_lccdf.hpp 522ec70880e85d6da39e4230ed3d6fa0 *inst/include/stan/math/prim/prob/chi_square_lcdf.hpp 759901b1408b7a3487c206da8bd93160 *inst/include/stan/math/prim/prob/chi_square_log.hpp d5f89fbeb20e4e950051365a85e0485a *inst/include/stan/math/prim/prob/chi_square_lpdf.hpp 35815b415cb14a1e8efe941991a7a426 *inst/include/stan/math/prim/prob/chi_square_rng.hpp e7aebdcf7117aeffb986f72d7ad73369 *inst/include/stan/math/prim/prob/dirichlet_log.hpp 90722684351dafe253275312678f78f0 *inst/include/stan/math/prim/prob/dirichlet_lpdf.hpp 1236d0f317374abe48b1cf410affaa2e *inst/include/stan/math/prim/prob/dirichlet_lpmf.hpp a7bbfe00581640aab1afa9a0addfb192 *inst/include/stan/math/prim/prob/dirichlet_rng.hpp 73a640e5dff3423a922869d544622d96 *inst/include/stan/math/prim/prob/discrete_range_ccdf_log.hpp 4e59f607ed919c182547f6152bada0f9 *inst/include/stan/math/prim/prob/discrete_range_cdf.hpp 3493aa1035d15284d63b6583b6b6a048 *inst/include/stan/math/prim/prob/discrete_range_cdf_log.hpp de4c17673b387877adf88641efaf9389 *inst/include/stan/math/prim/prob/discrete_range_lccdf.hpp 2a91ba8f8f3ecfcb6c1e4c63f5d2d3f6 *inst/include/stan/math/prim/prob/discrete_range_lcdf.hpp 09f3337203fce3d0bf3a14b4b8020e1a 
*inst/include/stan/math/prim/prob/discrete_range_log.hpp da589ceee1d29da8fcf7ddc209f62fac *inst/include/stan/math/prim/prob/discrete_range_lpmf.hpp dd917c0e467eae8c8a0e1fe9b9286475 *inst/include/stan/math/prim/prob/discrete_range_rng.hpp cdad3b2a4c75cae15207481c81a1277e *inst/include/stan/math/prim/prob/double_exponential_ccdf_log.hpp 0592b51b77d6267b513b4a73dc412c6b *inst/include/stan/math/prim/prob/double_exponential_cdf.hpp ff0892b510eba19c08c14fa51430b145 *inst/include/stan/math/prim/prob/double_exponential_cdf_log.hpp 2630b1e06d8d8659c8c5d348e59101af *inst/include/stan/math/prim/prob/double_exponential_lccdf.hpp 15f8ac7d55dca3288171e93114f672d1 *inst/include/stan/math/prim/prob/double_exponential_lcdf.hpp 8f8875a420898f09bcc8e002ce5ee0fb *inst/include/stan/math/prim/prob/double_exponential_log.hpp 12b0f96144c5bd2765314f1d0db1402c *inst/include/stan/math/prim/prob/double_exponential_lpdf.hpp 99cb15f026fcb5e07309ec2d31bf6c13 *inst/include/stan/math/prim/prob/double_exponential_rng.hpp 62e9583bb795cf354cb57480f81eacaa *inst/include/stan/math/prim/prob/exp_mod_normal_ccdf_log.hpp a2606a253118b8f4c27e36a58e68bcb1 *inst/include/stan/math/prim/prob/exp_mod_normal_cdf.hpp a112b2763e0aee43b795907143bae523 *inst/include/stan/math/prim/prob/exp_mod_normal_cdf_log.hpp 3d31bba7b2b555dee57b26de2c70cd0e *inst/include/stan/math/prim/prob/exp_mod_normal_lccdf.hpp 168c73337f903ef3c2f1053bb6d20de9 *inst/include/stan/math/prim/prob/exp_mod_normal_lcdf.hpp e786148f16da0984972161a9820fe211 *inst/include/stan/math/prim/prob/exp_mod_normal_log.hpp 19e701d3061611dd991d522ae6ad61f3 *inst/include/stan/math/prim/prob/exp_mod_normal_lpdf.hpp 1372979fe382d4953fd7c5e4e4b07df1 *inst/include/stan/math/prim/prob/exp_mod_normal_rng.hpp 44560397c63c1f41c7d28cb4b1788c6f *inst/include/stan/math/prim/prob/exponential_ccdf_log.hpp 51db79250a642b1aa6c6c739ee8baeeb *inst/include/stan/math/prim/prob/exponential_cdf.hpp 99e41393e93aea7207949d986de33867 
*inst/include/stan/math/prim/prob/exponential_cdf_log.hpp 4a2f827dc629818625eadebaef1dac20 *inst/include/stan/math/prim/prob/exponential_lccdf.hpp ae6a964b2462215f4354bf0b0f0b7882 *inst/include/stan/math/prim/prob/exponential_lcdf.hpp e78ee96b8637a45143bb94a00a6f9462 *inst/include/stan/math/prim/prob/exponential_log.hpp 42ef856429fde0db84af2661d57de6e2 *inst/include/stan/math/prim/prob/exponential_lpdf.hpp 503d444f99091607998810a986183fdd *inst/include/stan/math/prim/prob/exponential_rng.hpp 47b524384e72c0dda8d3551921663f75 *inst/include/stan/math/prim/prob/frechet_ccdf_log.hpp 0b482ebce0414919eafb8d6af1eb168d *inst/include/stan/math/prim/prob/frechet_cdf.hpp 836632da3ebf84f97a1a5d76e5627091 *inst/include/stan/math/prim/prob/frechet_cdf_log.hpp b015f551a5de0fad7a2e1c5bd9aa4d8b *inst/include/stan/math/prim/prob/frechet_lccdf.hpp 0b47f15fa0660488fb9ef49bc5af2d6c *inst/include/stan/math/prim/prob/frechet_lcdf.hpp bd19b2402e552dffb04949cad61ed046 *inst/include/stan/math/prim/prob/frechet_log.hpp 3d48759e9d810b2617d013e31c344286 *inst/include/stan/math/prim/prob/frechet_lpdf.hpp ad59166802e6daeaa3d761f01d54c3fd *inst/include/stan/math/prim/prob/frechet_rng.hpp a8857a83c7efa40172da357d14efd4e6 *inst/include/stan/math/prim/prob/gamma_ccdf_log.hpp e58b39af1ff82094559d482083cb99fc *inst/include/stan/math/prim/prob/gamma_cdf.hpp 5cf040a3830b5e80a00c95cb4466f072 *inst/include/stan/math/prim/prob/gamma_cdf_log.hpp b6a361bbde21420d9b460f3eef8ddfdd *inst/include/stan/math/prim/prob/gamma_lccdf.hpp e3fa99cad293c5ddaf089396cfa085e3 *inst/include/stan/math/prim/prob/gamma_lcdf.hpp a66c465b260450b68c990c559dc3c19b *inst/include/stan/math/prim/prob/gamma_log.hpp 3eef3a5a98f8775a3344b842a24ebf89 *inst/include/stan/math/prim/prob/gamma_lpdf.hpp 59665aaa9b4cf8cc0f7e9cef07572eb8 *inst/include/stan/math/prim/prob/gamma_rng.hpp 2ca15d34d308a055d04888c7bc983cc7 *inst/include/stan/math/prim/prob/gaussian_dlm_obs_log.hpp ef8350e2f3315c4557dbf6838084ccf5 
*inst/include/stan/math/prim/prob/gaussian_dlm_obs_lpdf.hpp 9b3720ccaf2b2898a23b6dfd3952723a *inst/include/stan/math/prim/prob/gaussian_dlm_obs_rng.hpp 75afed3b77de54ecf11848c2df35e9ce *inst/include/stan/math/prim/prob/gumbel_ccdf_log.hpp 5bc58783085d69e682684848575dfcc7 *inst/include/stan/math/prim/prob/gumbel_cdf.hpp 7637db82a2ed88c0ec2888f5550f1abb *inst/include/stan/math/prim/prob/gumbel_cdf_log.hpp 090f9a0dcdb09f41c0eddbea4f0443f9 *inst/include/stan/math/prim/prob/gumbel_lccdf.hpp 70756a93f49387b3ca7e8fd8f4658626 *inst/include/stan/math/prim/prob/gumbel_lcdf.hpp 8ce7c9a3a4e3bd9a99b8748648d86fd4 *inst/include/stan/math/prim/prob/gumbel_log.hpp e60d6863b70e1f54410002daffb71f45 *inst/include/stan/math/prim/prob/gumbel_lpdf.hpp 633c466da847e61b973a64e89b085e1e *inst/include/stan/math/prim/prob/gumbel_rng.hpp 0606ee8ad63f8dffbd62da7babbb2425 *inst/include/stan/math/prim/prob/hmm_hidden_state_prob.hpp 01d7b90245d42925637c89f81815396a *inst/include/stan/math/prim/prob/hmm_latent_rng.hpp 69b3d37d09bd99c96f285efe60bb6573 *inst/include/stan/math/prim/prob/hmm_marginal.hpp 974b7c32212ee8127ec2a22d80cb0a97 *inst/include/stan/math/prim/prob/hypergeometric_log.hpp f23f3a276ab362b38b579104720bd2fe *inst/include/stan/math/prim/prob/hypergeometric_lpmf.hpp 5a49f98eab22a5e50d243efc2641f7d2 *inst/include/stan/math/prim/prob/hypergeometric_rng.hpp 86f1395abfd0899114e92757d21d17f6 *inst/include/stan/math/prim/prob/inv_chi_square_ccdf_log.hpp a802882fc8033c9737c1addae86ac672 *inst/include/stan/math/prim/prob/inv_chi_square_cdf.hpp e8db43338424dd847ccfd538fa94dad5 *inst/include/stan/math/prim/prob/inv_chi_square_cdf_log.hpp 7f37acd7293b81b307f76ec6e027dab5 *inst/include/stan/math/prim/prob/inv_chi_square_lccdf.hpp 97f3af378e47b60fa9bf2553cfec273e *inst/include/stan/math/prim/prob/inv_chi_square_lcdf.hpp 1cb4dec4556da7c9cfa1e30d0210398a *inst/include/stan/math/prim/prob/inv_chi_square_log.hpp 48190ff5d662dfe0df17d2a2625d82a0 *inst/include/stan/math/prim/prob/inv_chi_square_lpdf.hpp 
ba8017908426b3cb936f81a1af5173a0 *inst/include/stan/math/prim/prob/inv_chi_square_rng.hpp a8f940d4c89fea128c2003fb50830a12 *inst/include/stan/math/prim/prob/inv_gamma_ccdf_log.hpp 01e82c3de2b2c2764a0bd1ab9a0cfc5e *inst/include/stan/math/prim/prob/inv_gamma_cdf.hpp d44387ae61f2061c2c144bddad3a611b *inst/include/stan/math/prim/prob/inv_gamma_cdf_log.hpp 98e9b8f84283faf633921cf85d530970 *inst/include/stan/math/prim/prob/inv_gamma_lccdf.hpp c66e35218ee88b607abdd27dc7819325 *inst/include/stan/math/prim/prob/inv_gamma_lcdf.hpp 025db9a40c3f3cd723d12482c2c647dd *inst/include/stan/math/prim/prob/inv_gamma_log.hpp f0ba492f99e92ce1e7ab9e0307507036 *inst/include/stan/math/prim/prob/inv_gamma_lpdf.hpp f18059750cd91cbdb235875baf8d2f37 *inst/include/stan/math/prim/prob/inv_gamma_rng.hpp ac5ab688cc5814b31f2c59a002557bc0 *inst/include/stan/math/prim/prob/inv_wishart_cholesky_lpdf.hpp 7ba893a591556006f2d29ecb77020cce *inst/include/stan/math/prim/prob/inv_wishart_cholesky_rng.hpp ccb8642877d188a9ab43c9b9d334a7f8 *inst/include/stan/math/prim/prob/inv_wishart_log.hpp a0e255cd9fa68e20729b2b7d57a51169 *inst/include/stan/math/prim/prob/inv_wishart_lpdf.hpp 540a7173a1a244834a816425af403f31 *inst/include/stan/math/prim/prob/inv_wishart_rng.hpp 20d4a63adfa2dcae417d889609592a1c *inst/include/stan/math/prim/prob/lkj_corr_cholesky_log.hpp ff715599b19b21c8090d2103cd0ac952 *inst/include/stan/math/prim/prob/lkj_corr_cholesky_lpdf.hpp b781361e95d601b4dec1f14655b4566c *inst/include/stan/math/prim/prob/lkj_corr_cholesky_rng.hpp e5738b4b8262d0f206bd7332e8be37c8 *inst/include/stan/math/prim/prob/lkj_corr_log.hpp ebfb81f7ae9a6a5c0c922ec22e68fc19 *inst/include/stan/math/prim/prob/lkj_corr_lpdf.hpp 47f8f2421abe5a1e044f9b3653f6f25a *inst/include/stan/math/prim/prob/lkj_corr_rng.hpp 670d50e104033b346bda5907b1af9068 *inst/include/stan/math/prim/prob/lkj_cov_log.hpp f8e8267e93f8b28387e667c3e8a196b2 *inst/include/stan/math/prim/prob/lkj_cov_lpdf.hpp 4dd96fdd7dcfd7161ec0691608ab5f1d 
*inst/include/stan/math/prim/prob/logistic_ccdf_log.hpp 8c3bd12b32cc2d4ab0806f7701f53f38 *inst/include/stan/math/prim/prob/logistic_cdf.hpp 695cc3fb90e6f4c2b3e55219ad863daf *inst/include/stan/math/prim/prob/logistic_cdf_log.hpp ff1705cf9a7ce7407edcb2b834ca929e *inst/include/stan/math/prim/prob/logistic_lccdf.hpp e1340892b36e862546a28a21dbdf2714 *inst/include/stan/math/prim/prob/logistic_lcdf.hpp ff9d5756e6ea09f1d46ff414eb12763c *inst/include/stan/math/prim/prob/logistic_log.hpp ebac41d849c668c365cc9e268f333c41 *inst/include/stan/math/prim/prob/logistic_lpdf.hpp 6be5cf90c4004aca4b80f258a14092c9 *inst/include/stan/math/prim/prob/logistic_rng.hpp 1bde6026c7a6d106ffd167dc5515ce16 *inst/include/stan/math/prim/prob/loglogistic_cdf.hpp 57344da61d50c0950e9e9ce6180f2d6d *inst/include/stan/math/prim/prob/loglogistic_log.hpp 568bf4b77f535e9b9668c66dd318d9b4 *inst/include/stan/math/prim/prob/loglogistic_lpdf.hpp 73fe1a31f7e2dfbc7bd5dd5c9210b992 *inst/include/stan/math/prim/prob/loglogistic_rng.hpp 468dac43fdd4b079d1ffef0cf9aaa992 *inst/include/stan/math/prim/prob/lognormal_ccdf_log.hpp 206af0d444ff799d7414daa6936a3164 *inst/include/stan/math/prim/prob/lognormal_cdf.hpp 2cd6d48219968d1323299ee400af917a *inst/include/stan/math/prim/prob/lognormal_cdf_log.hpp 5fbac57f9c23c32e93df1a3fdb78b943 *inst/include/stan/math/prim/prob/lognormal_lccdf.hpp ef2b00ed80c7e1acc5c05f5c62a80184 *inst/include/stan/math/prim/prob/lognormal_lcdf.hpp 9674e5a7ea08f7f35e85c1d654874473 *inst/include/stan/math/prim/prob/lognormal_log.hpp a733e61c9b6039025174c3542deff6b7 *inst/include/stan/math/prim/prob/lognormal_lpdf.hpp e015a6f0818860f36842c126149181a6 *inst/include/stan/math/prim/prob/lognormal_rng.hpp 783e43f9e997000167071f4d894aabe4 *inst/include/stan/math/prim/prob/matrix_normal_prec_log.hpp b43a8af78670aab5d3c56510c11bc511 *inst/include/stan/math/prim/prob/matrix_normal_prec_lpdf.hpp b107733c729280da011398f8e431fbac *inst/include/stan/math/prim/prob/matrix_normal_prec_rng.hpp 
8b6a977fb8aeb808393331620e81c4b3 *inst/include/stan/math/prim/prob/multi_gp_cholesky_log.hpp e63917f18eabc2eaa856662027fbbfd4 *inst/include/stan/math/prim/prob/multi_gp_cholesky_lpdf.hpp 298167212e9208d106eb7d393514a4a8 *inst/include/stan/math/prim/prob/multi_gp_log.hpp 5acb37bba283c602da39d2088920aa4f *inst/include/stan/math/prim/prob/multi_gp_lpdf.hpp ccde82cfbee6ec9cc8a2e2d76f9b48a3 *inst/include/stan/math/prim/prob/multi_normal_cholesky_log.hpp 0d8039de811e629eba3072b8b0b6973a *inst/include/stan/math/prim/prob/multi_normal_cholesky_lpdf.hpp 237cf3e71b53bd5f5af3c32fdcb6d943 *inst/include/stan/math/prim/prob/multi_normal_cholesky_rng.hpp ae6050dce3f3255745973855df2e4078 *inst/include/stan/math/prim/prob/multi_normal_log.hpp 016fe84ef904660d07b67b3505354b48 *inst/include/stan/math/prim/prob/multi_normal_lpdf.hpp d666bb980daa2ac2e4feff214ba8aea3 *inst/include/stan/math/prim/prob/multi_normal_prec_log.hpp fb3dc36faa1c94b990a049f1e58da020 *inst/include/stan/math/prim/prob/multi_normal_prec_lpdf.hpp 4275ea4569432a7810034f0b5f8ab98f *inst/include/stan/math/prim/prob/multi_normal_prec_rng.hpp 66f6504a245ca03ac3f33f4a678559c3 *inst/include/stan/math/prim/prob/multi_normal_rng.hpp 34418ebac65eb959a76d3492a43050ce *inst/include/stan/math/prim/prob/multi_student_t_cholesky_lpdf.hpp dfb68ff113e5c785a994925643f505d4 *inst/include/stan/math/prim/prob/multi_student_t_cholesky_rng.hpp 637b59825604307476c07b823ef28c8d *inst/include/stan/math/prim/prob/multi_student_t_log.hpp 840a612c8807fdc04e8b7825f885093f *inst/include/stan/math/prim/prob/multi_student_t_lpdf.hpp d53f421ad1db2e78867a66c73af91d18 *inst/include/stan/math/prim/prob/multi_student_t_rng.hpp 98834c20a4b8f7e1420927e7262f26fe *inst/include/stan/math/prim/prob/multinomial_log.hpp 3331bf455787b919bd7fb4fa071a59f1 *inst/include/stan/math/prim/prob/multinomial_logit_log.hpp b472691cebb36283c652e207f7d3c6b4 *inst/include/stan/math/prim/prob/multinomial_logit_lpmf.hpp 6817328286a30a980ab34fa672a927aa 
*inst/include/stan/math/prim/prob/multinomial_logit_rng.hpp c50ceae76ee874da548333eb1877037b *inst/include/stan/math/prim/prob/multinomial_lpmf.hpp fa6bc27c268048cd8984b8d4b0bcaec7 *inst/include/stan/math/prim/prob/multinomial_rng.hpp fe60b54b8adf5fbb8de1afe7b8c29389 *inst/include/stan/math/prim/prob/neg_binomial_2_ccdf_log.hpp 0562abd2ef61ec3d868664925a434482 *inst/include/stan/math/prim/prob/neg_binomial_2_cdf.hpp b16baa2812779c12c12a48e48e6b2590 *inst/include/stan/math/prim/prob/neg_binomial_2_cdf_log.hpp 856190e1fe88bd7d38b7685bf7af1890 *inst/include/stan/math/prim/prob/neg_binomial_2_lccdf.hpp b791965edd286667fff48ce56fb92bf3 *inst/include/stan/math/prim/prob/neg_binomial_2_lcdf.hpp 4eec5fc2481ad68ab7595987b5b4c7cf *inst/include/stan/math/prim/prob/neg_binomial_2_log.hpp 332536aa40dc65060464ac7f5218a80e *inst/include/stan/math/prim/prob/neg_binomial_2_log_glm_log.hpp 2711574d5fb07ca2e091005bbeab3670 *inst/include/stan/math/prim/prob/neg_binomial_2_log_glm_lpmf.hpp 1c87e10dde69a65f4962eac97c47c4b8 *inst/include/stan/math/prim/prob/neg_binomial_2_log_log.hpp 276ec2f5a118715b380f9505185ed9a4 *inst/include/stan/math/prim/prob/neg_binomial_2_log_lpmf.hpp adddab6f4dfd0246f62b95405e1d426e *inst/include/stan/math/prim/prob/neg_binomial_2_log_rng.hpp e251ebd8a498b3cae2b3c8d63a8f0b98 *inst/include/stan/math/prim/prob/neg_binomial_2_lpmf.hpp 3f42a9b2deca98306d49d1800c275cf7 *inst/include/stan/math/prim/prob/neg_binomial_2_rng.hpp c1793c4137299a1e679ca707489ade92 *inst/include/stan/math/prim/prob/neg_binomial_ccdf_log.hpp e7e16f00188a63f4b2cc4a0add0c3f38 *inst/include/stan/math/prim/prob/neg_binomial_cdf.hpp f756155e014221d186bb91763223d261 *inst/include/stan/math/prim/prob/neg_binomial_cdf_log.hpp 81462fb122c58f19a22e1684e2905d3c *inst/include/stan/math/prim/prob/neg_binomial_lccdf.hpp f3edfeca1a5d0087046af08144741200 *inst/include/stan/math/prim/prob/neg_binomial_lcdf.hpp 11b042c06f7923a8f18cab57026fb3e9 *inst/include/stan/math/prim/prob/neg_binomial_log.hpp 
ab7ac64dab6c7a3d27ce47757a13a72d *inst/include/stan/math/prim/prob/neg_binomial_lpmf.hpp 52a8f92fb4f2db1868fb1710ef3617a5 *inst/include/stan/math/prim/prob/neg_binomial_rng.hpp 3dcfecf94738a5d868711c54ebd8ff4c *inst/include/stan/math/prim/prob/normal_ccdf_log.hpp 113d05f35795336e5e3b4d7a24c5bbe8 *inst/include/stan/math/prim/prob/normal_cdf.hpp 328ff884816ed34f0b3a40c7c3d3e092 *inst/include/stan/math/prim/prob/normal_cdf_log.hpp a6552167e8bb01af56bdf579636c8a1b *inst/include/stan/math/prim/prob/normal_id_glm_log.hpp 4769a6d51a4011471b2d7f34249cdd3d *inst/include/stan/math/prim/prob/normal_id_glm_lpdf.hpp ecc2d197151d14b06457acc3f2a7be8b *inst/include/stan/math/prim/prob/normal_lccdf.hpp b18f9eb064e8df0831ad958b85d81532 *inst/include/stan/math/prim/prob/normal_lcdf.hpp 30c979077285bdbb0efd627c07853230 *inst/include/stan/math/prim/prob/normal_log.hpp 618308352244fd970f892e695450d198 *inst/include/stan/math/prim/prob/normal_lpdf.hpp 824a16d1a4a992348fc5286d7ae36b99 *inst/include/stan/math/prim/prob/normal_rng.hpp 4f3a12b63a316f0c24e6cd8fc605d24d *inst/include/stan/math/prim/prob/normal_sufficient_log.hpp 96f662ff19b449cc1ff6c52d838e6281 *inst/include/stan/math/prim/prob/normal_sufficient_lpdf.hpp 331f9bdfc3a0b97c610dfc1fdf5fdfed *inst/include/stan/math/prim/prob/ordered_logistic_glm_lpmf.hpp 4fd1364f8e67269c0fd33e6f26150548 *inst/include/stan/math/prim/prob/ordered_logistic_log.hpp bb194a7fa210b6d9199978054dec8739 *inst/include/stan/math/prim/prob/ordered_logistic_lpmf.hpp c1d0125dc1646090205edc1436078981 *inst/include/stan/math/prim/prob/ordered_logistic_rng.hpp 4dc6d4ebed2d4e681c1b2b907b340d18 *inst/include/stan/math/prim/prob/ordered_probit_log.hpp 7cad78fcf9a3a5060760e97e9c61ebf6 *inst/include/stan/math/prim/prob/ordered_probit_lpmf.hpp 27f3d39e0e0320e4029e8b25f90cc588 *inst/include/stan/math/prim/prob/ordered_probit_rng.hpp c4314970010b102b479eb6ebe9ea22a0 *inst/include/stan/math/prim/prob/pareto_ccdf_log.hpp 22cb2ce33682400b12f7460ef7eb39a0 
*inst/include/stan/math/prim/prob/pareto_cdf.hpp 0b11f6b991496f7a322e37cfc9eac0c6 *inst/include/stan/math/prim/prob/pareto_cdf_log.hpp 25ac838d3a35471ef2e3ffea18a096f3 *inst/include/stan/math/prim/prob/pareto_lccdf.hpp b98a662fd637d35367c803918c5525ef *inst/include/stan/math/prim/prob/pareto_lcdf.hpp f02acaef4aba2d390e27e29ff280c62d *inst/include/stan/math/prim/prob/pareto_log.hpp 931828ed5baa582645454b5851aa23f6 *inst/include/stan/math/prim/prob/pareto_lpdf.hpp d741fbde30f028e70449d378aebf6e92 *inst/include/stan/math/prim/prob/pareto_rng.hpp 978b5cdedc3c4b768de572c9f75da340 *inst/include/stan/math/prim/prob/pareto_type_2_ccdf_log.hpp 9cc51f5ba26ab7f1041e29798a926213 *inst/include/stan/math/prim/prob/pareto_type_2_cdf.hpp 9b741e050261844c9fa15ccf0ae345b6 *inst/include/stan/math/prim/prob/pareto_type_2_cdf_log.hpp 929f7266e84e4ff1981d18a5ee3b6ea7 *inst/include/stan/math/prim/prob/pareto_type_2_lccdf.hpp 5130dd95a09019b97c2c87b4a6d71309 *inst/include/stan/math/prim/prob/pareto_type_2_lcdf.hpp f0aa61aa38846e20ba06dd688e035fa3 *inst/include/stan/math/prim/prob/pareto_type_2_log.hpp 12d07f0659aa6bab9d5a1c0591eaddd2 *inst/include/stan/math/prim/prob/pareto_type_2_lpdf.hpp 72070c912f82e53cbe83f55eb4f6430e *inst/include/stan/math/prim/prob/pareto_type_2_rng.hpp e0a8b6929dd7a39c4fda717761827f16 *inst/include/stan/math/prim/prob/poisson_binomial_ccdf_log.hpp cdee0237eff29e8a6188c3c29f6ce43e *inst/include/stan/math/prim/prob/poisson_binomial_cdf.hpp 6fbe026d4ef9f158e684b4e8aa32afa1 *inst/include/stan/math/prim/prob/poisson_binomial_cdf_log.hpp 78ae7922df97a40d602846892f54aaf9 *inst/include/stan/math/prim/prob/poisson_binomial_lccdf.hpp f4ea92d9cd267213b784f8f2c3299279 *inst/include/stan/math/prim/prob/poisson_binomial_lcdf.hpp ede1143009e5b6bffebca32987de15f3 *inst/include/stan/math/prim/prob/poisson_binomial_log.hpp bc7d1da07ddf014ad239901a1ece35cf *inst/include/stan/math/prim/prob/poisson_binomial_lpmf.hpp 8888d284064cf97b0f89e7735ea82646 
*inst/include/stan/math/prim/prob/poisson_binomial_rng.hpp a012fec710bb48bb591f69de8da238fc *inst/include/stan/math/prim/prob/poisson_ccdf_log.hpp d93a8844d42379e2bc855fd1eb3ef6c8 *inst/include/stan/math/prim/prob/poisson_cdf.hpp f72da09359ba9f1c653ca1466ef8edd8 *inst/include/stan/math/prim/prob/poisson_cdf_log.hpp 0ad7b3fae4e00e78132ae211f2cbc433 *inst/include/stan/math/prim/prob/poisson_lccdf.hpp 03ee1bfae58c24c09233fabab5e045f7 *inst/include/stan/math/prim/prob/poisson_lcdf.hpp ee30439184f9383ff7fb5346a95f2d0e *inst/include/stan/math/prim/prob/poisson_log.hpp 7eedce166f7457fae132c4b0effda948 *inst/include/stan/math/prim/prob/poisson_log_glm_log.hpp f09f6ea9d2b14ccee13774147c3cdcc1 *inst/include/stan/math/prim/prob/poisson_log_glm_lpmf.hpp 1b93799bce69cc80fad00288a41c109e *inst/include/stan/math/prim/prob/poisson_log_log.hpp 3476e7c040b0af0d4d072471771a23e5 *inst/include/stan/math/prim/prob/poisson_log_lpmf.hpp 464db3e2a8161a5cd01a086924c98b5c *inst/include/stan/math/prim/prob/poisson_log_rng.hpp deaf28dc686428cc902a0245c5871b74 *inst/include/stan/math/prim/prob/poisson_lpmf.hpp 5c5b91def4a8037156b8df53f910bb54 *inst/include/stan/math/prim/prob/poisson_rng.hpp 5b7461a060b26c1ed3696ce2984805a6 *inst/include/stan/math/prim/prob/rayleigh_ccdf_log.hpp 266887a2b6bb60984fcf574e7f5e330a *inst/include/stan/math/prim/prob/rayleigh_cdf.hpp 65c0ac8902e1afbdc09dd883aa4b31eb *inst/include/stan/math/prim/prob/rayleigh_cdf_log.hpp a8e34a4cdb51b665cc0f99e1de2bde86 *inst/include/stan/math/prim/prob/rayleigh_lccdf.hpp 72a17bc4603f843c0ae5fcd86704b485 *inst/include/stan/math/prim/prob/rayleigh_lcdf.hpp f4c8bed8a9a2d4ef15a1d3eab2607a97 *inst/include/stan/math/prim/prob/rayleigh_log.hpp 434df6c158b00e702dfff79da1f2dde2 *inst/include/stan/math/prim/prob/rayleigh_lpdf.hpp c38e81cfd21ae15f55e7d8db7159f86b *inst/include/stan/math/prim/prob/rayleigh_rng.hpp 4d8c485a06523026b4408c727c6844bc *inst/include/stan/math/prim/prob/scaled_inv_chi_square_ccdf_log.hpp 
14868cbcb0de0b0627235e807e99e8ee *inst/include/stan/math/prim/prob/scaled_inv_chi_square_cdf.hpp 01b058c020c87f2a265d989e2d9e3e65 *inst/include/stan/math/prim/prob/scaled_inv_chi_square_cdf_log.hpp 7eaea19c5b997cf32563b7951ad0d47f *inst/include/stan/math/prim/prob/scaled_inv_chi_square_lccdf.hpp 88f37f47d7eb6f0ebda0934859d9ba33 *inst/include/stan/math/prim/prob/scaled_inv_chi_square_lcdf.hpp a6538eb12266d5ac20473a9f729d5162 *inst/include/stan/math/prim/prob/scaled_inv_chi_square_log.hpp e2e8d17775bf573f11a952c76f5e4376 *inst/include/stan/math/prim/prob/scaled_inv_chi_square_lpdf.hpp 5bd7e650c40292bb24e3f017e84b2718 *inst/include/stan/math/prim/prob/scaled_inv_chi_square_rng.hpp c0c77bae19cbfd0f0434f8fc76d8256e *inst/include/stan/math/prim/prob/skew_double_exponential_ccdf_log.hpp 00f565396e1fa96cc853608fa6673067 *inst/include/stan/math/prim/prob/skew_double_exponential_cdf.hpp 6865af7c1e511a6ea05f0c7b319fe7b7 *inst/include/stan/math/prim/prob/skew_double_exponential_cdf_log.hpp 7570e972dc3d7fed6bf917f36ffa76d8 *inst/include/stan/math/prim/prob/skew_double_exponential_lccdf.hpp fad6e307d0b2d40e82fdea63c71006ef *inst/include/stan/math/prim/prob/skew_double_exponential_lcdf.hpp d77c550ee0b03ebede81b672640eb3cb *inst/include/stan/math/prim/prob/skew_double_exponential_log.hpp fbfcd3919a8e32e1ce38d5d86a98d4a7 *inst/include/stan/math/prim/prob/skew_double_exponential_lpdf.hpp 33a24a794e430cb4b9ce1c1c55de8191 *inst/include/stan/math/prim/prob/skew_double_exponential_rng.hpp 8d1502f90c15fd4cfb72ebf05b48e481 *inst/include/stan/math/prim/prob/skew_normal_ccdf_log.hpp 0385dd11698943731da6fa994789ce6b *inst/include/stan/math/prim/prob/skew_normal_cdf.hpp c13e8c6c528ca6993651debcfae391cb *inst/include/stan/math/prim/prob/skew_normal_cdf_log.hpp 3856bf10772dfda219bd9d44dc529c41 *inst/include/stan/math/prim/prob/skew_normal_lccdf.hpp 0e4478f3789cf2d3369a58e50917b157 *inst/include/stan/math/prim/prob/skew_normal_lcdf.hpp 2f266d96f1157da56241caf716c99c18 
*inst/include/stan/math/prim/prob/skew_normal_log.hpp 2871114cc83adefc47c54113b5a1a45e *inst/include/stan/math/prim/prob/skew_normal_lpdf.hpp 4f1f434fcf284f81c3d74c6537fddfc4 *inst/include/stan/math/prim/prob/skew_normal_rng.hpp 1db48aa6b47c6088bf28dd6ac4d74053 *inst/include/stan/math/prim/prob/std_normal_ccdf_log.hpp c835813d5768d408d523e452bf01f6f8 *inst/include/stan/math/prim/prob/std_normal_cdf.hpp fbcc64e3f23acf4f90d8697872eba3be *inst/include/stan/math/prim/prob/std_normal_cdf_log.hpp 3567ab270956fd97ae4ef139a22a99d9 *inst/include/stan/math/prim/prob/std_normal_lccdf.hpp bedcd78e8bb9be8d1b4212a08a73d145 *inst/include/stan/math/prim/prob/std_normal_lcdf.hpp e28d70fc24799aa5defecbed5d4c82ab *inst/include/stan/math/prim/prob/std_normal_log.hpp 85ef321900eee0de3837f5eae46dfed9 *inst/include/stan/math/prim/prob/std_normal_log_qf.hpp 1a71974163a07db9a1fcdb4c59fa2d4e *inst/include/stan/math/prim/prob/std_normal_lpdf.hpp 3cbe608835de8100a6c390b7b703bbf6 *inst/include/stan/math/prim/prob/std_normal_rng.hpp 5c16bbde6dba16a657b850ba1764a658 *inst/include/stan/math/prim/prob/student_t_ccdf_log.hpp 3e522d1b4e175b9713c3a1406147c145 *inst/include/stan/math/prim/prob/student_t_cdf.hpp 94d99dcbdc43b28f25dc93a4e126ee53 *inst/include/stan/math/prim/prob/student_t_cdf_log.hpp 72ac553ea6421122a7912e5df4ccd64e *inst/include/stan/math/prim/prob/student_t_lccdf.hpp 5c8c1a7b736690ab38400dae4ba784b7 *inst/include/stan/math/prim/prob/student_t_lcdf.hpp 4ae88fb2f6e9ca26259c49bd879f3a2a *inst/include/stan/math/prim/prob/student_t_log.hpp 99963922b9e98911c9dfe75f565248f2 *inst/include/stan/math/prim/prob/student_t_lpdf.hpp 1b703f68dfd23833acfc1880be6374f7 *inst/include/stan/math/prim/prob/student_t_rng.hpp 8ea1d5f6bd65bd3ed46a94e40ab6f574 *inst/include/stan/math/prim/prob/uniform_ccdf_log.hpp 7be00b80aa815fc3a31e45b5a5a23bd2 *inst/include/stan/math/prim/prob/uniform_cdf.hpp 4302a03eb4b77a10f0cf238f5cc33135 *inst/include/stan/math/prim/prob/uniform_cdf_log.hpp 
d6b19a97f762ed977388f601e4dc1ed7 *inst/include/stan/math/prim/prob/uniform_lccdf.hpp 9581836db5e5c17e85b54e17b28d6d46 *inst/include/stan/math/prim/prob/uniform_lcdf.hpp 865abb5930e8a566b1c86f345bf39296 *inst/include/stan/math/prim/prob/uniform_log.hpp 25d38b6c4db6c0fe6b357a0ec0552b5d *inst/include/stan/math/prim/prob/uniform_lpdf.hpp 06aef810d9806b32cbf365f407ef81d9 *inst/include/stan/math/prim/prob/uniform_rng.hpp 039c061bc36d9b7d3d0a8d2cbbb14b26 *inst/include/stan/math/prim/prob/von_mises_ccdf_log.hpp 51f51a9bab353240e6c6de00cfb1f159 *inst/include/stan/math/prim/prob/von_mises_cdf.hpp 7e274d3ca799be6279fa0cc0940419cf *inst/include/stan/math/prim/prob/von_mises_cdf_log.hpp efde963311c3a6a3b60abb5bc7c6a08e *inst/include/stan/math/prim/prob/von_mises_lccdf.hpp b9f71f045cb8b09d30897ccb7e27789b *inst/include/stan/math/prim/prob/von_mises_lcdf.hpp cb1e41db26a5c666a9033d50ad7e04cf *inst/include/stan/math/prim/prob/von_mises_log.hpp 3e43a481e27ac525072cd7440702dd5d *inst/include/stan/math/prim/prob/von_mises_lpdf.hpp 2235f534f585cd154867efffbfe5a66a *inst/include/stan/math/prim/prob/von_mises_rng.hpp 113729f06fb2f83db0e8ba9afeb64c74 *inst/include/stan/math/prim/prob/weibull_ccdf_log.hpp 39eb77337b53847be15ab787d19272c0 *inst/include/stan/math/prim/prob/weibull_cdf.hpp 4ca7f2086d3ac4f94e3c5dcff8fe223e *inst/include/stan/math/prim/prob/weibull_cdf_log.hpp 406467fe377f41af97fe5f01a60aa9ee *inst/include/stan/math/prim/prob/weibull_lccdf.hpp e29e91683f5151a0b1394c498c681727 *inst/include/stan/math/prim/prob/weibull_lcdf.hpp eecc43c65f3178477d14c1e12803fa1d *inst/include/stan/math/prim/prob/weibull_log.hpp 629202c55747bd1e270121ac000f650f *inst/include/stan/math/prim/prob/weibull_lpdf.hpp 31e303cf6f2db44edaeccd8edb8cff07 *inst/include/stan/math/prim/prob/weibull_rng.hpp 5904b55b9381dce4cb13b6b823a04ac9 *inst/include/stan/math/prim/prob/wiener_log.hpp e3cff4c1bca774a08468678fa8e6dc77 *inst/include/stan/math/prim/prob/wiener_lpdf.hpp 0eeda4057e3e01e68efbe4778ae89004 
*inst/include/stan/math/prim/prob/wishart_cholesky_lpdf.hpp 1cc8114b83e391d1fd9cde0b73d3e636 *inst/include/stan/math/prim/prob/wishart_cholesky_rng.hpp 5766443c7998d8e994bbb379fb9f6a3e *inst/include/stan/math/prim/prob/wishart_log.hpp 271c5e32be29bfc3a6bc4922fe435cde *inst/include/stan/math/prim/prob/wishart_lpdf.hpp 56ca50d8ec9bf2945bf88a175247243f *inst/include/stan/math/prim/prob/wishart_rng.hpp a7468f68d0f46bca38c8eb97681fd766 *inst/include/stan/math/rev.hpp 7b9723fc822a1fa567e81e015e9319d4 *inst/include/stan/math/rev/core.hpp bb711c1dde0e3a09639e7573f4a94148 *inst/include/stan/math/rev/core/Eigen_NumTraits.hpp 1f70a6f3552b47e485066b4ded449ea6 *inst/include/stan/math/rev/core/accumulate_adjoints.hpp 89489d3b6596201974b52cf855c4ce8e *inst/include/stan/math/rev/core/arena_allocator.hpp 3c2d27c112b288b2ded35ef9c3a33bed *inst/include/stan/math/rev/core/arena_matrix.hpp 3329424b843894b26b35ebbeaad755ae *inst/include/stan/math/rev/core/autodiffstackstorage.hpp a71aa89905c31b8852c412ea43aa3821 *inst/include/stan/math/rev/core/build_vari_array.hpp 43e393c782a01601cdd431f17ecc1720 *inst/include/stan/math/rev/core/callback_vari.hpp dccd634c297207015f1ae03a7a69d41a *inst/include/stan/math/rev/core/chainable_alloc.hpp 881985b39a215ab9a00ed15e7e9cda37 *inst/include/stan/math/rev/core/chainable_object.hpp 1d936db1dc08057c3041a370cf4df554 *inst/include/stan/math/rev/core/chainablestack.hpp 05c41ff4f3c8891f9ea990f0c5061d74 *inst/include/stan/math/rev/core/count_vars.hpp 68c34e05d440e630e43045a5c125ed0c *inst/include/stan/math/rev/core/ddv_vari.hpp 1b0989ef5b3eef123f0be622b4c633df *inst/include/stan/math/rev/core/deep_copy_vars.hpp 3df4d777906189f2ad4f1ec7e9347729 *inst/include/stan/math/rev/core/dv_vari.hpp 8d9c07187131a47ebe1422233261635a *inst/include/stan/math/rev/core/dvd_vari.hpp ffb77b76a04a6ee823aba36aa7333a26 *inst/include/stan/math/rev/core/dvv_vari.hpp 8eb13a19e7571d8f7d4659f8d7376101 *inst/include/stan/math/rev/core/empty_nested.hpp f2df3d5e841da0d1727c4fee020dcfb6 
*inst/include/stan/math/rev/core/gevv_vvv_vari.hpp 1c9e36b6a1b2510eda3148dffaae7e11 *inst/include/stan/math/rev/core/grad.hpp 3cdc6ece97bbe8640d922f0b9226c2f8 *inst/include/stan/math/rev/core/init_chainablestack.hpp 2f0b2a02f9b761b03c5e83f08f550116 *inst/include/stan/math/rev/core/matrix_vari.hpp 27cb84dc83dbc9d5887ae4897122de24 *inst/include/stan/math/rev/core/nested_rev_autodiff.hpp f2aaf934d7593d23d4dd78df2c748b72 *inst/include/stan/math/rev/core/nested_size.hpp 544bc839f06c8edd36a24905749465bf *inst/include/stan/math/rev/core/operator_addition.hpp db1a79a19f8b5a510fa730fe7ddbc769 *inst/include/stan/math/rev/core/operator_divide_equal.hpp 968bb0a180e9d8409facb4d710755016 *inst/include/stan/math/rev/core/operator_division.hpp 65012b6a06f69fa03e3a0570a2198fe2 *inst/include/stan/math/rev/core/operator_equal.hpp 71c3a5ed5ffa205b0134a6f0dd063459 *inst/include/stan/math/rev/core/operator_greater_than.hpp 1ae1e152d5d9315b50c289fea44b1f5a *inst/include/stan/math/rev/core/operator_greater_than_or_equal.hpp c516a67121a5f15679afd677f254a3fc *inst/include/stan/math/rev/core/operator_less_than.hpp 76a0d7c302351dfd9dd33c351a9a18d5 *inst/include/stan/math/rev/core/operator_less_than_or_equal.hpp c7eae64249fc271818816e91a05f62ea *inst/include/stan/math/rev/core/operator_logical_and.hpp e3306a8f17d2d4230866262a15061dd2 *inst/include/stan/math/rev/core/operator_logical_or.hpp becc3bf8d770320c56a0eb8c3c701e9b *inst/include/stan/math/rev/core/operator_minus_equal.hpp 08f58548414c93a34582c29630b145c3 *inst/include/stan/math/rev/core/operator_multiplication.hpp 6cd03e4e5f4804f8db4231250be89e2e *inst/include/stan/math/rev/core/operator_multiply_equal.hpp 3609c8ff318a651944a0f7dd236bd63f *inst/include/stan/math/rev/core/operator_not_equal.hpp cdc232aa1e63f294cd96cb6a3d4e60c7 *inst/include/stan/math/rev/core/operator_plus_equal.hpp f25daa8adbb95eb0efc61f69b35758ef *inst/include/stan/math/rev/core/operator_subtraction.hpp 13d0a0fa49763535195e3d4e3cc4ccdd 
*inst/include/stan/math/rev/core/operator_unary_decrement.hpp e91a2509b73241acaa5873852a78d25a *inst/include/stan/math/rev/core/operator_unary_increment.hpp 7f84a03d193967557eccb40217273cd4 *inst/include/stan/math/rev/core/operator_unary_negative.hpp 12faeb19dbf326d500c55c68a7b19c50 *inst/include/stan/math/rev/core/operator_unary_not.hpp 20b943f3c5a9e7866e84a8a7cf70abf6 *inst/include/stan/math/rev/core/operator_unary_plus.hpp e221b7bd6c975aed3dbdf344165d0d42 *inst/include/stan/math/rev/core/precomp_vv_vari.hpp 7baa2dbed5a45f6edb76450f3e7f536b *inst/include/stan/math/rev/core/precomp_vvv_vari.hpp 6fb8bad5d10f9b4d3a7c3ba9ac1752bd *inst/include/stan/math/rev/core/precomputed_gradients.hpp 1a4b4bb10cccb90fa56b63c915a3077d *inst/include/stan/math/rev/core/print_stack.hpp 25e0dd880b522b2e4f17ba3bc7409db1 *inst/include/stan/math/rev/core/profiling.hpp e127149126f107eba2a0295be0e873a4 *inst/include/stan/math/rev/core/read_var.hpp 4626bb8eec847fb6d734fed31357f9d1 *inst/include/stan/math/rev/core/recover_memory.hpp 7bcd016274cf690aaecde2f077ac0fa3 *inst/include/stan/math/rev/core/recover_memory_nested.hpp 0d2c7716212cf70e4ae38f386e8ee42d *inst/include/stan/math/rev/core/reverse_pass_callback.hpp 56f6cafb3d0b9657f64f221409d570ac *inst/include/stan/math/rev/core/save_varis.hpp d36f37dab420780f2bfa3d9a75e88813 *inst/include/stan/math/rev/core/scoped_chainablestack.hpp c5241609e66eaf057f2d33aa23638dea *inst/include/stan/math/rev/core/set_zero_all_adjoints.hpp b8ce2320b0e695493f3caf0b04bdc5c9 *inst/include/stan/math/rev/core/set_zero_all_adjoints_nested.hpp 2cbe59607872d4a0032d1127b8b7e2d9 *inst/include/stan/math/rev/core/start_nested.hpp f6abe65f1b0626ec7e88af831e1cab97 *inst/include/stan/math/rev/core/std_complex.hpp 16f7b9a6dfc2086b2417d174c1af1ae5 *inst/include/stan/math/rev/core/std_isinf.hpp 08fa39cd9c37355b80678b38fb11816b *inst/include/stan/math/rev/core/std_isnan.hpp 510bd81b17a0bbdca239f3cc5040917f *inst/include/stan/math/rev/core/std_iterator_traits.hpp 
901047dddc1973764270f998e31260d5 *inst/include/stan/math/rev/core/std_numeric_limits.hpp 4958f53f6968b3472843d84eed259275 *inst/include/stan/math/rev/core/stored_gradient_vari.hpp 9fa7b0bbe2b1692b46a634b6a195746b *inst/include/stan/math/rev/core/typedefs.hpp 1d290398e626d6a85a7353bb2c002f14 *inst/include/stan/math/rev/core/v_vari.hpp 60105a931d773ad57fcc3c1902e5d356 *inst/include/stan/math/rev/core/var.hpp bbef7f1d0a2bc43c7135843a39ab5f67 *inst/include/stan/math/rev/core/var_value_fwd_declare.hpp fd1897da55b187482895be3aaa1f1cc0 *inst/include/stan/math/rev/core/vari.hpp 18dca63624a6b14b617a04d3e960f37e *inst/include/stan/math/rev/core/vd_vari.hpp 708b16c7821b39690fce30b23c0394e7 *inst/include/stan/math/rev/core/vdd_vari.hpp 04e01ccd5afbfa50ad609d7054caddbc *inst/include/stan/math/rev/core/vdv_vari.hpp 2054c78a253073ca5962b40348d9dd8d *inst/include/stan/math/rev/core/vector_vari.hpp 47f1a76471d0fa7784b92224fb0ca987 *inst/include/stan/math/rev/core/vv_vari.hpp 952a30d361a54309ea957986771ec38f *inst/include/stan/math/rev/core/vvd_vari.hpp 17f0e806bbbb7d1c5c45ed4d2758de50 *inst/include/stan/math/rev/core/vvv_vari.hpp f9f642c444690cbe637a9f05b3f58b63 *inst/include/stan/math/rev/core/zero_adjoints.hpp 683231054fb3c33ea5418c5f6e536a0f *inst/include/stan/math/rev/fun.hpp 699c97d24b1ada9d49f68275ff7ea6be *inst/include/stan/math/rev/fun/LDLT_factor.hpp 89f4200d4657cae34f7d7fb05dc84f13 *inst/include/stan/math/rev/fun/Phi.hpp 88fca7514067dc6d56441985d069cb44 *inst/include/stan/math/rev/fun/Phi_approx.hpp 16cb33ecde217dfac1fa39a2d75dec8a *inst/include/stan/math/rev/fun/abs.hpp d5c71a069b9ba2549a8f082490beae9c *inst/include/stan/math/rev/fun/accumulator.hpp ea9b50527aca4521c1cb9f67df39bf8c *inst/include/stan/math/rev/fun/acos.hpp d6b8a5a64cc9bb028459c222d28edd2b *inst/include/stan/math/rev/fun/acosh.hpp 1f4c113a7f211f14ba046830345e2d24 *inst/include/stan/math/rev/fun/adjoint_of.hpp efce87abe6f85c7ef72639639280b545 *inst/include/stan/math/rev/fun/append_col.hpp 
fa253b366a89e7342e4a664a9eeadf8a *inst/include/stan/math/rev/fun/append_row.hpp 66437b7b01ba754453fc69cd6d1f65ca *inst/include/stan/math/rev/fun/arg.hpp 53c6f3723c80cda923cc7fdfe22e2094 *inst/include/stan/math/rev/fun/as_array_or_scalar.hpp 4695b206d27a98822007079f77d175e3 *inst/include/stan/math/rev/fun/as_bool.hpp 174a37fb005fe572b5fffbacc24122cb *inst/include/stan/math/rev/fun/as_column_vector_or_scalar.hpp c85c674904b493ef4adeecb2002780b0 *inst/include/stan/math/rev/fun/asin.hpp b6581ee7309310cecc5c9182c0b542aa *inst/include/stan/math/rev/fun/asinh.hpp 9dda8ffa6db1e449eb77bb4a2e8d93bd *inst/include/stan/math/rev/fun/atan.hpp 851af129b940796f474a4d35bc541acc *inst/include/stan/math/rev/fun/atan2.hpp a6a67c4b92dcc2e64c2f3583d0d4408f *inst/include/stan/math/rev/fun/atanh.hpp 18c1adb0e764bfa19c31a54f97ce7231 *inst/include/stan/math/rev/fun/bessel_first_kind.hpp 6caca59334081167c2ffaeb88a94138e *inst/include/stan/math/rev/fun/bessel_second_kind.hpp ef3a7f2b035eb652465c4dff32f2d245 *inst/include/stan/math/rev/fun/beta.hpp 37e18b7cdbabf38b2dfbf50189addda7 *inst/include/stan/math/rev/fun/binary_log_loss.hpp cdbcfc80f96cd3aadc1958de34225a81 *inst/include/stan/math/rev/fun/cbrt.hpp a937f45f05ff9b3a70e1b52d3b54ca74 *inst/include/stan/math/rev/fun/ceil.hpp 96e7376dcdb0748628fabb9dae0106a5 *inst/include/stan/math/rev/fun/cholesky_corr_constrain.hpp 71aba6635ba7dbac1579070e8b2cc353 *inst/include/stan/math/rev/fun/cholesky_decompose.hpp 352479776481999ee4948474e4e63722 *inst/include/stan/math/rev/fun/cholesky_factor_constrain.hpp b7fb72d787ad3409027463efd7ba5cde *inst/include/stan/math/rev/fun/columns_dot_product.hpp f6713f66ffcf08b545888c674bcbc81a *inst/include/stan/math/rev/fun/columns_dot_self.hpp 825945e6c70a0b167434e5f8541f7f6b *inst/include/stan/math/rev/fun/conj.hpp 505e7b51d22685e4b76c9708f5611795 *inst/include/stan/math/rev/fun/corr_matrix_constrain.hpp 268bf24e7bcb11cc32eb2011b31d9f5e *inst/include/stan/math/rev/fun/cos.hpp f39ba01a1734b8d4756c97f0bb3d34db 
*inst/include/stan/math/rev/fun/cosh.hpp 8ddec09199202c1a0a23572c161be89f *inst/include/stan/math/rev/fun/cov_exp_quad.hpp a035029c862bc7ada9ab3c8cf6498930 *inst/include/stan/math/rev/fun/cov_matrix_constrain.hpp 4cc42af7f61227914b4ce576309e0eb3 *inst/include/stan/math/rev/fun/cov_matrix_constrain_lkj.hpp 4d442c798c6270dbb037c6c6448fb1cb *inst/include/stan/math/rev/fun/csr_matrix_times_vector.hpp 88db686ab5672dabc6ab12228088a70e *inst/include/stan/math/rev/fun/cumulative_sum.hpp 5fc2bffe35c0d5fa0102662165b19220 *inst/include/stan/math/rev/fun/determinant.hpp 20bb4f91aeb7b16ad298377d7f4ececc *inst/include/stan/math/rev/fun/diag_post_multiply.hpp bdda36858eb7640097a8557391ab08d4 *inst/include/stan/math/rev/fun/diag_pre_multiply.hpp 7a334ad2e07d77536208fc358c088def *inst/include/stan/math/rev/fun/digamma.hpp 0f365bc7f6396beb1d1ee24d1fcb1a18 *inst/include/stan/math/rev/fun/dims.hpp a9926d50b183cdf2f20c8e9ad1cfd3b9 *inst/include/stan/math/rev/fun/divide.hpp 78233ae0958076f45b4de6b1ff23622a *inst/include/stan/math/rev/fun/dot_product.hpp 16991d959831938c20cc253c6d8ca331 *inst/include/stan/math/rev/fun/dot_self.hpp efe1782fa6340608a8ed2e32cbf6cb82 *inst/include/stan/math/rev/fun/eigendecompose_sym.hpp 057deafbc5d0cf39e3ea24467ed598a6 *inst/include/stan/math/rev/fun/eigenvalues_sym.hpp d9df889b3aac0edffc76d727f58092bc *inst/include/stan/math/rev/fun/eigenvectors_sym.hpp b8dd1123ed818c3fd3cd78553df5140b *inst/include/stan/math/rev/fun/elt_divide.hpp 34f2f38486d1b9c3c0a975736880d9fe *inst/include/stan/math/rev/fun/elt_multiply.hpp bf0c06ff603df31b79d2885cd1a688e0 *inst/include/stan/math/rev/fun/erf.hpp 4a1ef9243e51fe74603e45c4accd645a *inst/include/stan/math/rev/fun/erfc.hpp b083437e74ec2f7e79bd1b18b527280f *inst/include/stan/math/rev/fun/exp.hpp 197dd0fdd7835ee83ae8b9d1d702c49f *inst/include/stan/math/rev/fun/exp2.hpp bb3e6e5b5eabae6295ef966bbc0bb05f *inst/include/stan/math/rev/fun/expm1.hpp e2197eb44b3811c45ca18080322d1fbe *inst/include/stan/math/rev/fun/fabs.hpp 
8b8bbc3eb5214af68446957d8f37f071 *inst/include/stan/math/rev/fun/falling_factorial.hpp 5ef70883e287c36c7a125317e425c1d8 *inst/include/stan/math/rev/fun/fdim.hpp 99586f44cd129cf7c612835c0ad9e2d6 *inst/include/stan/math/rev/fun/fft.hpp c06b36cc1096ea2f0bfb322162888e6b *inst/include/stan/math/rev/fun/fill.hpp 4be7b3aaaa76763232d8b368d29d2b13 *inst/include/stan/math/rev/fun/floor.hpp f4f0420d55fdcf251d6b8ff79a9aa628 *inst/include/stan/math/rev/fun/fma.hpp 2eef237e5611ef8d1e0304911e96054e *inst/include/stan/math/rev/fun/fmax.hpp d394fab3b0246be0765bc4eb934c0638 *inst/include/stan/math/rev/fun/fmin.hpp 612f3d5138e7b39be79582d7e3799616 *inst/include/stan/math/rev/fun/fmod.hpp dfedbfdc8571d39c5edc655e8c81bf47 *inst/include/stan/math/rev/fun/from_var_value.hpp 9b69c520270deb542fb85abafb73cac6 *inst/include/stan/math/rev/fun/gamma_p.hpp 70b26e92a618abfc7f5e12499e9fc50d *inst/include/stan/math/rev/fun/gamma_q.hpp d2a2ccd6fd73513cf1e84f7864b657e3 *inst/include/stan/math/rev/fun/generalized_inverse.hpp 0f88518314b2d7f4d1fa24d46bb0f1c3 *inst/include/stan/math/rev/fun/gp_exp_quad_cov.hpp bab4fe87069fb8f18c9381b9b41639b9 *inst/include/stan/math/rev/fun/gp_periodic_cov.hpp 78de3f0c0c31a0d0760274eae36df991 *inst/include/stan/math/rev/fun/grad.hpp 7303389c6d12ec3f3fbe8f91653eccf6 *inst/include/stan/math/rev/fun/grad_inc_beta.hpp 462171a5c30ecf9e25942c2c82d08407 *inst/include/stan/math/rev/fun/hypergeometric_2F1.hpp bb9c592d10f4072015357f836a46e964 *inst/include/stan/math/rev/fun/hypergeometric_pFq.hpp 29681c70b79f1c744ddb576e544191cc *inst/include/stan/math/rev/fun/hypot.hpp ae9a9e724f8c56d410fd84dd57eeb581 *inst/include/stan/math/rev/fun/identity_constrain.hpp 6ec8907024d3e403e3ca20cb86e74e17 *inst/include/stan/math/rev/fun/identity_free.hpp d7bf17c204682dc6a5c0827abfdd8e3a *inst/include/stan/math/rev/fun/if_else.hpp ad0c5d6c13316981df2695f5a0610573 *inst/include/stan/math/rev/fun/inc_beta.hpp 2f3111e276f7be97f96d1f1503d68902 *inst/include/stan/math/rev/fun/initialize_fill.hpp 
b1989ae803ece28fa81080ce7f29dc2a *inst/include/stan/math/rev/fun/initialize_variable.hpp fb5a740b15eaf6f6b3f3f6938cfa684c *inst/include/stan/math/rev/fun/inv.hpp 40669935b185f2a5724d16eb8d5b7999 *inst/include/stan/math/rev/fun/inv_Phi.hpp caf48b7deb1bee4ea19b25118ebb11d3 *inst/include/stan/math/rev/fun/inv_cloglog.hpp 0ff993cd54913bef9124dba93cc4dd66 *inst/include/stan/math/rev/fun/inv_erfc.hpp f4196b6693a3f62ae07c6c456cde8ef3 *inst/include/stan/math/rev/fun/inv_inc_beta.hpp f3e364f3eded77d6177fa3b61bb7a83c *inst/include/stan/math/rev/fun/inv_logit.hpp 6735e83591f8977d0c42c1a7dc659995 *inst/include/stan/math/rev/fun/inv_sqrt.hpp 6c79fe3257b0df1b5c76b4b3ce2a05e2 *inst/include/stan/math/rev/fun/inv_square.hpp 13f9170a2bacdabf1bed02ae7103119b *inst/include/stan/math/rev/fun/inverse.hpp 880f383f4be0bbfa51296baf03c1b096 *inst/include/stan/math/rev/fun/is_inf.hpp 849545217e2b17b781370d98c2db1524 *inst/include/stan/math/rev/fun/is_nan.hpp 76cd966b9571f87b2a7ad8b5546353bf *inst/include/stan/math/rev/fun/is_uninitialized.hpp d311dbf1a55060865b3620e5858614ce *inst/include/stan/math/rev/fun/lambert_w.hpp 6e949c6e187e30f937a26923cbfe2b5f *inst/include/stan/math/rev/fun/lb_constrain.hpp 5612328c793d75e539f7ae403b63d19f *inst/include/stan/math/rev/fun/lbeta.hpp 48324ca2eed6b775f6d426c706efae60 *inst/include/stan/math/rev/fun/ldexp.hpp fe11f74909c48f4ef9b0c25219122ccb *inst/include/stan/math/rev/fun/lgamma.hpp 45a46ac9ea0a6073a104c57ae1c9dc7b *inst/include/stan/math/rev/fun/lmgamma.hpp f91db9d498f0bdb9d2f2f758e5a55470 *inst/include/stan/math/rev/fun/lmultiply.hpp 646e7b6c376755a0cdba96e9ef2cd3db *inst/include/stan/math/rev/fun/log.hpp 286120699f798319d0c7ce11361d0518 *inst/include/stan/math/rev/fun/log10.hpp 23d603064a698bc607247b8f84861e4d *inst/include/stan/math/rev/fun/log1m.hpp 8a7a964a85a384a2ec54525e6b6ba6c3 *inst/include/stan/math/rev/fun/log1m_exp.hpp a3ddd3aac4b56f6fe0fa0b57533fcc1e *inst/include/stan/math/rev/fun/log1m_inv_logit.hpp a3903707eff32e2a5730910550aa0f2a 
*inst/include/stan/math/rev/fun/log1p.hpp 13a5256caeb800aa05d2c18e351b7cdf *inst/include/stan/math/rev/fun/log1p_exp.hpp 6980e476927460decc69fcea282b5cda *inst/include/stan/math/rev/fun/log2.hpp 9324baafa8cce1ee203489f056167e0b *inst/include/stan/math/rev/fun/log_determinant.hpp c8a0a06efab2101290cdd37af81bd02c *inst/include/stan/math/rev/fun/log_determinant_ldlt.hpp 50605da6a8f02ea64a2cb7199c8263a9 *inst/include/stan/math/rev/fun/log_determinant_spd.hpp 4d75ace56afb9d4143466b3e4b3ab17d *inst/include/stan/math/rev/fun/log_diff_exp.hpp 570cb1f7e4b80e903405faab6c86fbae *inst/include/stan/math/rev/fun/log_falling_factorial.hpp c7924500125cf0c968aadce6ad94676d *inst/include/stan/math/rev/fun/log_inv_logit.hpp 218266e42287fdd5d947e1d35b30f27b *inst/include/stan/math/rev/fun/log_inv_logit_diff.hpp bf9ea4085d766fa83c5f194d6b3968af *inst/include/stan/math/rev/fun/log_mix.hpp 5b6b42b96ce0343901c0b8143d74ce66 *inst/include/stan/math/rev/fun/log_rising_factorial.hpp 810bec2b8ccd22c41c19b97038790af8 *inst/include/stan/math/rev/fun/log_softmax.hpp 2f1ff40703041eb000697a69256e0fd3 *inst/include/stan/math/rev/fun/log_sum_exp.hpp dd18c224ae522d3e005be37f7ef5fdee *inst/include/stan/math/rev/fun/logit.hpp f7157b13cffaea300a4d330947d6cea3 *inst/include/stan/math/rev/fun/lub_constrain.hpp 2531af3cd020c80660b7c82e39e102e9 *inst/include/stan/math/rev/fun/matrix_exp_multiply.hpp 1c4591621e1ba3e8301fe7179ce92d6c *inst/include/stan/math/rev/fun/matrix_power.hpp e733b0cda9384f6fdd6600cfc282bd54 *inst/include/stan/math/rev/fun/mdivide_left.hpp 5cd99e8e816eba199421f80a2a1ad5f0 *inst/include/stan/math/rev/fun/mdivide_left_ldlt.hpp 502bb75672b237b5564d9ffd828935b0 *inst/include/stan/math/rev/fun/mdivide_left_spd.hpp f4f3afbe82e7fd384d718910c1d0723a *inst/include/stan/math/rev/fun/mdivide_left_tri.hpp b8f55e604ef8418cb72fbb3a4cddeba0 *inst/include/stan/math/rev/fun/modified_bessel_first_kind.hpp aff88b0ca238f247302fb0237465c4a1 *inst/include/stan/math/rev/fun/modified_bessel_second_kind.hpp 
caaa03691103227223616c7de119b4da *inst/include/stan/math/rev/fun/multiply.hpp 3d1d14bda9107142370d96b1c46fdcad *inst/include/stan/math/rev/fun/multiply_log.hpp 719ad2f4d08b1c0d3e82632e7e31f4a1 *inst/include/stan/math/rev/fun/multiply_lower_tri_self_transpose.hpp 95d8026475a958b7c3aea8dc30a67990 *inst/include/stan/math/rev/fun/norm.hpp 5c2e2a2e1f9877ac3334d4c447516437 *inst/include/stan/math/rev/fun/norm1.hpp b86dbdb3ad707efdf6911b09f6d5216a *inst/include/stan/math/rev/fun/norm2.hpp ecfeaaa39c3eb8efd4b32f2a594a041e *inst/include/stan/math/rev/fun/ordered_constrain.hpp e44a60626c65c27ba81c609696364d5e *inst/include/stan/math/rev/fun/owens_t.hpp c2e71ab55d2ed66559cb5ccb8cca7d12 *inst/include/stan/math/rev/fun/polar.hpp 95582823285d2d1fb3141ce7ffc10974 *inst/include/stan/math/rev/fun/positive_ordered_constrain.hpp 9a3386d7cae7b7cc0b62f5763d1ac8ef *inst/include/stan/math/rev/fun/pow.hpp fa7e032c8ad173c0be94c284f780e199 *inst/include/stan/math/rev/fun/primitive_value.hpp 5285281e40ef664caca74610c9e845c4 *inst/include/stan/math/rev/fun/proj.hpp 2be9205b5666d910683d652b8bbe7c07 *inst/include/stan/math/rev/fun/quad_form.hpp 6958f353ef4877680ee47ae62a6e0ed5 *inst/include/stan/math/rev/fun/quad_form_sym.hpp 7d8c5ea3bf41dcdcb3187b4c2897c283 *inst/include/stan/math/rev/fun/read_corr_L.hpp 29b6e1f7bfdff61370b7b4fe755fe0f9 *inst/include/stan/math/rev/fun/read_corr_matrix.hpp 8e44128f5f57fc681696002b1f17a42d *inst/include/stan/math/rev/fun/read_cov_L.hpp eaa239eec51999945c84cb608427b245 *inst/include/stan/math/rev/fun/read_cov_matrix.hpp fffe51c2da0b7f36018a921169928a23 *inst/include/stan/math/rev/fun/rep_matrix.hpp 3757463c54386ad20608cdb8ed14f901 *inst/include/stan/math/rev/fun/rep_row_vector.hpp 63344bb97631665a1e4e2a26cbc6351f *inst/include/stan/math/rev/fun/rep_vector.hpp 6d017a44c8bda928804abb7432b7ecb7 *inst/include/stan/math/rev/fun/rising_factorial.hpp d7010e4d20b38961c7c1b404f226d7d0 *inst/include/stan/math/rev/fun/round.hpp af83d229d32f6864d5326004cdbaf578 
*inst/include/stan/math/rev/fun/rows_dot_product.hpp 4da740a566015cd4ecb4f0869dd5b97e *inst/include/stan/math/rev/fun/rows_dot_self.hpp 800a2cdf13655cb49c5de0772524df56 *inst/include/stan/math/rev/fun/sd.hpp b286e0bfdaa8e1a2640d875f7a47519a *inst/include/stan/math/rev/fun/simplex_constrain.hpp ef613aaaec052c71100359f99562a2be *inst/include/stan/math/rev/fun/sin.hpp e6c451034cbf932b7f6cddf668fba109 *inst/include/stan/math/rev/fun/singular_values.hpp b3f8ed51cafd3012bcb7d9060e015aa7 *inst/include/stan/math/rev/fun/sinh.hpp 8b766abee535a6459cedb711129629e1 *inst/include/stan/math/rev/fun/softmax.hpp b39bfba7eaf9a6122d7714433bf8b8e7 *inst/include/stan/math/rev/fun/sqrt.hpp 5f35a0cc54e84278040df24da615f0af *inst/include/stan/math/rev/fun/square.hpp 29ea2bc15d7eda90ec318c9c05283728 *inst/include/stan/math/rev/fun/squared_distance.hpp 39aea13f5ab69f156bf6f69224e9acce *inst/include/stan/math/rev/fun/stan_print.hpp 97d3dc84b60f509d467c48c0c5bab33e *inst/include/stan/math/rev/fun/step.hpp 7d31ffbdd9771cd446f85625d7738438 *inst/include/stan/math/rev/fun/sum.hpp 11355d4b30968e180845ba95c629e59c *inst/include/stan/math/rev/fun/svd.hpp 8d928db81d6a86f460dd5d34428a1098 *inst/include/stan/math/rev/fun/svd_U.hpp b458ca9f52204b9ec6b218cd98b6590a *inst/include/stan/math/rev/fun/svd_V.hpp 8eb70e4959314fef8abcc961e6e9c43f *inst/include/stan/math/rev/fun/tan.hpp 5ae1f282abd93e8c54887aa48ff274af *inst/include/stan/math/rev/fun/tanh.hpp 9961182ef7e3062a6081db1ed7afd4c7 *inst/include/stan/math/rev/fun/tcrossprod.hpp 979f5ca3f948a479b6f4f66e521211a0 *inst/include/stan/math/rev/fun/tgamma.hpp 7b88e6c8bbaa4eb3ba08d9bd475bfd0e *inst/include/stan/math/rev/fun/to_arena.hpp e7b031d48e0442e48880a0b4b6fbc8b9 *inst/include/stan/math/rev/fun/to_soa_sparse_matrix.hpp f2e4f075d344d60204232a5b171685d5 *inst/include/stan/math/rev/fun/to_var.hpp 5564d6d405b94dd86ec1e22c523ba6b0 *inst/include/stan/math/rev/fun/to_var_value.hpp a0e04f48a523c78cb70945f6d2e92659 *inst/include/stan/math/rev/fun/to_vector.hpp 
c8234a34ec8a767bfd0eeb720f2d57db *inst/include/stan/math/rev/fun/trace.hpp adace39242226e684f326a2b15e4477d *inst/include/stan/math/rev/fun/trace_gen_inv_quad_form_ldlt.hpp 93baa8a3dee58f0515810c6202131163 *inst/include/stan/math/rev/fun/trace_gen_quad_form.hpp 053c9e7aba44069391d6ede32218d4cd *inst/include/stan/math/rev/fun/trace_inv_quad_form_ldlt.hpp e8ca5b58abe39643e4e3c54f4906870b *inst/include/stan/math/rev/fun/trace_quad_form.hpp 96d9e1a81398ee5f5c2ce2df3af21ad6 *inst/include/stan/math/rev/fun/trigamma.hpp 5b0a6f11001277eda115e649be39f2ec *inst/include/stan/math/rev/fun/trunc.hpp e4e09cebd2441e1fdd2dec65f046cad3 *inst/include/stan/math/rev/fun/ub_constrain.hpp 399a3531850adb317adf7e2633a6ec9c *inst/include/stan/math/rev/fun/unit_vector_constrain.hpp 8ed774035a792f930aed16ed3897edf9 *inst/include/stan/math/rev/fun/value_of.hpp 10303c14c8114394a82ddb2a488b5cc9 *inst/include/stan/math/rev/fun/value_of_rec.hpp 936e030c43bb0409caeb627b949bc04f *inst/include/stan/math/rev/fun/variance.hpp 0333c5b63ff4b19d3954c9e3924fad4c *inst/include/stan/math/rev/functor.hpp cfb8228a4d9ac461309814a223820f4c *inst/include/stan/math/rev/functor/algebra_solver_fp.hpp 05ddcc5a01cb8391823a71612f8b36bc *inst/include/stan/math/rev/functor/algebra_system.hpp cfcf40776c03ee09ebb269fc16af2a72 *inst/include/stan/math/rev/functor/apply_scalar_binary.hpp 9eb538d2f4b38d41e91cc216047118fe *inst/include/stan/math/rev/functor/apply_scalar_unary.hpp 36f2691c377bd7b42c39147767d22733 *inst/include/stan/math/rev/functor/apply_vector_unary.hpp a8516642caa9b30a4096a50a66b1ed43 *inst/include/stan/math/rev/functor/coupled_ode_system.hpp 9f02094bc523316dc8d689564a9d15f8 *inst/include/stan/math/rev/functor/cvodes_integrator.hpp e1e73f194f8216608b2efa1153cb69bc *inst/include/stan/math/rev/functor/cvodes_integrator_adjoint.hpp 9b7aa51c53174e79d2810098d46348c3 *inst/include/stan/math/rev/functor/cvodes_utils.hpp b90ab9a55a9244a0efc76877f229a6f7 *inst/include/stan/math/rev/functor/dae.hpp 
ea485fdcd307f8a2f60354005c8d9b69 *inst/include/stan/math/rev/functor/dae_system.hpp 9aaa0687656af92c9440a5191dbfa09f *inst/include/stan/math/rev/functor/finite_diff_hessian_auto.hpp 2a73489972a14d651456c54f44c9c8ae *inst/include/stan/math/rev/functor/finite_diff_hessian_times_vector_auto.hpp 4ca2cc95112eb57c03a4019ef433d233 *inst/include/stan/math/rev/functor/gradient.hpp a101dda6f4e24cdbc17d601d4468f4df *inst/include/stan/math/rev/functor/idas_integrator.hpp 5ebc2bab7c99edeb454553db964ae713 *inst/include/stan/math/rev/functor/idas_service.hpp 300db2c3012b3d3b0ac7f5b3fc459a8b *inst/include/stan/math/rev/functor/integrate_1d.hpp 3dfbcab713b31201f20341678ad014f5 *inst/include/stan/math/rev/functor/integrate_ode_adams.hpp c44856c8aba9848527cc5fbd4e6b17e1 *inst/include/stan/math/rev/functor/integrate_ode_bdf.hpp 94a16c0e9383f7f91e859110dbf7b2fa *inst/include/stan/math/rev/functor/jacobian.hpp a5d0f282977f7817b5b356c86e8d3838 *inst/include/stan/math/rev/functor/kinsol_data.hpp 222bc0fa506dbc0ecb5736af0c9e83a0 *inst/include/stan/math/rev/functor/kinsol_solve.hpp 00ee94f40c31b85dc672accf4f3e32c1 *inst/include/stan/math/rev/functor/map_rect_concurrent.hpp a8c55cabb8abd59b5212baa1cf4e8171 *inst/include/stan/math/rev/functor/map_rect_reduce.hpp 9acdd8b870171db2c93c84e492563d6c *inst/include/stan/math/rev/functor/ode_adams.hpp 9b96ce9eb53ad654f38f32f180a0214d *inst/include/stan/math/rev/functor/ode_adjoint.hpp fa73b8754562c3167f7b6b876742e4f3 *inst/include/stan/math/rev/functor/ode_bdf.hpp 2be0f7254a6217aef51308627023a88a *inst/include/stan/math/rev/functor/ode_store_sensitivities.hpp ac7171f5896ad2e9831e567a0f5c90a2 *inst/include/stan/math/rev/functor/operands_and_partials.hpp c6d382a4139e3c9a79ca4aa4f1c2e19e *inst/include/stan/math/rev/functor/partials_propagator.hpp 12fb74f8a6036465e56519ab792c98ac *inst/include/stan/math/rev/functor/reduce_sum.hpp c12026889d02625a9c00d81c021cbabc *inst/include/stan/math/rev/functor/solve_newton.hpp b20bd9b87f2aa8965a99cb0e86d7946f 
*inst/include/stan/math/rev/functor/solve_powell.hpp a69264c0dd6ce6eee3c39a37e3d1c8aa *inst/include/stan/math/rev/meta.hpp 20735d76d7b734f0c5f6b9bf2193469b *inst/include/stan/math/rev/meta/arena_type.hpp bba85b5a7107e3d05a8d6762f2a3c2fb *inst/include/stan/math/rev/meta/conditional_var_value.hpp 93603693b8f1db5832da980f80d427cc *inst/include/stan/math/rev/meta/is_arena_matrix.hpp 50a565370eafa7c28bd04c1b0738cb47 *inst/include/stan/math/rev/meta/is_rev_matrix.hpp 55631fb57fbebe12d265d4399377d2f1 *inst/include/stan/math/rev/meta/is_var.hpp 54adae9bc52a4465f87935a64d5ec84b *inst/include/stan/math/rev/meta/is_vari.hpp 407b309d5f1358443dc65a8104c76757 *inst/include/stan/math/rev/meta/partials_type.hpp 5a8e9bafdd1f097f9fbf329931c0fec7 *inst/include/stan/math/rev/meta/plain_type.hpp 5f44ae98ac23b09a63d100530e7eda0c *inst/include/stan/math/rev/meta/promote_var_matrix.hpp b6060613a008d54a6307a729e168105e *inst/include/stan/math/rev/meta/return_var_matrix.hpp a01573fb6045bc6a670118efa28fb4dd *inst/include/stan/math/rev/meta/rev_matrix_type.hpp de30b2d12f14c108e9b043382dddf8fc *inst/include/stan/math/rev/prob.hpp 86537444a8d3878d0c21cf27b25540c2 *inst/include/stan/math/rev/prob/std_normal_log_qf.hpp 18fbeed8882ecb0edf667a951eaa02f7 *inst/include/stan/math/version.hpp b3e50636326e2d460f7a76b7176477a2 *inst/include/stan_sundials_printf_override.hpp 9d3b95521f926b87d97eab648ca5dc14 *inst/include/sundials/sundials_band.h d1682a09c6b7885d269787b5dfdcc3c3 *inst/include/sundials/sundials_config.h e925a8fa93872f5a184542953d512657 *inst/include/sundials/sundials_config.in 28e47b37317ba3b3aa1f14c360973b64 *inst/include/sundials/sundials_context.h 58608f89cdf41429543a11dde80e45b4 *inst/include/sundials/sundials_cuda_policies.hpp 947765acca8aa3315ea75732ad9e1426 *inst/include/sundials/sundials_dense.h 9c50173c6b84de3f8ba03b82bbb041c9 *inst/include/sundials/sundials_direct.h e9db0b44eae14e93ffa91896e7f417ef *inst/include/sundials/sundials_export.h d6d3551650e4f9aa76f69cbb6ba777ca 
*inst/include/sundials/sundials_futils.h 42458737c2f412a28fd12442dbf45edf *inst/include/sundials/sundials_hip_policies.hpp 595eea7a38700d00b47857266ba3ff1f *inst/include/sundials/sundials_iterative.h d8694595b75709355e031f9c5f2289c7 *inst/include/sundials/sundials_lapack.h 19b572836de4d329a8a58649921a700e *inst/include/sundials/sundials_linearsolver.h 73c2859f7c609a292fddd15d3578bed8 *inst/include/sundials/sundials_math.h 6f048eb8d22047c2b707744b6af98283 *inst/include/sundials/sundials_matrix.h 813fe393f6a2d5163ef2fe12ce153462 *inst/include/sundials/sundials_memory.h 080243397f55cea7f4ee8a45406974b3 *inst/include/sundials/sundials_mpi_types.h f7e0608da97dbb01e3995df3cbdbe2a6 *inst/include/sundials/sundials_nonlinearsolver.h 10764906ec796a142c978a3fa7af8035 *inst/include/sundials/sundials_nvector.h a5d75e6990baf249bbe29c361cb8dbc7 *inst/include/sundials/sundials_nvector_senswrapper.h 5c74af8d8bb621045396c37833249845 *inst/include/sundials/sundials_profiler.h 586f047c4f7f4eed1a871f65adf558cd *inst/include/sundials/sundials_sycl_policies.hpp 33b2ab8e24a4c814c2f7c0e006182ad9 *inst/include/sundials/sundials_types.h e4492601bb4219ad3a6442bf8ab2d66a *inst/include/sundials/sundials_version.h e9c6f8ae6632f7e8008db439121ed246 *inst/include/sundials/sundials_xbraid.h 0ed2e7e5399d03ced1ddea48c37a9d70 *inst/include/sundials_debug.h 3dc8806dbc0bacc31b4c8ad890026ba2 *inst/include/sunlinsol/sunlinsol_band.h 240257703f48dcb4fe3984019f9a47cd *inst/include/sunlinsol/sunlinsol_cusolversp_batchqr.h 96673c05bdc1f7e71e0644f33f389361 *inst/include/sunlinsol/sunlinsol_dense.h 36f311fae442cb4d307f5e1c941f3701 *inst/include/sunlinsol/sunlinsol_klu.h f20b3a76432ef85551259d3dee54f6e0 *inst/include/sunlinsol/sunlinsol_lapackband.h fb97fc7006a050d884a6ec66437792c2 *inst/include/sunlinsol/sunlinsol_lapackdense.h 60afe6516850c2b9e5212fbc2882a9a8 *inst/include/sunlinsol/sunlinsol_magmadense.h 547ebe9964d7683e8572bf06920fe12e *inst/include/sunlinsol/sunlinsol_onemkldense.h 
9b2f759ec7423e72906223246be64d7e *inst/include/sunlinsol/sunlinsol_pcg.h 3baa2edca0808280bfa1a461c44422b4 *inst/include/sunlinsol/sunlinsol_spbcgs.h 666e51de18687470cb292d8f3a8b07d4 *inst/include/sunlinsol/sunlinsol_spfgmr.h 7a5bdb242e0b9784ac6b542f42fde0e9 *inst/include/sunlinsol/sunlinsol_spgmr.h 146610b0d82213d5ee1dcbb60b78dfc4 *inst/include/sunlinsol/sunlinsol_sptfqmr.h e7fee2ba3df76ef031b723b6ba5e61bf *inst/include/sunlinsol/sunlinsol_superludist.h d4169c423e70a0e9e6357920c6050b97 *inst/include/sunlinsol/sunlinsol_superlumt.h c188d72d6c4b122658c1a135921861af *inst/include/sunmatrix/sunmatrix_band.h a9d973dc1bc4482764df0ad685c33f2e *inst/include/sunmatrix/sunmatrix_cusparse.h 92729833fc54a57d25f143d6381a202c *inst/include/sunmatrix/sunmatrix_dense.h fdc0200bc8070e783280389e8839d602 *inst/include/sunmatrix/sunmatrix_magmadense.h 1981495c252eb97c9a2de2b691619a45 *inst/include/sunmatrix/sunmatrix_onemkldense.h c17096a41b40f56a985f1813e8799975 *inst/include/sunmatrix/sunmatrix_slunrloc.h b83a02cf5b22ccdf763cb8cfd548952f *inst/include/sunmatrix/sunmatrix_sparse.h 21d235f27d6412c94bc1ba906002a729 *inst/include/sunmemory/sunmemory_cuda.h f1889c46fb80106e25ab33659dbe6ff4 *inst/include/sunmemory/sunmemory_hip.h cbfe0a2dee4d38517769933b3901c418 *inst/include/sunmemory/sunmemory_sycl.h 9153aea2ddc22204993373bb718bd1d0 *inst/include/sunmemory/sunmemory_system.h 4b24e5c668ae7f65c4e552302930d4e3 *inst/include/sunnonlinsol/sunnonlinsol_fixedpoint.h 4faccf6b4fd22be8435271d7f9bac24c *inst/include/sunnonlinsol/sunnonlinsol_newton.h cf12e0797e2e7cf945e54a42163660c9 *inst/include/sunnonlinsol/sunnonlinsol_petscsnes.h a424503e8d4d699707868317c919b505 *inst/stanc.js 3ff2d51c2b68bf7e021fe8af6772c9f7 *man/CxxFlags.Rd c89adfeaae9bde83f24fb8ba04156b38 *man/stanFunction.Rd 807aa3edb40840d0b4c8b6b609a39b91 *src/Makevars ce5c3747018ba72cbac1e1cb9ab74b71 *src/Makevars.win c212a5def60ce8654d7852bb9af66e13 *src/cvodes/LICENSE 5dfe22201a2919be5021a8bc7c74efc4 *src/cvodes/NOTICE 
f6aec6d2e85ff6a36438614bee1daa5d *src/cvodes/README.md 5f27f7af89703f7487a9a5b919f3013a *src/cvodes/cvodea.c 4572f4815087a9166a6431db9349aa65 *src/cvodes/cvodea_io.c e2e4607d7cc7f606b8bb97b16051d286 *src/cvodes/cvodes.c 5995db7034570ec3dece81305b29dfcc *src/cvodes/cvodes_bandpre.c 3711227932b500ed04bbc838659ab7dd *src/cvodes/cvodes_bandpre_impl.h 968e24a4c200f8a2452d57a14aec971d *src/cvodes/cvodes_bbdpre.c d85730e16061562f6fe2cab8f2c842b1 *src/cvodes/cvodes_bbdpre_impl.h 6d9bd2e9df87a436bed42b4d039b29ac *src/cvodes/cvodes_diag.c 6f482dc75dbba2e6a2214c0fadd7b3a8 *src/cvodes/cvodes_diag_impl.h bba89af927a4b72877e5b4dbb5273ca8 *src/cvodes/cvodes_direct.c 9e32fdb9b487d341924c1b75889bcedd *src/cvodes/cvodes_impl.h c81e2be5b372cbf6c5a4fb22a0de9576 *src/cvodes/cvodes_io.c a62eec26fff6eb732c4da30bd137e865 *src/cvodes/cvodes_ls.c 77e560377689e771d7ff727d9d5174f6 *src/cvodes/cvodes_ls_impl.h 23639106b0ddd1a7b000da4af835d60c *src/cvodes/cvodes_nls.c 58ac49ca1f28914f8df20b55cac847d0 *src/cvodes/cvodes_nls_sim.c f2b6817641ad885f69124e6930565b97 *src/cvodes/cvodes_nls_stg.c edcba4be9d8b82b69ddc3ff57024bbe2 *src/cvodes/cvodes_nls_stg1.c 52c9e5ef2d03fa3629740445d9641600 *src/cvodes/cvodes_spils.c cffea942974d158853d2c8eabe7d090b *src/cvodes/fmod/fcvodes_mod.c 47ac4767019c184cad3e84f11e04f64e *src/cvodes/fmod/fcvodes_mod.f90 c212a5def60ce8654d7852bb9af66e13 *src/idas/LICENSE 5dfe22201a2919be5021a8bc7c74efc4 *src/idas/NOTICE e0447da74dde875113e0d0c754bdf926 *src/idas/README.md ff5c5bfce0c5ab4b18162a684f4633de *src/idas/fmod/fidas_mod.c 403fbd47b54483639ffc99277c6be47e *src/idas/fmod/fidas_mod.f90 4c028d2714a377a90d40ced0af85cd54 *src/idas/idaa.c a397e7dab6f1b553f92dd33b96fcc124 *src/idas/idaa_io.c cf8729fab9e84660d283bdbecf934ff4 *src/idas/idas.c fe4856b77d67ea05e97566655ab91c84 *src/idas/idas_bbdpre.c 58140c256e00813af114a29bcdd18c1f *src/idas/idas_bbdpre_impl.h 3f91714f84799d04163ae3e67d01f603 *src/idas/idas_direct.c d24188a08ea8c8b99ff2b2dc6f96192b *src/idas/idas_ic.c 
30d72bfd910dbd22b08b3f34d6577cc9 *src/idas/idas_impl.h 4dbe1b0b8ab8352c5244fe42121b9a0d *src/idas/idas_io.c 7bfea3d806eac24f2b3dc99b3a45b55a *src/idas/idas_ls.c 657417bd5dce53b21f43f56f47ea25ee *src/idas/idas_ls_impl.h a0f9c425110812cb59ae5b60e3db73f9 *src/idas/idas_nls.c cd82ec550c1cb0f9c9e25acd845500c0 *src/idas/idas_nls_sim.c 7dd39c919cbdcf33405e30f2d97b0fbb *src/idas/idas_nls_stg.c 1ac80ea0e3ed813b8340d66b59e5f90b *src/idas/idas_spils.c d8d6f3a81ddc136c7bad498731bb0af0 *src/init.c 2ee1232c21d367ea9b13996cb8e38b2e *src/install.libs.R c212a5def60ce8654d7852bb9af66e13 *src/kinsol/LICENSE 5dfe22201a2919be5021a8bc7c74efc4 *src/kinsol/NOTICE 98ce156ab20c0f11a72e2f990c96ce32 *src/kinsol/README.md 57aa8de426003b46ca82dfe7ab7b4c32 *src/kinsol/fmod/fkinsol_mod.c 238ff3cfd22c23cde7bd6fdde625a6d3 *src/kinsol/fmod/fkinsol_mod.f90 2d8caf6394f17b027c0473dbd4dc5053 *src/kinsol/kinsol.c 3ca07bb0ca604f6dd2bd935d4fbebf7c *src/kinsol/kinsol_bbdpre.c ac16337f1105adf5843abc83d5c2d84e *src/kinsol/kinsol_bbdpre_impl.h 1ed7a54119e525684ea391900af327cb *src/kinsol/kinsol_direct.c 4532d345d96a4334745dd698dfa54dce *src/kinsol/kinsol_impl.h f02e8f7b45f31584b6f94f315ba54fd4 *src/kinsol/kinsol_io.c 6112220efe55c7f5e17792e987b054dd *src/kinsol/kinsol_ls.c 118ab1960ba74975f7be9710c389deb2 *src/kinsol/kinsol_ls_impl.h 414c3a006814df38e0f9559abae1e7b4 *src/kinsol/kinsol_spils.c 0943317e7dfd0bf797808f6398e4ffdd *src/nvector/cuda/VectorArrayKernels.cuh db7ed0ef937f68705eeaa211b9e89a9b *src/nvector/cuda/VectorKernels.cuh 3dd3fc8917b041d093467779c94ced14 *src/nvector/cuda/nvector_cuda.cu 312e95621f2b589d01ec05baed5e964f *src/nvector/hip/VectorArrayKernels.hip.hpp 74bf190ab28b27a16ac63c46b93561ab *src/nvector/hip/VectorKernels.hip.hpp b8e4aaeda3f0bb3aebbcc494ca627990 *src/nvector/hip/nvector_hip.hip.cpp aeab2bdf657c2408194fe0c2b0d5eb72 *src/nvector/manyvector/fmod/fnvector_manyvector_mod.c 203545fc4122a165517cf01cb01676dc *src/nvector/manyvector/fmod/fnvector_manyvector_mod.f90 
1faae02ef19155924e765fea240b900f *src/nvector/manyvector/fmod/fnvector_mpimanyvector_mod.c 4d25bb8d8e8b9754d8ac66ae6f29ec6a *src/nvector/manyvector/fmod/fnvector_mpimanyvector_mod.f90 7fb682dc39db08444df9dda6ff955193 *src/nvector/manyvector/nvector_manyvector.c 3085cbfa7d2d7933d86c0a771c7a4d7f *src/nvector/mpiplusx/fmod/fnvector_mpiplusx_mod.c 5ea14fc3a2d4be9479608126ffcece7e *src/nvector/mpiplusx/fmod/fnvector_mpiplusx_mod.f90 b20410bdaa68378ffcbf225fbf018d31 *src/nvector/mpiplusx/nvector_mpiplusx.c 6daef2038064bcfd4e3f4627f0d01698 *src/nvector/openmp/fmod/fnvector_openmp_mod.c b42a982fe2a21af36c4d750d3e8597e6 *src/nvector/openmp/fmod/fnvector_openmp_mod.f90 25373537043c458ac99960f18065f835 *src/nvector/openmp/nvector_openmp.c 45987274d25defd8adc2b2d51fe7fa6a *src/nvector/openmpdev/nvector_openmpdev.c aab3f12aa3f34cf1c4c8fcb62bed5cef *src/nvector/parallel/fmod/fnvector_parallel_mod.c aba601667c64e6d38001eddb11e5d37b *src/nvector/parallel/fmod/fnvector_parallel_mod.f90 39669065befe23875a01029931106466 *src/nvector/parallel/nvector_parallel.c 094920673b1a970e254e3df46628839f *src/nvector/parhyp/nvector_parhyp.c a072604c0e611181736fbc8bed79d3a8 *src/nvector/petsc/nvector_petsc.c ebdd085161e89fd1d78dc6387002d50a *src/nvector/pthreads/fmod/fnvector_pthreads_mod.c 4ad28fc75a27f4216a03710bda2e4c14 *src/nvector/pthreads/fmod/fnvector_pthreads_mod.f90 1e80a65c14440ffaeedea6508a717bf8 *src/nvector/pthreads/nvector_pthreads.c 8b8207b92ffcb99bdae8a658592a993c *src/nvector/raja/nvector_raja.cpp 3254a5bfb3b30e73f5c13f94be2ca5d6 *src/nvector/serial/fmod/fnvector_serial_mod.c 8e85144e03511dce57b1d732e3f746d6 *src/nvector/serial/fmod/fnvector_serial_mod.f90 57d3d073b9a155ecb8a15cb7e1d96107 *src/nvector/serial/nvector_serial.c 7f9972d7bbd21eaa6b8256b579b332f6 *src/nvector/sycl/nvector_sycl.cpp 8416dbd3d43bb35b64d0e55857ee14ac *src/nvector/trilinos/nvector_trilinos.cpp f77bda6112ed3c88e9ca7bfc163f82e6 *src/sundials/fmod/fsundials_context_mod.c 720ac792301faa102e332e63b345b25b 
*src/sundials/fmod/fsundials_context_mod.f90 98b9bc44bd1e7f45379077adc89fe081 *src/sundials/fmod/fsundials_futils_mod.c c082084f493d1f0d9d0f0592a3794307 *src/sundials/fmod/fsundials_futils_mod.f90 12de6830990fb382ef417e0c47b8fece *src/sundials/fmod/fsundials_linearsolver_mod.c f0cad80fff3e135747432d302114c71b *src/sundials/fmod/fsundials_linearsolver_mod.f90 69e4a87394daa1cc48193af567f045d2 *src/sundials/fmod/fsundials_matrix_mod.c 97b44200741a15539c8524d4e351424f *src/sundials/fmod/fsundials_matrix_mod.f90 e85ae86b593f4a07395e0e8806733b32 *src/sundials/fmod/fsundials_nonlinearsolver_mod.c b6a621b020955105f9523d8af927e8dc *src/sundials/fmod/fsundials_nonlinearsolver_mod.f90 28ddfb271ddfaa4a97612d9f66606012 *src/sundials/fmod/fsundials_nvector_mod.c f07ff3ef50b4ccd26ab2681894223442 *src/sundials/fmod/fsundials_nvector_mod.f90 3742952a7592f12f16c099b1f1829f3a *src/sundials/fmod/fsundials_profiler_mod.c 16f62ea411683f717216e37f2cf40709 *src/sundials/fmod/fsundials_profiler_mod.f90 2e1ce5173f45241c1f491f012ab9a1ae *src/sundials/fmod/fsundials_types_mod.c f9bb95438baf35d08412437ad8c23497 *src/sundials/fmod/fsundials_types_mod.f90 7ee5944682feba785b744a9d7199f47e *src/sundials/sundials_band.c 4e9ed23f0ccedae6cd78173b504d308e *src/sundials/sundials_context.c 1557ae1c2569280da1e465f968855e88 *src/sundials/sundials_context_impl.h 02767bb525a5ce66af20891df2909ad3 *src/sundials/sundials_cuda.h 1327138be06a747cea3d9d8b8414efb8 *src/sundials/sundials_cuda_kernels.cuh 0ed2e7e5399d03ced1ddea48c37a9d70 *src/sundials/sundials_debug.h 6a9eb99526dfb5ff2f393f19ac1ea9bb *src/sundials/sundials_dense.c 0cb3999aa5aa6d982977a1ed4fc112ed *src/sundials/sundials_direct.c e9a78c26289ed48fde6dc236056f9c31 *src/sundials/sundials_futils.c 29d51986a5a3cf0331878bba3eab958f *src/sundials/sundials_hashmap.h d59567e69a2d943b7543c42c92a5459c *src/sundials/sundials_hip.h 7aaa3a77c9bc7728a0d1ecd3b5fea902 *src/sundials/sundials_hip_kernels.hip.hpp 2c45eb15d2e27407e0d4df8b800c13b4 
*src/sundials/sundials_iterative.c fb9d76cbfcd4b88b60e97664edc825e7 *src/sundials/sundials_iterative_impl.h 19e2344cb4438a5958903f597c5cfd8f *src/sundials/sundials_lapack_defs.h b848b66475b666c18d823687850d89c0 *src/sundials/sundials_linearsolver.c 64849893a3481dbb61d776b57925f394 *src/sundials/sundials_math.c 37a2400de5772c56d1235ec4fb79d721 *src/sundials/sundials_matrix.c 50b3bec7e835d4e06ab3ecbaf2731c4d *src/sundials/sundials_memory.c 253311fe333b253f8c9d1f38e5c43d26 *src/sundials/sundials_nonlinearsolver.c 2fda1a3b9a20b1f95f15532c2a299af8 *src/sundials/sundials_nvector.c cc6de6f0c51f39f408f70b98df346f4c *src/sundials/sundials_nvector_senswrapper.c 13706853d5b8a7a0ebbf281891e248be *src/sundials/sundials_profiler.c 9b14be818dd299a0b33c479da278524a *src/sundials/sundials_reductions.hpp aca4915422009ab6aa682fd750a6c154 *src/sundials/sundials_sycl.h 303ed076910605b98f3a0f95f188ba47 *src/sundials/sundials_version.c 4e9cdc1f0ce2bfbe0d3045df0ffececc *src/sunlinsol/band/fmod/fsunlinsol_band_mod.c 4c151eef587e0822c6b14e933828a187 *src/sunlinsol/band/fmod/fsunlinsol_band_mod.f90 e27687d67587387cee1128a5af89960d *src/sunlinsol/band/sunlinsol_band.c b58cc094d687b9b1a1ff81800beeefbf *src/sunlinsol/cusolversp/sunlinsol_cusolversp_batchqr.cu f8957b78108f3feb799c8d4840f16ef1 *src/sunlinsol/dense/fmod/fsunlinsol_dense_mod.c 9b0addf3573b9ac2ad8e41e1cb493cdf *src/sunlinsol/dense/fmod/fsunlinsol_dense_mod.f90 3f76860a853aa17994aa14ec4b35c044 *src/sunlinsol/dense/sunlinsol_dense.c 219f419e61755669039e9527344e2407 *src/sunlinsol/klu/fmod/fsunlinsol_klu_mod.c 1b226948f42a9a60adbd8f9e74d91759 *src/sunlinsol/klu/fmod/fsunlinsol_klu_mod.f90 5564da946df64cf98b6b2bb3e43a8c0b *src/sunlinsol/klu/sunlinsol_klu.c b77ac9814b0c958db7d03f9e45654706 *src/sunlinsol/lapackband/sunlinsol_lapackband.c 51bdff14da61964a732daf461fbc5bad *src/sunlinsol/lapackdense/sunlinsol_lapackdense.c 0c13ad575cadc22207ab1bef1a630b1f *src/sunlinsol/magmadense/sunlinsol_magmadense.cpp 609acdfd5ac2db1727970afea56a495f 
*src/sunlinsol/onemkldense/sunlinsol_onemkldense.cpp ca8f59941a8d7a3b16d266c8a47b9635 *src/sunlinsol/pcg/fmod/fsunlinsol_pcg_mod.c 1289eae9ef6a5142f4458037c71d97ce *src/sunlinsol/pcg/fmod/fsunlinsol_pcg_mod.f90 65a0a840e97e0a95686e7e1b7c4b2b29 *src/sunlinsol/pcg/sunlinsol_pcg.c 5b2986d0a56198027625f1382cf51538 *src/sunlinsol/spbcgs/fmod/fsunlinsol_spbcgs_mod.c d65423c9ff46cca57e53c001223ee3a5 *src/sunlinsol/spbcgs/fmod/fsunlinsol_spbcgs_mod.f90 3bd1ea6e0d85ce95fc6b5d90ec941731 *src/sunlinsol/spbcgs/sunlinsol_spbcgs.c 89ef5bf9455d725a92413a037b60d36b *src/sunlinsol/spfgmr/fmod/fsunlinsol_spfgmr_mod.c 2ff77a11dfb8b67dc1c4fb723a21fbd5 *src/sunlinsol/spfgmr/fmod/fsunlinsol_spfgmr_mod.f90 11f4f940bedb0b5bf0202fbc145d9111 *src/sunlinsol/spfgmr/sunlinsol_spfgmr.c b155395d9988576448c40c6a43901e95 *src/sunlinsol/spgmr/fmod/fsunlinsol_spgmr_mod.c f80a1607bde666bd88aacf070d340445 *src/sunlinsol/spgmr/fmod/fsunlinsol_spgmr_mod.f90 071573a060c58d86413268dac61c0589 *src/sunlinsol/spgmr/sunlinsol_spgmr.c a1672cde829601912777c0e32f09fc2a *src/sunlinsol/sptfqmr/fmod/fsunlinsol_sptfqmr_mod.c 2e21aeefbdcb28d5d339153085aafadb *src/sunlinsol/sptfqmr/fmod/fsunlinsol_sptfqmr_mod.f90 5be6d8aca591f6f5a31904bb5d46c0a4 *src/sunlinsol/sptfqmr/sunlinsol_sptfqmr.c 6952fc5b518611cfcfe3e80aabdf9410 *src/sunlinsol/superludist/sunlinsol_superludist.c 71be185929e045ed92e7dba7a5093c72 *src/sunlinsol/superlumt/sunlinsol_superlumt.c df23247bbd6c2a113cec36c9ed513e47 *src/sunmatrix/band/fmod/fsunmatrix_band_mod.c 9a343e1db7fe4561c2c139fbeac155a6 *src/sunmatrix/band/fmod/fsunmatrix_band_mod.f90 f42e5a6c40e5ca436536708b3cbbf5f7 *src/sunmatrix/band/sunmatrix_band.c 5a1f2b4bc9bf97d3c7da8bd52302de44 *src/sunmatrix/cusparse/cusparse_kernels.cuh 254ea2d46853af6b8ebfcab1354c6431 *src/sunmatrix/cusparse/sunmatrix_cusparse.cu f05d5e1fd776efc89d8ca452d2997dfa *src/sunmatrix/dense/fmod/fsunmatrix_dense_mod.c 29d41ea3aac4a5dcbd7d7a6ce97254b7 *src/sunmatrix/dense/fmod/fsunmatrix_dense_mod.f90 
d47d459761a3a484a6adb163aee0bea3 *src/sunmatrix/dense/sunmatrix_dense.c 2eda7e4137ee6dc4788c370ad34b2e97 *src/sunmatrix/magmadense/dense_cuda_kernels.cuh a09f3d34f85690881326e89f69bb3c13 *src/sunmatrix/magmadense/dense_hip_kernels.hip.hpp 73000366e4ccb31e56a90133e3f2d1c0 *src/sunmatrix/magmadense/sunmatrix_magmadense.cpp 7a35bca1fc7b48f7f3cf875535040999 *src/sunmatrix/onemkldense/sunmatrix_onemkldense.cpp 3ee421465653dca03690ebe3ec10d723 *src/sunmatrix/slunrloc/sunmatrix_slunrloc.c bc23b5d77851c95c4cf714a51486176e *src/sunmatrix/sparse/fmod/fsunmatrix_sparse_mod.c 8aee6f691aaf6c2db41d1314dd7755f2 *src/sunmatrix/sparse/fmod/fsunmatrix_sparse_mod.f90 6dd5af580eeb358224d8a492fca1b090 *src/sunmatrix/sparse/sunmatrix_sparse.c ff6d3b96ae0aa469de968cf6aa5099e0 *src/sunmemory/cuda/sundials_cuda_memory.cu cf826bb870e4cd15fd7498f3ca341557 *src/sunmemory/hip/sundials_hip_memory.hip.cpp 312148d9b82d749e1f6e2973b189189e *src/sunmemory/sycl/sundials_sycl_memory.cpp 5df3ce3ea14af7da0f7a1c068a1402d9 *src/sunmemory/system/sundials_system_memory.c 50ebbc55939cb0994077b52debbc86da *src/sunnonlinsol/fixedpoint/fmod/fsunnonlinsol_fixedpoint_mod.c 0e2fe3f0278482ea861f68b764dac163 *src/sunnonlinsol/fixedpoint/fmod/fsunnonlinsol_fixedpoint_mod.f90 926c9acb3a25989577c38c6dfc38a74b *src/sunnonlinsol/fixedpoint/sunnonlinsol_fixedpoint.c 982df71896c77d1de565928f7a5453e8 *src/sunnonlinsol/newton/fmod/fsunnonlinsol_newton_mod.c 5215684c1837d9309726ca2048bb36c1 *src/sunnonlinsol/newton/fmod/fsunnonlinsol_newton_mod.f90 f4cdda8684c52d93a30ac82b560188b5 *src/sunnonlinsol/newton/sunnonlinsol_newton.c e9a381acb82a4063c81a6409e48dc691 *src/sunnonlinsol/petscsnes/sunnonlinsol_petscsnes.c 54cf10ca5ac7b36b45a9796ab12a1bfb *tests/rstan.R ddfb7fe84a5f61eaec00c645f5a5c905 *vignettes/sparselm_stan.hpp 33c9e17d63127207e090c353b627fc98 *vignettes/stanmath.Rmd StanHeaders/R/0000755000176200001440000000000014571446075012671 5ustar liggesusersStanHeaders/R/Flags.R0000644000176200001440000000201114571446075014042 
0ustar liggesusersCxxFlags <- function(as_character = FALSE) { if (dir.exists(Sys.getenv("TBB_INC"))) { TBB_INC <- normalizePath(Sys.getenv("TBB_INC")) } else { TBB_INC <- system.file("include", package = "RcppParallel", mustWork = TRUE) } if (file.exists(file.path(TBB_INC, "tbb", "version.h"))) { CXXFLAGS <- paste0("-I", shQuote(TBB_INC), " -D_REENTRANT -DSTAN_THREADS -DTBB_INTERFACE_NEW") } else { CXXFLAGS <- paste0("-I", shQuote(TBB_INC), " -D_REENTRANT -DSTAN_THREADS") } if (isTRUE(as_character)) return(CXXFLAGS) cat(CXXFLAGS, " ") return(invisible(NULL)) } LdFlags <- function(as_character = FALSE) { if (dir.exists(Sys.getenv("TBB_LIB"))) { TBB_LIB <- normalizePath(Sys.getenv("TBB_LIB")) } else { TBB_LIB <- system.file("lib", .Platform$r_arch, package = "RcppParallel", mustWork = TRUE) } PKG_LIBS <- paste0("-L", shQuote(TBB_LIB), " -Wl,-rpath,", shQuote(TBB_LIB), " -ltbb -ltbbmalloc") if (isTRUE(as_character)) return(PKG_LIBS) cat(PKG_LIBS, " ") return(invisible(NULL)) } StanHeaders/R/stanFunction.R0000644000176200001440000001145514571446075015475 0ustar liggesusersstanFunction <- function(function_name, ..., env = parent.frame(), rebuild = FALSE, cacheDir = getOption("rcpp.cache.dir", tempdir()), showOutput = verbose, verbose = getOption("verbose")) { make_type <- function(x, recursive = FALSE) { is_array <- is.list(x) if (is_array) { base_type <- make_type(x[[1L]], recursive = TRUE) if (recursive) return(base_type) type <- sub("const ", "", base_type) j <- 1L while(j <= length(x) && is.list(x[[j]])) { type <- paste0("std::vector<", type, " >") j <- j + 1L } type <- paste0("const std::vector<", type, " >&") return(type) } Eigen <- FALSE if (is.matrix(x)) { Eigen <- TRUE if (is.complex(x)) { if (nrow(x) == 1L) { type <- "Eigen::Matrix, 1, -1>" } else type <- "Eigen::MatrixXcd" } else if (nrow(x) == 1L) { type <- "stan::math::row_vector_d" } else type <- "stan::math::matrix_d" } else if (length(x) > 1L) { if (is.integer(x)) { type <- "std::vector" } else { Eigen 
<- TRUE if (is.complex(x)) { type <- "Eigen::VectorXcd" } else type <- "stan::math::vector_d" } } else if (is.integer(x)) { type <- "int" } else if (is.numeric(x)) { type <- "double" } else if (is.complex(x)) { type <- "std::complex" } else stop(paste("all arguments to", function_name, "must be matrices,", "vectors, integers, doubles, complexes, or lists thereof")) if (Eigen) type <- paste0("const ", type, "&") else type <- paste0("const ", type) return(type) } DOTS <- list(...) types <- sapply(DOTS, FUN = make_type) double_lists <- types == "const std::vector&" if (any(double_lists)) types[double_lists] <- "const List&" int_lists <- types == "const std::vector&" if (any(int_lists)) types[int_lists] <- "const List&" complex_lists <- types == "const std::vector >&" if (any(complex_lists)) types[complex_lists] <- "const List&" code <- paste0("auto ", function_name, "(", paste(types, names(types), collapse = ", "), ") { return stan::math::", function_name, "(", paste(ifelse(double_lists, paste0("std::vector(", names(types), ".begin(), ", names(types), ".end())"), ifelse(int_lists, paste0("std::vector(", names(types), ".begin(), ", names(types), ".end())"), ifelse(complex_lists, paste0("std::vector(", names(types), ".begin(), ", names(types), ".end())"), names(types)))), collapse = ", "), "); }") incl <- dir(system.file("include", "stan", "math", "prim", package = "StanHeaders", mustWork = TRUE), pattern = "hpp$") incl <- setdiff(incl, "core.hpp") incl <- paste0("#include ") if (grepl("_rng$", function_name)) { create_rng <- system.file("include", "src", "stan", "services", "util", "create_rng.hpp", package = "StanHeaders", mustWork = TRUE) op <- options("useFancyQuotes") options(useFancyQuotes = FALSE) on.exit(options(useFancyQuotes = op)) incl <- c(incl, paste0('#include ', dQuote(create_rng))) code <- sub(") {", ", const int random_seed = 0) {", code, fixed = TRUE) code <- sub(" return ", "boost::ecuyer1988 base_rng__ = stan::services::util::create_rng(random_seed, 
0); return ", code) code <- sub("); }", ", base_rng__); }", code, fixed = TRUE) } withr::with_makevars( c( PKG_CXXFLAGS = CxxFlags(as_character = TRUE), PKG_LIBS = LdFlags(as_character = TRUE), USE_CXX17 = 1 ), Rcpp::cppFunction(code, depends = c("StanHeaders", "RcppEigen", "BH"), plugins = "cpp17", includes = incl, env = env, rebuild = rebuild, cacheDir = cacheDir, showOutput = showOutput, verbose = verbose) ) if (grepl("_rng$", function_name)) { fun <- get(function_name, envir = env, mode = "function") formals(fun, envir = env)$random_seed <- quote(sample.int(.Machine$integer.max, size = 1L)) assign(function_name, value = fun, envir = env) } return(do.call(function_name, args = DOTS, envir = env)) } StanHeaders/vignettes/0000755000176200001440000000000014645137111014466 5ustar liggesusersStanHeaders/vignettes/sparselm_stan.hpp0000644000176200001440000000376114571446076020074 0ustar liggesusers#include #include #include class sparselm_stan { public: // these would ordinarily be private in the C++ code generated by Stan Eigen::Map > X; Eigen::VectorXd y; sparselm_stan(Eigen::Map > X, Eigen::VectorXd y) : X(X), y(y) {} template // propto__ is usually true but causes log_prob() to return 0 when called from R // jacobian__ is usually true for MCMC but typically is false for optimization T__ log_prob(std::vector& params_r__) const { using namespace stan::math; T__ lp__(0.0); accumulator lp_accum__; // set up model parameters std::vector params_i__; stan::io::deserializer in__(params_r__, params_i__); auto beta = in__.template read >(X.cols()); auto sigma = in__.template read_constrain_lb(0, lp__); // log-likelihood (should add priors) lp_accum__.add(lp__); lp_accum__.add(normal_lpdf(y, (X * beta).eval(), sigma)); return lp_accum__.sum(); } template std::vector gradient(std::vector& params_r__) const { // Calculate gradients using reverse-mode autodiff // although you could do them analytically in this case using std::vector; using stan::math::var; double lp; 
std::vector gradient; try { vector ad_params_r(params_r__.size()); for (size_t i = 0; i < params_r__.size(); ++i) { var var_i(params_r__[i]); ad_params_r[i] = var_i; } var adLogProb = this->log_prob(ad_params_r); lp = adLogProb.val(); adLogProb.grad(ad_params_r, gradient); } catch (const std::exception &ex) { stan::math::recover_memory(); throw; } stan::math::recover_memory(); return gradient; } }; StanHeaders/vignettes/stanmath.Rmd0000644000176200001440000004617514571446076017001 0ustar liggesusers--- title: "Using the Stan Math C++ Library" author: "Stan Development Team" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Using the Stan Math C++ Library} %\VignetteEncoding{UTF-8} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: inline --- ```{r setup, include = FALSE} options(width = 100) knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) local({ hook_output <- knitr::knit_hooks$get('output') knitr::knit_hooks$set(output = function(x, options) { if (!is.null(options$max.height)) options$attr.output <- c( options$attr.output, sprintf('style="max-height: %s;"', options$max.height) ) hook_output(x, options) }) }) Sys.setenv(USE_CXX17 = "1") set.seed(12345) ``` # Using the **StanHeaders** Package from Other R Packages The **StanHeaders** package contains no R functions. 
To use the Stan Math Library in other packages, it is often sufficient to specify ``` LinkingTo: StanHeaders (>= 2.26.0), RcppParallel (>= 5.0.1) ``` in the DESCRIPTION file of another package and put something like ``` CXX_STD = CXX17 PKG_CXXFLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::CxxFlags()") \ $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::CxxFlags()") PKG_LIBS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::RcppParallelLibs()") \ $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::LdFlags()") ``` in the src/Makevars and src/Makevars.win files and put `GNU make` in the `SystemRequirements:` field of the package's DESCRIPTION file. If, in addition, the other package needs to utilize the MCMC, optimization, variational inference, or parsing facilities of the Stan Library, then it is also necessary to include the `src` directory of **StanHeaders** in the other package's `PKG_CXXFLAGS` in the src/Makevars and src/Makevars.win files with something like ``` STANHEADERS_SRC = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "message()" \ -e "cat(system.file('include', 'src', package = 'StanHeaders', mustWork = TRUE))" \ -e "message()" | grep "StanHeaders") PKG_CXXFLAGS += -I"$(STANHEADERS_SRC)" ``` # Calling functions in the **StanHeaders** Package from R The only exposed R function in the **StanHeaders** package is `stanFunction`, which can be used to call most functions in the Stan Math Library. 
```{r} example(stanFunction, package = "StanHeaders", run.dontrun = TRUE) ``` ```{css, echo=FALSE} .scroll-100 { max-height: 100px; overflow-y: auto; background-color: inherit; } ``` The `functions` object defined in this example lists the many Stan functions that could be called (if all their arguments are numeric, see `help(stanFunction, package = "StanHeaders")` for details) ```{r, echo = FALSE, warning = FALSE, class.output="scroll-100"} if (length(functions) %% 2 == 1) { functions <- c(functions, "") } functions <- matrix(functions, ncol = 2, byrow = TRUE) print(functions) ``` # Using Higher-Order Functions in the **StanHeaders** Package This section will demonstrate how to use some of the C++ functions in the **StanHeaders** package whose first argument is another C++ function, in which case the `stanFunction` in the previous section will not work and you have to write your own C++. ## Derivatives and Minimization The following is a toy example of using the Stan Math library via `Rcpp::sourceCpp`: to minimize the function $$\left(\mathbf{x} - \mathbf{a}\right)^\top \left(\mathbf{x} - \mathbf{a}\right)$$ which has a global minimum when $\mathbf{x} = \mathbf{a}$. To find this minimum with autodifferentiation, we need to define the objective function. Then, its gradient with respect to $\mathbf{x}$, which we know is $2\left(\mathbf{x} - \mathbf{a}\right)$ in this case, can be calculated by autodifferentiation. At the optimum (or on the way to the optimum), we might want to evaluate the Hessian matrix, which we know is $2\mathbf{I}$, but would need an additional function to evaluate it via autodifferentiation. Finally, one could reconceptualize the problem as solving a homogeneous system of equations where the gradient is set equal to a vector of zeros. The `stan::math::algebra_solver` function can solve such a system using autodifferentiation to obtain the Jacobian, which we know to be the identity matrix in this case. 
```{r} Sys.setenv(PKG_CXXFLAGS = StanHeaders:::CxxFlags(as_character = TRUE)) SH <- system.file(ifelse(.Platform$OS.type == "windows", "libs", "lib"), .Platform$r_arch, package = "StanHeaders", mustWork = TRUE) Sys.setenv(PKG_LIBS = paste0(StanHeaders:::LdFlags(as_character = TRUE), " -L", shQuote(SH), " -lStanHeaders")) ``` Here is C++ code that does all of the above, except for the part of finding the optimum, which is done using the R function `optim` below. ```{Rcpp} // [[Rcpp::depends(BH)]] // [[Rcpp::depends(RcppEigen)]] // [[Rcpp::depends(RcppParallel)]] // [[Rcpp::depends(StanHeaders)]] #include // stuff from mix/ must come first #include // finally pull in everything from rev/ and prim/ #include #include // do this AFTER including stuff from stan/math // [[Rcpp::plugins(cpp17)]] /* Objective function */ // [[Rcpp::export]] auto f(Eigen::VectorXd x, Eigen::VectorXd a) { // objective function in doubles using stan::math::dot_self; // dot_self() is a dot product with self return dot_self( (x - a) ); } /* Gradient */ // [[Rcpp::export]] auto g(Eigen::VectorXd x, Eigen::VectorXd a) { // gradient by AD using Stan double fx; Eigen::VectorXd grad_fx; using stan::math::dot_self; stan::math::gradient([&a](auto x) { return dot_self( (x - a) ); }, x, fx, grad_fx); return grad_fx; } /* Hessian */ // [[Rcpp::export]] auto H(Eigen::VectorXd x, Eigen::VectorXd a) { // Hessian by AD using Stan double fx; Eigen::VectorXd grad_fx; Eigen::MatrixXd H; using stan::math::dot_self; stan::math::hessian([&a](auto x) { return dot_self(x - a); }, x, fx, grad_fx, H); return H; } /* Jacobian */ // [[Rcpp::export]] auto J(Eigen::VectorXd x, Eigen::VectorXd a) { // not actually used Eigen::VectorXd fx; Eigen::MatrixXd J; using stan::math::dot_self; stan::math::jacobian([&a](auto x) { return (2 * (x - a)); }, x, fx, J); return J; } struct equations_functor { template inline Eigen::Matrix operator()(const Eigen::Matrix& x, const Eigen::Matrix& theta, const std::vector& x_r, const 
std::vector& x_i, std::ostream* pstream__) const { return 2 * (x - stan::math::to_vector(x_r)); } }; // [[Rcpp::export]] auto solution(Eigen::VectorXd a, Eigen::VectorXd guess) { Eigen::VectorXd theta; auto x_r = stan::math::to_array_1d(a); equations_functor f; auto x = stan::math::algebra_solver(f, guess, theta, x_r, {}); return x; } ``` In this compiled RMarkdown document, the **knitr** package has exported functions `f`, `g`, `H`, `J` and `solution` (but not `equations_functor`) to R's global environment using the `sourceCpp` function in the **Rcpp** package, so that they can now be called from R. Here we find the optimum starting from a random point in three dimensions: ```{r} x <- optim(rnorm(3), fn = f, gr = g, a = 1:3, method = "BFGS", hessian = TRUE) x$par x$hessian H(x$par, a = 1:3) J(x$par, a = 1:3) solution(a = 1:3, guess = rnorm(3)) ``` # Integrals and Ordinary Differential Equations The Stan Math library can do one-dimensional numerical integration and can solve stiff and non-stiff systems of differential equations, such as the harmonic oscillator example below. Solving stiff systems utilizes the CVODES library, which is included in **StanHeaders**. 
```{Rcpp} // [[Rcpp::depends(BH)]] // [[Rcpp::depends(RcppEigen)]] // [[Rcpp::depends(RcppParallel)]] // [[Rcpp::depends(StanHeaders)]] #include // pulls in everything from rev/ and prim/ #include #include // do this AFTER including stan/math // [[Rcpp::plugins(cpp17)]] /* Definite integrals */ // [[Rcpp::export]] double Cauchy(double scale) { std::vector theta; auto half = stan::math::integrate_1d([](auto x, auto xc, auto theta, auto x_r, auto x_i, auto msgs) { return exp(stan::math::cauchy_lpdf(x, 0, x_r[0])); }, -scale, scale, theta, {scale}, {}, nullptr, 1e-7); return half * 2; // should equal 1 for any positive scale } /* Ordinary Differential Equations */ // [[Rcpp::export]] auto nonstiff(Eigen::MatrixXd A, Eigen::VectorXd y0) { using stan::math::integrate_ode_rk45; using stan::math::to_vector; using stan::math::to_array_1d; std::vector theta; std::vector times = {1, 2}; auto y = integrate_ode_rk45([&A](auto t, auto y, auto theta, auto x_r, auto x_i, std::ostream *msgs) { return to_array_1d( (A * to_vector(y)).eval() ); }, to_array_1d(y0), 0, times, theta, {}, {}); Eigen::VectorXd truth = stan::math::matrix_exp(A) * y0; return (to_vector(y[0]) - truth).eval(); // should be "zero" } // [[Rcpp::export]] auto stiff(Eigen::MatrixXd A, Eigen::VectorXd y0) { // not actually stiff using stan::math::integrate_ode_bdf; // but use the stiff solver anyways using stan::math::to_vector; using stan::math::to_array_1d; std::vector theta; std::vector times = {1, 2}; auto y = integrate_ode_bdf([&A](auto t, auto y, auto theta, auto x_r, auto x_i, std::ostream *msgs) { return to_array_1d( (A * to_vector(y)).eval() ); }, to_array_1d(y0), 0, times, theta, {}, {}); Eigen::VectorXd truth = stan::math::matrix_exp(A) * y0; return (to_vector(y[0]) - truth).eval(); // should be "zero" } ``` Again, in this compiled RMarkdown document, the **knitr** package has exported the `Cauchy`, `nonstiff` and `stiff` functions to R's global environment using the `sourceCpp` function in the **Rcpp** 
package so that they can be called from R. First, we numerically integrate the Cauchy PDF over its interquartile range --- which has an area of $\frac{1}{2}$ --- that we then double to verify that it is almost within machine precision of $1$. ```{r} all.equal(1, Cauchy(rexp(1)), tol = 1e-15) ``` Next, we consider the system of differential equations $$\frac{d}{dt}\mathbf{y} = \mathbf{A}\mathbf{y}$$ where $\mathbf{A}$ is a square matrix such as that for a simple harmonic oscillator $$\mathbf{A} = \begin{bmatrix}0 & 1 \\ -1 & -\theta\end{bmatrix}$$ for $\theta \in \left(0,1\right)$. The solution for $\mathbf{y}_t = e^{t\mathbf{A}}\mathbf{y}_0$ can be obtained via the matrix exponential function, which is available in the Stan Math Library, but it can also be obtained numerically using a fourth-order Runge-Kutta solver, which is appropriate for non-stiff systems of ODEs, such as this one. However, it is possible, albeit less efficient in this case, to use the backward-differentiation formula solver for stiff systems of ODEs. In both cases, we calculate the difference between the analytical solution and the numerical one, and the stiff version does produce somewhat better accuracy in this case. ```{r} A <- matrix(c(0, -1, 1, -runif(1)), nrow = 2, ncol = 2) y0 <- rexp(2) all.equal(nonstiff(A, y0), c(0, 0), tol = 1e-5) all.equal( stiff(A, y0), c(0, 0), tol = 1e-8) ``` # Parallelization ## `map_rect` Function The Stan Math Library includes the `map_rect` function, which applies a function to each element of rectangular arrays and returns a vector, making it a bit like a restricted version of R's `sapply` function. 
However, `map_rect` can also be executed in parallel by defining the pre-processor directive `STAN_THREADS` and then setting the `STAN_NUM_THREADS` environmental variable to be the number of threads to use, as in ```{r} Sys.setenv(STAN_NUM_THREADS = 2) # specify -1 to use all available cores ``` Below is C++ code to test whether an integer is prime, using a rather brute-force algorithm and running it in parallel via `map_rect`. ```{Rcpp} // [[Rcpp::depends(BH)]] // [[Rcpp::depends(RcppEigen)]] // [[Rcpp::depends(RcppParallel)]] // [[Rcpp::depends(StanHeaders)]] #include // pulls in everything from rev/ and prim/ #include #include // do this AFTER including stan/math // [[Rcpp::plugins(cpp17)]] // see https://en.wikipedia.org/wiki/Primality_test#Pseudocode struct is_prime { is_prime() {} template auto operator()(const Eigen::Matrix& eta, const Eigen::Matrix& theta, const std::vector& x_r, const std::vector& x_i, std::ostream* msgs = nullptr) const { Eigen::VectorXd res(1); // can only return double or var vectors int n = x_i[0]; if (n <= 3) { res.coeffRef(0) = n > 1; return res; } else if ( (n % 2 == 0) || (n % 3 == 0) ) { res.coeffRef(0) = false; return res; } int i = 5; while (i * i <= n) { if ( (n % i == 0) || (n % (i + 2) == 0) ) { res.coeffRef(0) = false; return res; } i += 6; } res.coeffRef(0) = true; return res; } }; /* parallelization */ // [[Rcpp::export]] auto psapply(std::vector > n) { std::vector eta(n.size()); // these all have to be the same size Eigen::VectorXd theta; std::vector > x_d(n.size()); return stan::math::map_rect<0, is_prime>(theta, eta, x_d, n, &Rcpp::Rcout); } ``` Since the signature for `n` is a `std::vector >`, we have to pass it from R as a list (which is converted to the outer `std::vector<>`) of integer vectors (which is converted to the inner `std::vector`) that happen to be of size one in this case. 
```{r} odd <- seq.int(from = 2^25 - 1, to = 2^26 - 1, by = 2) tail(psapply(n = as.list(odd))) == 1 # check your process manager while this is running ``` Thus, $2^{26} - 5 = 67,108,859$ is a prime number. ## `reduce_sum` Function The `reduce_sum` function can be used to sum a function of recursively-partitioned data in parallel. The Probability Mass Function (PMF) of the logarithmic distribution is $$\Pr\left(x = k \mid p\right) = \frac{p^k}{-k\ln\left(1 - p\right)} $$ for a positive integer $k$ and $0 < p < 1$. To verify that this PMF sums to $1$ over the natural numbers, we could accumulate it for a large finite number of terms, but would like to do so in parallel. To do so, we need to define a `partial_sum` function that adds the PMF for some subset of natural numbers and pass it to the `reduce_sum` function like ```{Rcpp} // [[Rcpp::depends(BH)]] // [[Rcpp::depends(RcppEigen)]] // [[Rcpp::depends(RcppParallel)]] // [[Rcpp::depends(StanHeaders)]] #include // pulls in everything from rev/ and prim/ #include #include // do this AFTER including stan/math // [[Rcpp::plugins(cpp17)]] template struct partial_sum { partial_sum() {} T operator()(const std::vector& k_slice, int start, int end, // these are ignored in this example std::ostream* msgs, double p) { double S = 0; for (int n = 0; n < k_slice.size(); n++) { int k = k_slice[n]; S += stan::math::pow(p, k) / k; } return S; } }; // [[Rcpp::export]] double check_logarithmic_PMF(const double p, const int N = 1000) { using stan::math::reduce_sum; std::vector k(N); std::iota(k.begin(), k.end(), 1); return reduce_sum >(k, 1, nullptr, p) / -std::log1p(-p); } ``` The second argument passed to `reduce_sum` is the `grainsize`, which governs the size (and number) of the recursive subsets of `k` that are passed to the `partial_sum` function. 
A `grainsize` of $1$ indicates that the software will try to automatically tune the subset size for good performance, but that may be worse than choosing a `grainsize` by hand in a problem-specific fashion. In any event, we can call the wrapper function `check_logarithmic_PMF` in R to verify that it returns $1$ to numerical tolerance: ```{r} stopifnot(all.equal(1, check_logarithmic_PMF(p = 1 / sqrt(2)))) ``` # Defining a Stan Model in C++ The Stan _language_ does not have much support for sparse matrices for a variety of reasons. Essentially the only applicable function is `csr_matrix_times_vector`, which pre-multiplies a vector by a sparse matrix in compressed row storage by taking as arguments its number of rows, columns, non-zero values, column indices of non-zero values, and locations where the non-zero values start in each row. While the `csr_matrix_times_vector` function could be used to implement the example below, we illustrate how to use the sparse data structures in the **Matrix** and **RcppEigen** packages in a Stan model written in C++, which could easily be extended to more complicated models with sparse data structures. 
Our C++ file for the log-likelihood of a linear model with a sparse design matrix reads as ```{r, echo = FALSE, comment = ""} cat(readLines("sparselm_stan.hpp"), sep = "\n") ``` To use it from R, we call the `exposeClass` function in the **Rcpp** package with the necessary arguments and then call `sourceCpp` on the file it wrote in the temporary directory: ```{r, message = FALSE} library(Rcpp) tf <- tempfile(fileext = "Module.cpp") exposeClass("sparselm_stan", constructors = list(c("Eigen::Map >", "Eigen::VectorXd")), fields = c("X", "y"), methods = c("log_prob<>", "gradient<>"), rename = c(log_prob = "log_prob<>", gradient = "gradient<>"), header = c("// [[Rcpp::depends(BH)]]", "// [[Rcpp::depends(RcppEigen)]]", "// [[Rcpp::depends(RcppParallel)]", "// [[Rcpp::depends(StanHeaders)]]", "// [[Rcpp::plugins(cpp17)]]", paste0("#include <", file.path(getwd(), "sparselm_stan.hpp"), ">")), file = tf, Rfile = FALSE) Sys.setenv(PKG_CXXFLAGS = paste0(Sys.getenv("PKG_CXXFLAGS"), " -I", system.file("include", "src", package = "StanHeaders", mustWork = TRUE))) sourceCpp(tf) sparselm_stan ``` At this point, we need a sparse design matrix and (dense) outcome vector to pass to the constructor. The former can be created with a variety of functions in the **Matrix** package, such as ```{r} dd <- data.frame(a = gl(3, 4), b = gl(4, 1, 12)) X <- Matrix::sparse.model.matrix(~ a + b, data = dd) X ``` Finally, we call the `new` function in the **methods** package, which essentially calls our C++ constructor and provides an R interface to the instantiated object, which contains the `log_prob` and `gradient` methods we defined and can be called with arbitrary inputs. 
```{r} sm <- new(sparselm_stan, X = X, y = rnorm(nrow(X))) sm$log_prob(c(beta = rnorm(ncol(X)), log_sigma = log(pi))) round(sm$gradient(c(beta = rnorm(ncol(X)), log_sigma = log(pi))), digits = 4) ``` StanHeaders/src/0000755000176200001440000000000014571446076013260 5ustar liggesusersStanHeaders/src/Makevars.win0000644000176200001440000000327314571446076015555 0ustar liggesusersCXX_STD = CXX17 PKG_CPPFLAGS = -DNO_FPRINTF_OUTPUT -I"../inst/include" -I"sundials" -include stan_sundials_printf_override.hpp PKG_CPPFLAGS += -DBOOST_DISABLE_ASSERTS -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION -DBOOST_NO_AUTO_PTR -D_REENTRANT -DSTAN_THREADS -DSTRICT_R_HEADERS PKG_CXXFLAGS += -DRCPP_PARALLEL_USE_TBB=1 -DSTRICT_R_HEADERS PKG_CPPFLAGS += -DUSE_WINTHREAD PKG_LIBS = -Wl,--allow-multiple-definition SHLIB_LDFLAGS = $(SHLIB_CXXLDFLAGS) SHLIB_LD = $(SHLIB_CXXLD) SUNDIALS_CVODES := \ $(wildcard cvodes/*.c) \ $(wildcard sundials/*.c) \ $(wildcard sunmatrix/band/[^f]*.c) \ $(wildcard sunmatrix/dense/[^f]*.c) \ $(wildcard sunlinsol/band/[^f]*.c) \ $(wildcard sunlinsol/dense/[^f]*.c) \ $(wildcard sunnonlinsol/newton/[^f]*.c) \ $(wildcard sunnonlinsol/fixedpoint/[^f]*.c) SUNDIALS_IDAS := \ $(wildcard idas/*.c) \ $(wildcard sundials/*.c) \ $(wildcard sunmatrix/band/[^f]*.c) \ $(wildcard sunmatrix/dense/[^f]*.c) \ $(wildcard sunlinsol/band/[^f]*.c) \ $(wildcard sunlinsol/dense/[^f]*.c) \ $(wildcard sunnonlinsol/newton/[^f]*.c) \ $(wildcard sunnonlinsol/fixedpoint/[^f]*.c) SUNDIALS_KINSOL := \ $(wildcard kinsol/*.c) \ $(wildcard sundials/*.c) \ $(wildcard sunmatrix/band/[^f]*.c) \ $(wildcard sunmatrix/dense/[^f]*.c) \ $(wildcard sunlinsol/band/[^f]*.c) \ $(wildcard sunlinsol/dense/[^f]*.c) \ $(wildcard sunnonlinsol/newton/[^f]*.c) \ $(wildcard sunnonlinsol/fixedpoint/[^f]*.c) SUNDIALS_NVECSERIAL := \ $(addprefix , nvector/serial/nvector_serial.c sundials/sundials_math.c) SOURCES = $(SUNDIALS_CVODES) $(SUNDIALS_IDAS) $(SUNDIALS_KINSOL) $(SUNDIALS_NVECSERIAL) init.c OBJECTS_C = $(SOURCES:.c=.o) 
OBJECTS = $(OBJECTS_C:.cpp=.o) $(SHLIB): $(OBJECTS) clean: rm $(OBJECTS) StanHeaders/src/nvector/0000755000176200001440000000000014511135055014722 5ustar liggesusersStanHeaders/src/nvector/raja/0000755000176200001440000000000014511135055015637 5ustar liggesusersStanHeaders/src/nvector/raja/nvector_raja.cpp0000644000176200001440000015732214645137106021041 0ustar liggesusers/* --------------------------------------------------------------------------- * Programmer(s): Slaven Peles, Cody J. Balos, Daniel McGreer, and * David J. Gardner @ LLNL * --------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * --------------------------------------------------------------------------- * This is the implementation file for a RAJA implementation of the NVECTOR * class with support for CUDA, HIP, and SYCL backends. 
* ---------------------------------------------------------------------------*/ #include #include #include #include #include "sundials_debug.h" // RAJA defines #if defined(SUNDIALS_RAJA_BACKENDS_CUDA) #include #include "sundials_cuda.h" #define SUNDIALS_RAJA_EXEC_STREAM RAJA::cuda_exec< 256 > #define SUNDIALS_RAJA_EXEC_REDUCE RAJA::cuda_exec< 256 > #define SUNDIALS_RAJA_REDUCE RAJA::cuda_reduce #define SUNDIALS_GPU_PREFIX(val) cuda ## val #define SUNDIALS_GPU_VERIFY SUNDIALS_CUDA_VERIFY #elif defined(SUNDIALS_RAJA_BACKENDS_HIP) #include #include "sundials_hip.h" #define SUNDIALS_RAJA_EXEC_STREAM RAJA::hip_exec< 512 > #define SUNDIALS_RAJA_EXEC_REDUCE RAJA::hip_exec< 512 > #define SUNDIALS_RAJA_REDUCE RAJA::hip_reduce #define SUNDIALS_GPU_PREFIX(val) hip ## val #define SUNDIALS_GPU_VERIFY SUNDIALS_HIP_VERIFY #elif defined(SUNDIALS_RAJA_BACKENDS_SYCL) #include #include #define SUNDIALS_RAJA_EXEC_STREAM RAJA::sycl_exec< 256 > #define SUNDIALS_RAJA_EXEC_REDUCE RAJA::sycl_exec_nontrivial< 256 > #define SUNDIALS_RAJA_REDUCE RAJA::sycl_reduce #else #error "Unknown RAJA backend" #endif #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) extern "C" { // Static constants static constexpr sunindextype zeroIdx = 0; // Helpful macros #define NVEC_RAJA_CONTENT(x) ((N_VectorContent_Raja)(x->content)) #define NVEC_RAJA_MEMSIZE(x) (NVEC_RAJA_CONTENT(x)->length * sizeof(realtype)) #define NVEC_RAJA_MEMHELP(x) (NVEC_RAJA_CONTENT(x)->mem_helper) #define NVEC_RAJA_HDATAp(x) ((realtype*) NVEC_RAJA_CONTENT(x)->host_data->ptr) #define NVEC_RAJA_DDATAp(x) ((realtype*) NVEC_RAJA_CONTENT(x)->device_data->ptr) // Macros to access vector private content #define NVEC_RAJA_PRIVATE(x) ((N_PrivateVectorContent_Raja)(NVEC_RAJA_CONTENT(x)->priv)) #define NVEC_RAJA_HBUFFERp(x) ((realtype*) NVEC_RAJA_PRIVATE(x)->reduce_buffer_host->ptr) #define NVEC_RAJA_DBUFFERp(x) ((realtype*) NVEC_RAJA_PRIVATE(x)->reduce_buffer_dev->ptr) /* * Private structure 
definition */ struct _N_PrivateVectorContent_Raja { booleantype use_managed_mem; /* do data pointers use managed memory */ /* fused op workspace */ SUNMemory fused_buffer_dev; /* device memory for fused ops */ SUNMemory fused_buffer_host; /* host memory for fused ops */ size_t fused_buffer_bytes; /* current size of the buffers */ size_t fused_buffer_offset; /* current offset into the buffer */ }; typedef struct _N_PrivateVectorContent_Raja *N_PrivateVectorContent_Raja; /* * Utility functions */ // Allocate vector data static int AllocateData(N_Vector v); // Fused operation buffer functions static int FusedBuffer_Init(N_Vector v, int nreal, int nptr); static int FusedBuffer_CopyRealArray(N_Vector v, realtype *r_data, int nval, realtype **shortcut); static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut); static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut); static int FusedBuffer_CopyToDevice(N_Vector v); static int FusedBuffer_Free(N_Vector v); N_Vector N_VNewEmpty_Raja(SUNContext sunctx) { N_Vector v; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Raja; v->ops->nvclone = N_VClone_Raja; v->ops->nvcloneempty = N_VCloneEmpty_Raja; v->ops->nvdestroy = N_VDestroy_Raja; v->ops->nvspace = N_VSpace_Raja; v->ops->nvgetlength = N_VGetLength_Raja; v->ops->nvgetarraypointer = N_VGetHostArrayPointer_Raja; v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_Raja; v->ops->nvsetarraypointer = N_VSetHostArrayPointer_Raja; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Raja; v->ops->nvconst = N_VConst_Raja; v->ops->nvprod = N_VProd_Raja; v->ops->nvdiv = N_VDiv_Raja; v->ops->nvscale = N_VScale_Raja; v->ops->nvabs = N_VAbs_Raja; v->ops->nvinv = N_VInv_Raja; v->ops->nvaddconst = N_VAddConst_Raja; 
v->ops->nvdotprod = N_VDotProd_Raja; v->ops->nvmaxnorm = N_VMaxNorm_Raja; v->ops->nvmin = N_VMin_Raja; v->ops->nvl1norm = N_VL1Norm_Raja; v->ops->nvinvtest = N_VInvTest_Raja; v->ops->nvconstrmask = N_VConstrMask_Raja; v->ops->nvminquotient = N_VMinQuotient_Raja; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Raja; v->ops->nvwrmsnorm = N_VWrmsNorm_Raja; v->ops->nvwl2norm = N_VWL2Norm_Raja; v->ops->nvcompare = N_VCompare_Raja; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Raja; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Raja; v->ops->nvdotprodlocal = N_VDotProd_Raja; v->ops->nvmaxnormlocal = N_VMaxNorm_Raja; v->ops->nvminlocal = N_VMin_Raja; v->ops->nvl1normlocal = N_VL1Norm_Raja; v->ops->nvinvtestlocal = N_VInvTest_Raja; v->ops->nvconstrmasklocal = N_VConstrMask_Raja; v->ops->nvminquotientlocal = N_VMinQuotient_Raja; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_Raja; v->ops->nvbufpack = N_VBufPack_Raja; v->ops->nvbufunpack = N_VBufUnpack_Raja; /* print operation for debugging */ v->ops->nvprint = N_VPrint_Raja; v->ops->nvprintfile = N_VPrintFile_Raja; v->content = (N_VectorContent_Raja) malloc(sizeof(_N_VectorContent_Raja)); if (v->content == NULL) { N_VDestroy(v); return NULL; } NVEC_RAJA_CONTENT(v)->priv = malloc(sizeof(_N_PrivateVectorContent_Raja)); if (NVEC_RAJA_CONTENT(v)->priv == NULL) { N_VDestroy(v); return NULL; } NVEC_RAJA_CONTENT(v)->length = 0; NVEC_RAJA_CONTENT(v)->mem_helper = NULL; NVEC_RAJA_CONTENT(v)->own_helper = SUNFALSE; NVEC_RAJA_CONTENT(v)->host_data = NULL; NVEC_RAJA_CONTENT(v)->device_data = NULL; NVEC_RAJA_PRIVATE(v)->use_managed_mem = SUNFALSE; NVEC_RAJA_PRIVATE(v)->fused_buffer_dev = NULL; NVEC_RAJA_PRIVATE(v)->fused_buffer_host = NULL; NVEC_RAJA_PRIVATE(v)->fused_buffer_bytes = 0; NVEC_RAJA_PRIVATE(v)->fused_buffer_offset = 0; return(v); } N_Vector N_VNew_Raja(sunindextype length, SUNContext sunctx) { N_Vector v; v = 
NULL; v = N_VNewEmpty_Raja(sunctx); if (v == NULL) return(NULL); NVEC_RAJA_CONTENT(v)->length = length; #if defined(SUNDIALS_RAJA_BACKENDS_CUDA) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_HIP) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_SYCL) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Sycl(sunctx); #endif NVEC_RAJA_CONTENT(v)->own_helper = SUNTRUE; NVEC_RAJA_CONTENT(v)->host_data = NULL; NVEC_RAJA_CONTENT(v)->device_data = NULL; NVEC_RAJA_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_RAJA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Raja: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Raja: AllocateData returned nonzero\n"); N_VDestroy(v); return NULL; } return(v); } N_Vector N_VNewWithMemHelp_Raja(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, SUNContext sunctx) { N_Vector v; if (helper == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Raja: helper is NULL\n"); return(NULL); } if (!SUNMemoryHelper_ImplementsRequiredOps(helper)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Raja: helper doesn't implement all required ops\n"); return(NULL); } v = NULL; v = N_VNewEmpty_Raja(sunctx); if (v == NULL) return(NULL); NVEC_RAJA_CONTENT(v)->length = length; NVEC_RAJA_CONTENT(v)->mem_helper = helper; NVEC_RAJA_CONTENT(v)->own_helper = SUNFALSE; NVEC_RAJA_CONTENT(v)->host_data = NULL; NVEC_RAJA_CONTENT(v)->device_data = NULL; NVEC_RAJA_PRIVATE(v)->use_managed_mem = use_managed_mem; if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Raja: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VNewManaged_Raja(sunindextype length, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Raja(sunctx); if (v == NULL) return(NULL); NVEC_RAJA_CONTENT(v)->length = length; #if 
defined(SUNDIALS_RAJA_BACKENDS_CUDA) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_HIP) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_SYCL) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Sycl(sunctx); #endif NVEC_RAJA_CONTENT(v)->own_helper = SUNTRUE; NVEC_RAJA_CONTENT(v)->host_data = NULL; NVEC_RAJA_CONTENT(v)->device_data = NULL; NVEC_RAJA_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_RAJA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Raja: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Raja: AllocateData returned nonzero\n"); N_VDestroy(v); return NULL; } return(v); } N_Vector N_VMake_Raja(sunindextype length, realtype *h_vdata, realtype *d_vdata, SUNContext sunctx) { N_Vector v; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Raja(sunctx); if (v == NULL) return(NULL); NVEC_RAJA_CONTENT(v)->length = length; NVEC_RAJA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(h_vdata, SUNMEMTYPE_HOST); NVEC_RAJA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap(d_vdata, SUNMEMTYPE_DEVICE); #if defined(SUNDIALS_RAJA_BACKENDS_CUDA) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_HIP) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_SYCL) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Sycl(sunctx); #endif NVEC_RAJA_CONTENT(v)->own_helper = SUNTRUE; NVEC_RAJA_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_RAJA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Raja: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_RAJA_CONTENT(v)->device_data == NULL || NVEC_RAJA_CONTENT(v)->host_data == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Raja: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); 
return(NULL); } return(v); } N_Vector N_VMakeManaged_Raja(sunindextype length, realtype *vdata, SUNContext sunctx) { N_Vector v; if (vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Raja(sunctx); if (v == NULL) return(NULL); NVEC_RAJA_CONTENT(v)->length = length; NVEC_RAJA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(vdata, SUNMEMTYPE_UVM); NVEC_RAJA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_RAJA_CONTENT(v)->host_data); #if defined(SUNDIALS_RAJA_BACKENDS_CUDA) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_HIP) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); #elif defined(SUNDIALS_RAJA_BACKENDS_SYCL) NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Sycl(sunctx); #endif NVEC_RAJA_CONTENT(v)->own_helper = SUNTRUE; NVEC_RAJA_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_RAJA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Raja: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_RAJA_CONTENT(v)->device_data == NULL || NVEC_RAJA_CONTENT(v)->host_data == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Raja: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); return(NULL); } return(v); } /* ----------------------------------------------------------------- * Function to return the global length of the vector. * This is defined as an inline function in nvector_raja.h, so * we just mark it as extern here. */ extern sunindextype N_VGetLength_Raja(N_Vector v); /* ---------------------------------------------------------------------------- * Return pointer to the raw host data. * This is defined as an inline function in nvector_raja.h, so * we just mark it as extern here. */ extern realtype *N_VGetHostArrayPointer_Raja(N_Vector x); /* ---------------------------------------------------------------------------- * Return pointer to the raw device data. * This is defined as an inline function in nvector_raja.h, so * we just mark it as extern here. 
*/ extern realtype *N_VGetDeviceArrayPointer_Raja(N_Vector x); /* ---------------------------------------------------------------------------- * Set pointer to the raw host data. Does not free the existing pointer. */ void N_VSetHostArrayPointer_Raja(realtype* h_vdata, N_Vector v) { if (N_VIsManagedMemory_Raja(v)) { if (NVEC_RAJA_CONTENT(v)->host_data) { NVEC_RAJA_CONTENT(v)->host_data->ptr = (void*) h_vdata; NVEC_RAJA_CONTENT(v)->device_data->ptr = (void*) h_vdata; } else { NVEC_RAJA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_UVM); NVEC_RAJA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_RAJA_CONTENT(v)->host_data); } } else { if (NVEC_RAJA_CONTENT(v)->host_data) { NVEC_RAJA_CONTENT(v)->host_data->ptr = (void*) h_vdata; } else { NVEC_RAJA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_HOST); } } } /* ---------------------------------------------------------------------------- * Set pointer to the raw device data */ void N_VSetDeviceArrayPointer_Raja(realtype* d_vdata, N_Vector v) { if (N_VIsManagedMemory_Raja(v)) { if (NVEC_RAJA_CONTENT(v)->device_data) { NVEC_RAJA_CONTENT(v)->device_data->ptr = (void*) d_vdata; NVEC_RAJA_CONTENT(v)->host_data->ptr = (void*) d_vdata; } else { NVEC_RAJA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_UVM); NVEC_RAJA_CONTENT(v)->host_data = SUNMemoryHelper_Alias(NVEC_RAJA_CONTENT(v)->device_data); } } else { if (NVEC_RAJA_CONTENT(v)->device_data) { NVEC_RAJA_CONTENT(v)->device_data->ptr = (void*) d_vdata; } else { NVEC_RAJA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_DEVICE); } } } /* ---------------------------------------------------------------------------- * Return a flag indicating if the memory for the vector data is managed */ booleantype N_VIsManagedMemory_Raja(N_Vector x) { return NVEC_RAJA_PRIVATE(x)->use_managed_mem; } /* ---------------------------------------------------------------------------- * Copy 
vector data to the device */ void N_VCopyToDevice_Raja(N_Vector x) { int copy_fail; #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif copy_fail = SUNMemoryHelper_CopyAsync(NVEC_RAJA_MEMHELP(x), NVEC_RAJA_CONTENT(x)->device_data, NVEC_RAJA_CONTENT(x)->host_data, NVEC_RAJA_MEMSIZE(x), queue); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyToDevice_Raja: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but on the default stream currently */ #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) ::sycl::queue* q = ::RAJA::sycl::detail::getQueue(); q->wait_and_throw(); #else SUNDIALS_GPU_VERIFY(SUNDIALS_GPU_PREFIX(StreamSynchronize)(0)); #endif } /* ---------------------------------------------------------------------------- * Copy vector data from the device to the host */ void N_VCopyFromDevice_Raja(N_Vector x) { int copy_fail; #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif copy_fail = SUNMemoryHelper_CopyAsync(NVEC_RAJA_MEMHELP(x), NVEC_RAJA_CONTENT(x)->host_data, NVEC_RAJA_CONTENT(x)->device_data, NVEC_RAJA_MEMSIZE(x), queue); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyFromDevice_Raja: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) ::sycl::queue* q = ::RAJA::sycl::detail::getQueue(); q->wait_and_throw(); #else SUNDIALS_GPU_VERIFY(SUNDIALS_GPU_PREFIX(StreamSynchronize)(0)); #endif } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to stdout */ void N_VPrint_Raja(N_Vector X) { N_VPrintFile_Raja(X, stdout); } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to outfile */ void 
N_VPrintFile_Raja(N_Vector X, FILE *outfile) { sunindextype i; for (i = 0; i < NVEC_RAJA_CONTENT(X)->length; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%35.32Lg\n", NVEC_RAJA_HDATAp(X)[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%19.16g\n", NVEC_RAJA_HDATAp(X)[i]); #else fprintf(outfile, "%11.8g\n", NVEC_RAJA_HDATAp(X)[i]); #endif } fprintf(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Raja(N_Vector w) { N_Vector v; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty_Raja(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Set content */ NVEC_RAJA_CONTENT(v)->length = NVEC_RAJA_CONTENT(w)->length; NVEC_RAJA_CONTENT(v)->host_data = NULL; NVEC_RAJA_CONTENT(v)->device_data = NULL; NVEC_RAJA_PRIVATE(v)->use_managed_mem = NVEC_RAJA_PRIVATE(w)->use_managed_mem; return(v); } N_Vector N_VClone_Raja(N_Vector w) { N_Vector v; v = NULL; v = N_VCloneEmpty_Raja(w); if (v == NULL) return(NULL); NVEC_RAJA_CONTENT(v)->mem_helper = SUNMemoryHelper_Clone(NVEC_RAJA_MEMHELP(w)); NVEC_RAJA_CONTENT(v)->own_helper = SUNTRUE; if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Raja: AllocateData returned nonzero\n"); N_VDestroy(v); return NULL; } return(v); } void N_VDestroy_Raja(N_Vector v) { N_VectorContent_Raja vc; N_PrivateVectorContent_Raja vcp; if (v == NULL) return; /* free ops structure */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } /* extract content */ vc = NVEC_RAJA_CONTENT(v); if (vc == NULL) { free(v); v = NULL; return; } /* free private content */ vcp = (N_PrivateVectorContent_Raja) vc->priv; if (vcp != NULL) { /* free items in private content */ FusedBuffer_Free(v); free(vcp); vc->priv = NULL; } /* free items in content */ if 
(NVEC_RAJA_MEMHELP(v)) { #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif SUNMemoryHelper_Dealloc(NVEC_RAJA_MEMHELP(v), vc->host_data, queue); vc->host_data = NULL; SUNMemoryHelper_Dealloc(NVEC_RAJA_MEMHELP(v), vc->device_data, queue); vc->device_data = NULL; if (vc->own_helper) SUNMemoryHelper_Destroy(vc->mem_helper); vc->mem_helper = NULL; } else { SUNDIALS_DEBUG_PRINT("WARNING in N_VDestroy_Raja: mem_helper was NULL when trying to dealloc data, this could result in a memory leak\n"); } /* free content struct */ free(vc); /* free vector */ free(v); return; } void N_VSpace_Raja(N_Vector X, sunindextype *lrw, sunindextype *liw) { *lrw = NVEC_RAJA_CONTENT(X)->length; *liw = 2; } void N_VConst_Raja(realtype c, N_Vector Z) { const sunindextype N = NVEC_RAJA_CONTENT(Z)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = c; }); } void N_VLinearSum_Raja(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const realtype *ydata = NVEC_RAJA_DDATAp(Y); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = a*xdata[i] + b*ydata[i]; } ); } void N_VProd_Raja(N_Vector X, N_Vector Y, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const realtype *ydata = NVEC_RAJA_DDATAp(Y); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = xdata[i] * ydata[i]; } ); } void N_VDiv_Raja(N_Vector X, N_Vector Y, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const realtype *ydata = NVEC_RAJA_DDATAp(Y); const 
sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = xdata[i] / ydata[i]; } ); } void N_VScale_Raja(realtype c, N_Vector X, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = c * xdata[i]; } ); } void N_VAbs_Raja(N_Vector X, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = abs(xdata[i]); } ); } void N_VInv_Raja(N_Vector X, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = ONE / xdata[i]; } ); } void N_VAddConst_Raja(N_Vector X, realtype b, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = xdata[i] + b; } ); } realtype N_VDotProd_Raja(N_Vector X, N_Vector Y) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const realtype *ydata = NVEC_RAJA_DDATAp(Y); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; RAJA::ReduceSum< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(0.0); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { gpu_result += xdata[i] * ydata[i] ; } ); return (static_cast(gpu_result)); } 
realtype N_VMaxNorm_Raja(N_Vector X) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; RAJA::ReduceMax< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(0.0); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { gpu_result.max(abs(xdata[i])); } ); return (static_cast(gpu_result)); } realtype N_VWSqrSumLocal_Raja(N_Vector X, N_Vector W) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const realtype *wdata = NVEC_RAJA_DDATAp(W); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; RAJA::ReduceSum< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(0.0); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { gpu_result += (xdata[i] * wdata[i] * xdata[i] * wdata[i]); } ); return (static_cast(gpu_result)); } realtype N_VWrmsNorm_Raja(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Raja(X, W); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; return std::sqrt(sum/N); } realtype N_VWSqrSumMaskLocal_Raja(N_Vector X, N_Vector W, N_Vector ID) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const realtype *wdata = NVEC_RAJA_DDATAp(W); const realtype *iddata = NVEC_RAJA_DDATAp(ID); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; RAJA::ReduceSum< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(0.0); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { if (iddata[i] > ZERO) gpu_result += (xdata[i] * wdata[i] * xdata[i] * wdata[i]); } ); return (static_cast(gpu_result)); } realtype N_VWrmsNormMask_Raja(N_Vector X, N_Vector W, N_Vector ID) { const realtype sum = N_VWSqrSumMaskLocal_Raja(X, W, ID); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; return std::sqrt(sum/N); } realtype N_VMin_Raja(N_Vector X) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; RAJA::ReduceMin< SUNDIALS_RAJA_REDUCE, realtype> 
gpu_result(std::numeric_limits::max()); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { gpu_result.min(xdata[i]); } ); return (static_cast(gpu_result)); } realtype N_VWL2Norm_Raja(N_Vector X, N_Vector W) { return std::sqrt(N_VWSqrSumLocal_Raja(X, W)); } realtype N_VL1Norm_Raja(N_Vector X) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; RAJA::ReduceSum< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(0.0); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { gpu_result += (abs(xdata[i])); } ); return (static_cast(gpu_result)); } void N_VCompare_Raja(realtype c, N_Vector X, N_Vector Z) { const realtype *xdata = NVEC_RAJA_DDATAp(X); const sunindextype N = NVEC_RAJA_CONTENT(X)->length; realtype *zdata = NVEC_RAJA_DDATAp(Z); RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = abs(xdata[i]) >= c ? 
ONE : ZERO; } ); } booleantype N_VInvTest_Raja(N_Vector x, N_Vector z) { const realtype *xdata = NVEC_RAJA_DDATAp(x); const sunindextype N = NVEC_RAJA_CONTENT(x)->length; realtype *zdata = NVEC_RAJA_DDATAp(z); RAJA::ReduceSum< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(ZERO); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { if (xdata[i] == ZERO) { gpu_result += ONE; } else { zdata[i] = ONE/xdata[i]; } } ); realtype minimum = static_cast(gpu_result); return (minimum < HALF); } booleantype N_VConstrMask_Raja(N_Vector c, N_Vector x, N_Vector m) { const realtype *cdata = NVEC_RAJA_DDATAp(c); const realtype *xdata = NVEC_RAJA_DDATAp(x); const sunindextype N = NVEC_RAJA_CONTENT(x)->length; realtype *mdata = NVEC_RAJA_DDATAp(m); RAJA::ReduceSum< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(ZERO); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { bool test = (abs(cdata[i]) > ONEPT5 && cdata[i]*xdata[i] <= ZERO) || (abs(cdata[i]) > HALF && cdata[i]*xdata[i] < ZERO); mdata[i] = test ? 
ONE : ZERO; gpu_result += mdata[i]; } ); realtype sum = static_cast(gpu_result); return(sum < HALF); } realtype N_VMinQuotient_Raja(N_Vector num, N_Vector denom) { const realtype *ndata = NVEC_RAJA_DDATAp(num); const realtype *ddata = NVEC_RAJA_DDATAp(denom); const sunindextype N = NVEC_RAJA_CONTENT(num)->length; RAJA::ReduceMin< SUNDIALS_RAJA_REDUCE, realtype> gpu_result(std::numeric_limits::max()); RAJA::forall< SUNDIALS_RAJA_EXEC_REDUCE >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { if (ddata[i] != ZERO) gpu_result.min(ndata[i]/ddata[i]); } ); return (static_cast(gpu_result)); } /* * ----------------------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------------------- */ int N_VLinearCombination_Raja(int nvec, realtype* c, N_Vector* X, N_Vector z) { const sunindextype N = NVEC_RAJA_CONTENT(z)->length; realtype* zdata = NVEC_RAJA_DDATAp(z); // Fused op workspace shortcuts realtype* cdata = NULL; realtype** xdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(z, nvec, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Raja: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(z, c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Raja: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(z, X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Raja: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(z)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Raja: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { zdata[i] = cdata[0] * xdata[0][i]; for (int j=1; jlength; const realtype *xdata = NVEC_RAJA_DDATAp(x); // Shortcuts to the fused op workspace realtype* 
cdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(x, nvec, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Raja: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(x, c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Raja: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Raja: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Raja: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(x)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Raja: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { for (int j=0; jlength; // Shortcuts to the fused op workspace realtype** xdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], 0, 3 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinaerSumVectorArray_Sycl: FusedBuffer_CopyToDevice 
returned nonzero\n"); return -1; } RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { for (int j=0; jlength; // Shortcuts to the fused op workspace arrays realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], nvec, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyReadArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { for (int j=0; jlength; // Shortcuts to the fused op workspace arrays realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], 0, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { for (int j=0; 
jlength; // Shortcuts to the fused op workspace realtype* cdata = NULL; realtype** xdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(X[0], nsum, nvec + 2 * nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(X[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Sycl: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(X[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Y, nvec, nsum, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Sycl: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Z, nvec, nsum, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Sycl: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(X[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { for (int j=0; jlength; // Shortcuts to the fused op workspace arrays realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], nsum, nvec + nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(Z[0], X, nvec, nsum, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in 
N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } RAJA::forall< SUNDIALS_RAJA_EXEC_STREAM >(RAJA::RangeSegment(zeroIdx, N), [=] RAJA_DEVICE (sunindextype i) { for (int j=0; j(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif copy_fail = SUNMemoryHelper_CopyAsync(NVEC_RAJA_MEMHELP(x), buf_mem, NVEC_RAJA_CONTENT(x)->device_data, NVEC_RAJA_MEMSIZE(x), queue); /* we synchronize with respect to the host, but only in this stream */ #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) ::sycl::queue* q = ::RAJA::sycl::detail::getQueue(); q->wait_and_throw(); #else cuerr = SUNDIALS_GPU_PREFIX(StreamSynchronize)(0); #endif SUNMemoryHelper_Dealloc(NVEC_RAJA_MEMHELP(x), buf_mem, queue); #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) return (copy_fail ? -1 : 0); #else return (!SUNDIALS_GPU_VERIFY(cuerr) || copy_fail ? 
-1 : 0); #endif } int N_VBufUnpack_Raja(N_Vector x, void *buf) { int copy_fail = 0; #if !defined(SUNDIALS_RAJA_BACKENDS_SYCL) SUNDIALS_GPU_PREFIX(Error_t) cuerr; #endif if (x == NULL || buf == NULL) return(-1); SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return(-1); #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif copy_fail = SUNMemoryHelper_CopyAsync(NVEC_RAJA_MEMHELP(x), NVEC_RAJA_CONTENT(x)->device_data, buf_mem, NVEC_RAJA_MEMSIZE(x), queue); /* we synchronize with respect to the host, but only in this stream */ #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) ::sycl::queue* q = ::RAJA::sycl::detail::getQueue(); q->wait_and_throw(); #else cuerr = SUNDIALS_GPU_PREFIX(StreamSynchronize)(0); #endif SUNMemoryHelper_Dealloc(NVEC_RAJA_MEMHELP(x), buf_mem, queue); #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) return (copy_fail ? -1 : 0); #else return (!SUNDIALS_GPU_VERIFY(cuerr) || copy_fail ? 
-1 : 0); #endif } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Raja; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Raja; v->ops->nvdotprodmulti = NULL; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Raja; v->ops->nvscalevectorarray = N_VScaleVectorArray_Raja; v->ops->nvconstvectorarray = N_VConstVectorArray_Raja; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Raja; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Raja; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_Raja; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_Raja(N_Vector v, booleantype tf) { /* check that 
vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_Raja; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Raja; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_Raja; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_Raja; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Raja; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ 
if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Raja; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } /* * ----------------------------------------------------------------- * Private utility functions * ----------------------------------------------------------------- */ int AllocateData(N_Vector v) { int alloc_fail = 0; N_VectorContent_Raja vc = NVEC_RAJA_CONTENT(v); N_PrivateVectorContent_Raja vcp = NVEC_RAJA_PRIVATE(v); if (N_VGetLength_Raja(v) == 0) return(0); #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif if (vcp->use_managed_mem) { alloc_fail = SUNMemoryHelper_Alloc(NVEC_RAJA_MEMHELP(v), &(vc->device_data), NVEC_RAJA_MEMSIZE(v), SUNMEMTYPE_UVM, queue); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed for SUNMEMTYPE_UVM\n"); } vc->host_data = SUNMemoryHelper_Alias(vc->device_data); } else { alloc_fail = SUNMemoryHelper_Alloc(NVEC_RAJA_MEMHELP(v), &(vc->host_data), NVEC_RAJA_MEMSIZE(v), SUNMEMTYPE_HOST, queue); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); } alloc_fail = SUNMemoryHelper_Alloc(NVEC_RAJA_MEMHELP(v), &(vc->device_data), NVEC_RAJA_MEMSIZE(v), SUNMEMTYPE_DEVICE, queue); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); } } return(alloc_fail ? 
-1 : 0); } static int FusedBuffer_Init(N_Vector v, int nreal, int nptr) { int alloc_fail = 0; booleantype alloc_mem = SUNFALSE; size_t bytes = nreal * sizeof(realtype) + nptr * sizeof(realtype*); // Get the vector private memory structure N_PrivateVectorContent_Raja vcp = NVEC_RAJA_PRIVATE(v); // Check if the existing memory is not large enough if (vcp->fused_buffer_bytes < bytes) { FusedBuffer_Free(v); alloc_mem = SUNTRUE; } if (alloc_mem) { #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif // allocate pinned memory on the host alloc_fail = SUNMemoryHelper_Alloc(NVEC_RAJA_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_PINNED, queue); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("WARNING in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n"); // if pinned alloc failed, allocate plain host memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_RAJA_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_HOST, queue); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); return -1; } } // allocate device memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_RAJA_MEMHELP(v), &(vcp->fused_buffer_dev), bytes, SUNMEMTYPE_DEVICE, queue); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); return -1; } // Store the size of the fused op buffer vcp->fused_buffer_bytes = bytes; } // Reset the buffer offset vcp->fused_buffer_offset = 0; return 0; } static int FusedBuffer_CopyRealArray(N_Vector v, realtype *rdata, int nval, realtype **shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Raja vcp = NVEC_RAJA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in 
FusedBuffer_CopyRealArray: Buffer offset is exceedes the buffer size\n"); return -1; } realtype* h_buffer = (realtype*) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nval; j++) { h_buffer[j] = rdata[j]; } // Set shortcut to the device buffer and update offset *shortcut = (realtype*) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); vcp->fused_buffer_offset += nval * sizeof(realtype); return 0; } static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Raja vcp = NVEC_RAJA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyPtrArray1D: Buffer offset is exceedes the buffer size\n"); return -1; return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { h_buffer[j] = NVEC_RAJA_DDATAp(X[j]); } // Set shortcut to the device buffer and update offset *shortcut = (realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); vcp->fused_buffer_offset += nvec * sizeof(realtype*); return 0; } static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Raja vcp = NVEC_RAJA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyPtrArray2D: Buffer offset is exceedes the buffer size\n"); return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { for (int k = 0; k < nsum; k++) { h_buffer[j * nsum + k] = NVEC_RAJA_DDATAp(X[k][j]); } } // Set shortcut to the device buffer and update offset *shortcut = 
(realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); // Update the offset vcp->fused_buffer_offset += nvec * nsum * sizeof(realtype*); return 0; } static int FusedBuffer_CopyToDevice(N_Vector v) { // Get the vector private memory structure N_PrivateVectorContent_Raja vcp = NVEC_RAJA_PRIVATE(v); #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif // Copy the fused buffer to the device int copy_fail = SUNMemoryHelper_CopyAsync(NVEC_RAJA_MEMHELP(v), vcp->fused_buffer_dev, vcp->fused_buffer_host, vcp->fused_buffer_offset, queue); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyToDevice: SUNMemoryHelper_CopyAsync failed\n"); return -1; } return 0; } static int FusedBuffer_Free(N_Vector v) { N_PrivateVectorContent_Raja vcp = NVEC_RAJA_PRIVATE(v); if (vcp == NULL) return 0; #if defined(SUNDIALS_RAJA_BACKENDS_SYCL) void* queue = static_cast(::RAJA::sycl::detail::getQueue()); #else void* queue = nullptr; #endif if (vcp->fused_buffer_host) { SUNMemoryHelper_Dealloc(NVEC_RAJA_MEMHELP(v), vcp->fused_buffer_host, queue); vcp->fused_buffer_host = NULL; } if (vcp->fused_buffer_dev) { SUNMemoryHelper_Dealloc(NVEC_RAJA_MEMHELP(v), vcp->fused_buffer_dev, queue); vcp->fused_buffer_dev = NULL; } vcp->fused_buffer_bytes = 0; vcp->fused_buffer_offset = 0; return 0; } } // extern "C" StanHeaders/src/nvector/parhyp/0000755000176200001440000000000014511135055016225 5ustar liggesusersStanHeaders/src/nvector/parhyp/nvector_parhyp.c0000644000176200001440000013611314645137106021450 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL and Jean M. Sexton @ SMU * ----------------------------------------------------------------- * Based on work by Scott D. Cohen, Alan C. 
Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a HYPRE ParVector wrapper * for the NVECTOR package. * -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* * ----------------------------------------------------------------- * Simplifying macros NV_CONTENT_PH, NV_DATA_PH, NV_LOCLENGTH_PH, * NV_GLOBLENGTH_PH, and NV_COMM_PH * ----------------------------------------------------------------- * In the descriptions below, the following user declarations * are assumed: * * N_Vector v; * sunindextype v_len, s_len, i; * * (1) NV_CONTENT_PH * * This routines gives access to the contents of the HYPRE * vector wrapper (the N_Vector). * * The assignment v_cont = NV_CONTENT_PH(v) sets v_cont to be * a pointer to the N_Vector content structure. * * (2) NV_DATA_PH, NV_LOCLENGTH_PH, NV_GLOBLENGTH_PH, and NV_COMM_PH * * These routines give access to the individual parts of * the content structure of a parhyp N_Vector. * * The assignment v_llen = NV_LOCLENGTH_PH(v) sets v_llen to * be the length of the local part of the vector v. The call * NV_LOCLENGTH_PH(v) = llen_v generally should NOT be used! It * will change locally stored value with the HYPRE local vector * length, but it will NOT change the length of the actual HYPRE * local vector. * * The assignment v_glen = NV_GLOBLENGTH_PH(v) sets v_glen to * be the global length of the vector v. 
The call * NV_GLOBLENGTH_PH(v) = glen_v generally should NOT be used! It * will change locally stored value with the HYPRE parallel vector * length, but it will NOT change the length of the actual HYPRE * parallel vector. * * The assignment v_comm = NV_COMM_PH(v) sets v_comm to be the * MPI communicator of the vector v. The assignment * NV_COMM_C(v) = comm_v sets the MPI communicator of v to be * NV_COMM_PH(v) = comm_v generally should NOT be used! It * will change locally stored value with the HYPRE parallel vector * communicator, but it will NOT change the communicator of the * actual HYPRE parallel vector. * * (3) NV_DATA_PH, NV_HYPRE_PARVEC_PH * * The assignment v_data = NV_DATA_PH(v) sets v_data to be * a pointer to the first component of the data inside the * local vector of the HYPRE_parhyp vector for the vector v. * The assignment NV_DATA_PH(v) = data_v should NOT be used. * Instead, use NV_HYPRE_PARVEC_PH to obtain pointer to HYPRE * vector and then use HYPRE functions to manipulate vector data. * * The assignment v_parhyp = NV_HYPRE_PARVEC_PH(v) sets v_parhyp * to be a pointer to HYPRE_ParVector of vector v. The assignment * NV_HYPRE_PARVEC_PH(v) = parhyp_v sets pointer to * HYPRE_ParVector of vector v to be parhyp_v. * * ----------------------------------------------------------------- */ #define NV_CONTENT_PH(v) ( (N_VectorContent_ParHyp)(v->content) ) #define NV_LOCLENGTH_PH(v) ( NV_CONTENT_PH(v)->local_length ) #define NV_GLOBLENGTH_PH(v) ( NV_CONTENT_PH(v)->global_length ) #define NV_OWN_PARVEC_PH(v) ( NV_CONTENT_PH(v)->own_parvector ) #define NV_HYPRE_PARVEC_PH(v) ( NV_CONTENT_PH(v)->x ) #define NV_DATA_PH(v) ( NV_HYPRE_PARVEC_PH(v) == NULL ? 
NULL : hypre_VectorData(hypre_ParVectorLocalVector(NV_HYPRE_PARVEC_PH(v))) ) #define NV_COMM_PH(v) ( NV_CONTENT_PH(v)->comm ) /* Private function prototypes */ /* z=x+y */ static void VSum_ParHyp(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VDiff_ParHyp(N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleSum_ParHyp(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VScaleDiff_ParHyp(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin1_ParHyp(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void VLin2_ParHyp(realtype a, N_Vector x, N_Vector y, N_Vector z); /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. */ N_Vector_ID N_VGetVectorID_ParHyp(N_Vector v) { return SUNDIALS_NVEC_PARHYP; } /* ---------------------------------------------------------------- * Function to create a new parhyp vector without underlying * HYPRE vector. 
*/ N_Vector N_VNewEmpty_ParHyp(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx) { N_Vector v; N_VectorContent_ParHyp content; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_ParHyp; v->ops->nvclone = N_VClone_ParHyp; v->ops->nvcloneempty = N_VCloneEmpty_ParHyp; v->ops->nvdestroy = N_VDestroy_ParHyp; v->ops->nvspace = N_VSpace_ParHyp; v->ops->nvgetarraypointer = N_VGetArrayPointer_ParHyp; v->ops->nvsetarraypointer = N_VSetArrayPointer_ParHyp; v->ops->nvgetcommunicator = N_VGetCommunicator_ParHyp; v->ops->nvgetlength = N_VGetLength_ParHyp; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_ParHyp; v->ops->nvconst = N_VConst_ParHyp; v->ops->nvprod = N_VProd_ParHyp; v->ops->nvdiv = N_VDiv_ParHyp; v->ops->nvscale = N_VScale_ParHyp; v->ops->nvabs = N_VAbs_ParHyp; v->ops->nvinv = N_VInv_ParHyp; v->ops->nvaddconst = N_VAddConst_ParHyp; v->ops->nvdotprod = N_VDotProd_ParHyp; v->ops->nvmaxnorm = N_VMaxNorm_ParHyp; v->ops->nvwrmsnormmask = N_VWrmsNormMask_ParHyp; v->ops->nvwrmsnorm = N_VWrmsNorm_ParHyp; v->ops->nvmin = N_VMin_ParHyp; v->ops->nvwl2norm = N_VWL2Norm_ParHyp; v->ops->nvl1norm = N_VL1Norm_ParHyp; v->ops->nvcompare = N_VCompare_ParHyp; v->ops->nvinvtest = N_VInvTest_ParHyp; v->ops->nvconstrmask = N_VConstrMask_ParHyp; v->ops->nvminquotient = N_VMinQuotient_ParHyp; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProdLocal_ParHyp; v->ops->nvmaxnormlocal = N_VMaxNormLocal_ParHyp; v->ops->nvminlocal = N_VMinLocal_ParHyp; v->ops->nvl1normlocal = N_VL1NormLocal_ParHyp; v->ops->nvinvtestlocal = N_VInvTestLocal_ParHyp; v->ops->nvconstrmasklocal = N_VConstrMaskLocal_ParHyp; v->ops->nvminquotientlocal = N_VMinQuotientLocal_ParHyp; v->ops->nvwsqrsumlocal = 
N_VWSqrSumLocal_ParHyp; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_ParHyp; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_ParHyp; v->ops->nvdotprodmultiallreduce = N_VDotProdMultiAllReduce_ParHyp; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_ParHyp; v->ops->nvbufpack = N_VBufPack_ParHyp; v->ops->nvbufunpack = N_VBufUnpack_ParHyp; /* Create content */ content = NULL; content = (N_VectorContent_ParHyp) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Attach lengths and communicator */ content->local_length = local_length; content->global_length = global_length; content->comm = comm; content->own_parvector = SUNFALSE; content->x = NULL; return(v); } /* ---------------------------------------------------------------- * Function to create a parhyp N_Vector wrapper around user * supplie HYPRE vector. */ N_Vector N_VMake_ParHyp(HYPRE_ParVector x, SUNContext sunctx) { N_Vector v; MPI_Comm comm = hypre_ParVectorComm(x); sunindextype global_length = (sunindextype) hypre_ParVectorGlobalSize(x); sunindextype local_begin = (sunindextype) hypre_ParVectorFirstIndex(x); sunindextype local_end = (sunindextype) hypre_ParVectorLastIndex(x); sunindextype local_length = local_end - local_begin + 1; v = NULL; v = N_VNewEmpty_ParHyp(comm, local_length, global_length, sunctx); if (v == NULL) return(NULL); NV_OWN_PARVEC_PH(v) = SUNFALSE; NV_HYPRE_PARVEC_PH(v) = x; return(v); } /* ---------------------------------------------------------------- * Function to create an array of new parhyp vectors. */ N_Vector *N_VCloneVectorArray_ParHyp(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------- * Function to create an array of new parhyp vector wrappers * without uderlying HYPRE vectors. 
*/ N_Vector *N_VCloneVectorArrayEmpty_ParHyp(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_ParHyp */ void N_VDestroyVectorArray_ParHyp(N_Vector *vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------- * Extract HYPRE vector */ HYPRE_ParVector N_VGetVector_ParHyp(N_Vector v) { return NV_HYPRE_PARVEC_PH(v); } /* ---------------------------------------------------------------- * Function to print a parhyp vector. * TODO: Consider using a HYPRE function for this. */ void N_VPrint_ParHyp(N_Vector x) { N_VPrintFile_ParHyp(x, stdout); } /* ---------------------------------------------------------------- * Function to print a parhyp vector. * TODO: Consider using a HYPRE function for this. */ void N_VPrintFile_ParHyp(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%g\n", xd[i]); #else STAN_SUNDIALS_FPRINTF(outfile, "%g\n", xd[i]); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_ParHyp(N_Vector w) { N_Vector v; N_VectorContent_ParHyp content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_ParHyp) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach 
content */ v->content = content; /* Initialize content */ content->local_length = NV_LOCLENGTH_PH(w); content->global_length = NV_GLOBLENGTH_PH(w); content->comm = NV_COMM_PH(w); content->own_parvector = SUNFALSE; content->x = NULL; return(v); } /* * Clone HYPRE vector wrapper. * */ N_Vector N_VClone_ParHyp(N_Vector w) { N_Vector v; HYPRE_ParVector vx; const HYPRE_ParVector wx = NV_HYPRE_PARVEC_PH(w); v = NULL; v = N_VCloneEmpty_ParHyp(w); if (v==NULL) return(NULL); vx = hypre_ParVectorCreate(wx->comm, wx->global_size, wx->partitioning); hypre_ParVectorInitialize(vx); #ifdef hypre_ParVectorOwnsPartitioning hypre_ParVectorSetPartitioningOwner(vx, 0); #endif hypre_ParVectorSetDataOwner(vx, 1); hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(vx), 1); NV_HYPRE_PARVEC_PH(v) = vx; NV_OWN_PARVEC_PH(v) = SUNTRUE; return(v); } void N_VDestroy_ParHyp(N_Vector v) { if (v == NULL) return; /* free content */ if (v->content != NULL) { /* free the hypre parvector if it's owned by the vector wrapper */ if (NV_OWN_PARVEC_PH(v) && NV_HYPRE_PARVEC_PH(v) != NULL) { hypre_ParVectorDestroy(NV_HYPRE_PARVEC_PH(v)); NV_HYPRE_PARVEC_PH(v) = NULL; } free(v->content); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace_ParHyp(N_Vector v, sunindextype *lrw, sunindextype *liw) { MPI_Comm comm; int npes; comm = NV_COMM_PH(v); MPI_Comm_size(comm, &npes); *lrw = NV_GLOBLENGTH_PH(v); *liw = 2*npes; return; } /* * This function is disabled in ParHyp implementation and returns NULL. * The user should extract HYPRE vector using N_VGetVector_ParHyp and * then use HYPRE functions to get pointer to raw data of the local HYPRE * vector. */ realtype *N_VGetArrayPointer_ParHyp(N_Vector v) { return NULL; /* ((realtype *) NV_DATA_PH(v)); */ } /* * This method is not implemented for HYPRE vector wrapper. * TODO: Put error handler in the function body. 
*/ void N_VSetArrayPointer_ParHyp(realtype *v_data, N_Vector v) { /* Not implemented for Hypre vector */ } void *N_VGetCommunicator_ParHyp(N_Vector v) { return((void *) &(NV_COMM_PH(v))); } sunindextype N_VGetLength_ParHyp(N_Vector v) { return(NV_GLOBLENGTH_PH(v)); } /* * Computes z[i] = a*x[i] + b*y[i] * */ void N_VLinearSum_ParHyp(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i, N; realtype c, *xd, *yd, *zd; N_Vector v1, v2; booleantype test; xd = yd = zd = NULL; if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */ HYPRE_Complex alpha=a; HYPRE_ParVectorAxpy( alpha, (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x), (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(y)); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ HYPRE_Complex beta=b; HYPRE_ParVectorAxpy( beta, (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(y), (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x)); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_ParHyp(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? x : y; VDiff_ParHyp(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin1_ParHyp(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? 
x : y; VLin2_ParHyp(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_ParHyp(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_ParHyp(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); yd = NV_DATA_PH(y); zd = NV_DATA_PH(z); for (i = 0; i < N; i++) zd[i] = (a*xd[i])+(b*yd[i]); return; } void N_VConst_ParHyp(realtype c, N_Vector z) { HYPRE_Complex value = c; HYPRE_ParVectorSetConstantValues( (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(z), value); return; } /* ---------------------------------------------------------------------------- * Compute componentwise product z[i] = x[i]*y[i] */ void N_VProd_ParHyp(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); yd = NV_DATA_PH(y); zd = NV_DATA_PH(z); for (i = 0; i < N; i++) zd[i] = xd[i]*yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise division z[i] = x[i]/y[i] */ void N_VDiv_ParHyp(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); yd = NV_DATA_PH(y); zd = NV_DATA_PH(z); for (i = 0; i < N; i++) zd[i] = xd[i]/yd[i]; return; } void N_VScale_ParHyp(realtype c, N_Vector x, N_Vector z) { HYPRE_Complex value = c; if (x != z) { HYPRE_ParVectorCopy((HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x), (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(z)); } HYPRE_ParVectorScale(value, (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(z)); return; } void N_VAbs_ParHyp(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); zd = 
NV_DATA_PH(z); for (i = 0; i < N; i++) zd[i] = SUNRabs(xd[i]); return; } void N_VInv_ParHyp(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); zd = NV_DATA_PH(z); for (i = 0; i < N; i++) zd[i] = ONE/xd[i]; return; } void N_VAddConst_ParHyp(N_Vector x, realtype b, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); zd = NV_DATA_PH(z); for (i = 0; i < N; i++) zd[i] = xd[i] + b; return; } realtype N_VDotProdLocal_ParHyp(N_Vector x, N_Vector y) { sunindextype i, N; realtype sum, *xd, *yd; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); yd = NV_DATA_PH(y); sum = ZERO; for (i = 0; i < N; i++) sum += xd[i]*yd[i]; return(sum); } realtype N_VDotProd_ParHyp(N_Vector x, N_Vector y) { HYPRE_Real gsum; HYPRE_ParVectorInnerProd( (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(x), (HYPRE_ParVector) NV_HYPRE_PARVEC_PH(y), &gsum); return(gsum); } realtype N_VMaxNormLocal_ParHyp(N_Vector x) { sunindextype i, N; realtype max, *xd; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); max = ZERO; for (i = 0; i < N; i++) if (SUNRabs(xd[i]) > max) max = SUNRabs(xd[i]); return(max); } realtype N_VMaxNorm_ParHyp(N_Vector x) { realtype lmax, gmax; lmax = N_VMaxNormLocal_ParHyp(x); MPI_Allreduce(&lmax, &gmax, 1, MPI_SUNREALTYPE, MPI_MAX, NV_COMM_PH(x)); return(gmax); } realtype N_VWSqrSumLocal_ParHyp(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, prodi, *xd, *wd; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); wd = NV_DATA_PH(w); sum = ZERO; for (i = 0; i < N; i++) { prodi = xd[i]*wd[i]; sum += SUNSQR(prodi); } return(sum); } realtype N_VWrmsNorm_ParHyp(N_Vector x, N_Vector w) { realtype lsum, gsum; lsum = N_VWSqrSumLocal_ParHyp(x, w); MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_PH(x)); return(SUNRsqrt(gsum/(NV_GLOBLENGTH_PH(x)))); } realtype N_VWSqrSumMaskLocal_ParHyp(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, prodi, *xd, *wd, 
*idd; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); wd = NV_DATA_PH(w); idd = NV_DATA_PH(id); sum = ZERO; for (i = 0; i < N; i++) { if (idd[i] > ZERO) { prodi = xd[i]*wd[i]; sum += SUNSQR(prodi); } } return(sum); } realtype N_VWrmsNormMask_ParHyp(N_Vector x, N_Vector w, N_Vector id) { realtype lsum, gsum; lsum = N_VWSqrSumMaskLocal_ParHyp(x, w, id); MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_PH(x)); return(SUNRsqrt(gsum/(NV_GLOBLENGTH_PH(x)))); } realtype N_VMinLocal_ParHyp(N_Vector x) { sunindextype i, N; realtype min, *xd; xd = NULL; N = NV_LOCLENGTH_PH(x); min = BIG_REAL; if (N > 0) { xd = NV_DATA_PH(x); min = xd[0]; for (i = 1; i < N; i++) if (xd[i] < min) min = xd[i]; } return(min); } realtype N_VMin_ParHyp(N_Vector x) { realtype lmin, gmin; lmin = N_VMinLocal_ParHyp(x); MPI_Allreduce(&lmin, &gmin, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_PH(x)); return(gmin); } realtype N_VWL2Norm_ParHyp(N_Vector x, N_Vector w) { realtype lsum, gsum; lsum = N_VWSqrSumLocal_ParHyp(x, w); MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_PH(x)); return(SUNRsqrt(gsum)); } realtype N_VL1NormLocal_ParHyp(N_Vector x) { sunindextype i, N; realtype sum, *xd; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); sum = ZERO; for (i = 0; i= c) ? ONE : ZERO; } return; } booleantype N_VInvTestLocal_ParHyp(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd, val; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); zd = NV_DATA_PH(z); val = ONE; for (i = 0; i < N; i++) { if (xd[i] == ZERO) val = ZERO; else zd[i] = ONE/xd[i]; } if (val == ZERO) return(SUNFALSE); else return(SUNTRUE); } booleantype N_VInvTest_ParHyp(N_Vector x, N_Vector z) { realtype val, gval; val = (N_VInvTestLocal_ParHyp(x, z)) ? 
ONE : ZERO; MPI_Allreduce(&val, &gval, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_PH(x)); if (gval == ZERO) return(SUNFALSE); else return(SUNTRUE); } booleantype N_VConstrMaskLocal_ParHyp(N_Vector c, N_Vector x, N_Vector m) { sunindextype i, N; realtype temp; realtype *cd, *xd, *md; booleantype test; N = NV_LOCLENGTH_PH(x); xd = NV_DATA_PH(x); cd = NV_DATA_PH(c); md = NV_DATA_PH(m); temp = ZERO; for (i = 0; i < N; i++) { md[i] = ZERO; /* Continue if no constraints were set for the variable */ if (cd[i] == ZERO) continue; /* Check if a set constraint has been violated */ test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) || (SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO); if (test) { temp = md[i] = ONE; } } /* Return false if any constraint was violated */ return (temp == ONE) ? SUNFALSE : SUNTRUE; } booleantype N_VConstrMask_ParHyp(N_Vector c, N_Vector x, N_Vector m) { realtype temp, temp2; temp = (N_VConstrMaskLocal_ParHyp(c, x, m)) ? ZERO : ONE; MPI_Allreduce(&temp, &temp2, 1, MPI_SUNREALTYPE, MPI_MAX, NV_COMM_PH(x)); return (temp2 == ONE) ? 
SUNFALSE : SUNTRUE; } realtype N_VMinQuotientLocal_ParHyp(N_Vector num, N_Vector denom) { booleantype notEvenOnce; sunindextype i, N; realtype *nd, *dd, min; nd = dd = NULL; N = NV_LOCLENGTH_PH(num); nd = NV_DATA_PH(num); dd = NV_DATA_PH(denom); notEvenOnce = SUNTRUE; min = BIG_REAL; for (i = 0; i < N; i++) { if (dd[i] == ZERO) continue; else { if (!notEvenOnce) min = SUNMIN(min, nd[i]/dd[i]); else { min = nd[i]/dd[i]; notEvenOnce = SUNFALSE; } } } return(min); } realtype N_VMinQuotient_ParHyp(N_Vector num, N_Vector denom) { realtype lmin, gmin; lmin = N_VMinQuotientLocal_ParHyp(num, denom); MPI_Allreduce(&lmin, &gmin, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_PH(num)); return(gmin); } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_ParHyp(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i; sunindextype j, N; realtype* zd=NULL; realtype* xd=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_ParHyp(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_ParHyp(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LOCLENGTH_PH(z); zd = NV_DATA_PH(z); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((X[0] == z) && (c[0] == ONE)) { for (i=1; i ZERO) nrm[i] += SUNSQR(xd[j] * wd[j]); } } retval = MPI_Allreduce(MPI_IN_PLACE, nrm, nvec, MPI_SUNREALTYPE, MPI_SUM, comm); for (i=0; i 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_ParHyp(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LOCLENGTH_PH(X[0]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { for (i=0; i 1 * 
-------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; jops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_ParHyp; v->ops->nvscaleaddmulti = N_VScaleAddMulti_ParHyp; v->ops->nvdotprodmulti = N_VDotProdMulti_ParHyp; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_ParHyp; v->ops->nvscalevectorarray = N_VScaleVectorArray_ParHyp; v->ops->nvconstvectorarray = N_VConstVectorArray_ParHyp; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_ParHyp; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_ParHyp; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_ParHyp; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_ParHyp; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_ParHyp; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_ParHyp; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int 
N_VEnableScaleAddMulti_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_ParHyp; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmulti = N_VDotProdMulti_ParHyp; else v->ops->nvdotprodmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_ParHyp; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_ParHyp; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_ParHyp; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* 
check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_ParHyp; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_ParHyp; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_ParHyp; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_ParHyp; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } int N_VEnableDotProdMultiLocal_ParHyp(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_ParHyp; else v->ops->nvdotprodmultilocal = NULL; /* return success */ return(0); } StanHeaders/src/nvector/mpiplusx/0000755000176200001440000000000014511135055016603 5ustar 
liggesusersStanHeaders/src/nvector/mpiplusx/nvector_mpiplusx.c0000644000176200001440000000437714645137106022412 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the MPIPlusX NVECTOR. * -----------------------------------------------------------------*/ #include #include #include #include #include #define MPIPLUSX_LOCAL_VECTOR(v) ( N_VGetSubvector_MPIManyVector(v, 0) ) N_Vector N_VMake_MPIPlusX(MPI_Comm comm, N_Vector X, SUNContext sunctx) { N_Vector v; if (X == NULL) return NULL; v = NULL; v = N_VMake_MPIManyVector(comm, 1, &X, sunctx); if (v == NULL) return NULL; /* override certain ops */ v->ops->nvgetvectorid = N_VGetVectorID_MPIPlusX; v->ops->nvgetarraypointer = N_VGetArrayPointer_MPIPlusX; v->ops->nvsetarraypointer = N_VSetArrayPointer_MPIPlusX; /* debugging functions */ if (X->ops->nvprint) v->ops->nvprint = N_VPrint_MPIPlusX; if (X->ops->nvprintfile) v->ops->nvprintfile = N_VPrintFile_MPIPlusX; return v; } N_Vector_ID N_VGetVectorID_MPIPlusX(N_Vector v) { return SUNDIALS_NVEC_MPIPLUSX; } realtype* N_VGetArrayPointer_MPIPlusX(N_Vector v) { return N_VGetSubvectorArrayPointer_MPIManyVector(v, 0); } void N_VSetArrayPointer_MPIPlusX(realtype *vdata, N_Vector v) { N_VSetSubvectorArrayPointer_MPIManyVector(vdata, v, 0); } void N_VPrint_MPIPlusX(N_Vector v) { N_Vector x = MPIPLUSX_LOCAL_VECTOR(v); if (x->ops->nvprint) x->ops->nvprint(x); } void N_VPrintFile_MPIPlusX(N_Vector v, FILE *outfile) { N_Vector x = MPIPLUSX_LOCAL_VECTOR(v); if 
(x->ops->nvprintfile) x->ops->nvprintfile(x, outfile); } N_Vector N_VGetLocalVector_MPIPlusX(N_Vector v) { return MPIPLUSX_LOCAL_VECTOR(v); } sunindextype N_VGetLocalLength_MPIPlusX(N_Vector v) { return N_VGetLength(MPIPLUSX_LOCAL_VECTOR(v)); } StanHeaders/src/nvector/mpiplusx/fmod/0000755000176200001440000000000014511135055017530 5ustar liggesusersStanHeaders/src/nvector/mpiplusx/fmod/fnvector_mpiplusx_mod.c0000644000176200001440000002135414645137106024336 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" #include "nvector/nvector_mpiplusx.h" SWIGEXPORT N_Vector _wrap_FN_VMake_MPIPlusX(int const *farg1, N_Vector farg2, void *farg3) { N_Vector fresult ; MPI_Comm arg1 ; N_Vector arg2 = (N_Vector) 0 ; SUNContext arg3 = (SUNContext) 0 ; N_Vector result; #ifdef SUNDIALS_MPI_ENABLED arg1 = MPI_Comm_f2c((MPI_Fint)(*farg1)); #else arg1 = *farg1; #endif arg2 = (N_Vector)(farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VMake_MPIPlusX(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT int _wrap_FN_VGetVectorID_MPIPlusX(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = (N_Vector_ID)N_VGetVectorID_MPIPlusX(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double * 
_wrap_FN_VGetArrayPointer_MPIPlusX(N_Vector farg1) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); result = (realtype *)N_VGetArrayPointer_MPIPlusX(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VSetArrayPointer_MPIPlusX(double *farg1, N_Vector farg2) { realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); N_VSetArrayPointer_MPIPlusX(arg1,arg2); } SWIGEXPORT void _wrap_FN_VPrint_MPIPlusX(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VPrint_MPIPlusX(arg1); } SWIGEXPORT void _wrap_FN_VPrintFile_MPIPlusX(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile_MPIPlusX(arg1,arg2); } SWIGEXPORT N_Vector _wrap_FN_VGetLocalVector_MPIPlusX(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VGetLocalVector_MPIPlusX(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLocalLength_MPIPlusX(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLocalLength_MPIPlusX(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableFusedOps_MPIPlusX(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableFusedOps_MPIPlusX(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/nvector/mpiplusx/fmod/fnvector_mpiplusx_mod.f900000644000176200001440000001437214645137106024514 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! 
--------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fnvector_mpiplusx_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FN_VMake_MPIPlusX public :: FN_VGetVectorID_MPIPlusX public :: FN_VGetArrayPointer_MPIPlusX public :: FN_VSetArrayPointer_MPIPlusX public :: FN_VPrint_MPIPlusX public :: FN_VPrintFile_MPIPlusX public :: FN_VGetLocalVector_MPIPlusX public :: FN_VGetLocalLength_MPIPlusX public :: FN_VEnableFusedOps_MPIPlusX ! 
WRAPPER DECLARATIONS interface function swigc_FN_VMake_MPIPlusX(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VMake_MPIPlusX") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VGetVectorID_MPIPlusX(farg1) & bind(C, name="_wrap_FN_VGetVectorID_MPIPlusX") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FN_VGetArrayPointer_MPIPlusX(farg1) & bind(C, name="_wrap_FN_VGetArrayPointer_MPIPlusX") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VSetArrayPointer_MPIPlusX(farg1, farg2) & bind(C, name="_wrap_FN_VSetArrayPointer_MPIPlusX") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VPrint_MPIPlusX(farg1) & bind(C, name="_wrap_FN_VPrint_MPIPlusX") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile_MPIPlusX(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile_MPIPlusX") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VGetLocalVector_MPIPlusX(farg1) & bind(C, name="_wrap_FN_VGetLocalVector_MPIPlusX") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VGetLocalLength_MPIPlusX(farg1) & bind(C, name="_wrap_FN_VGetLocalLength_MPIPlusX") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FN_VEnableFusedOps_MPIPlusX(farg1, farg2) & bind(C, name="_wrap_FN_VEnableFusedOps_MPIPlusX") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: 
fresult end function end interface contains ! MODULE SUBPROGRAMS function FN_VMake_MPIPlusX(comm, x, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer :: comm type(N_Vector), target, intent(inout) :: x type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = int(comm, C_INT) farg2 = c_loc(x) farg3 = sunctx fresult = swigc_FN_VMake_MPIPlusX(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FN_VGetVectorID_MPIPlusX(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(N_Vector_ID) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetVectorID_MPIPlusX(farg1) swig_result = fresult end function function FN_VGetArrayPointer_MPIPlusX(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetArrayPointer_MPIPlusX(farg1) call c_f_pointer(fresult, swig_result, [1]) end function subroutine FN_VSetArrayPointer_MPIPlusX(vdata, v) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(*), target, intent(inout) :: vdata type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(vdata(1)) farg2 = c_loc(v) call swigc_FN_VSetArrayPointer_MPIPlusX(farg1, farg2) end subroutine subroutine FN_VPrint_MPIPlusX(x) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(C_PTR) :: farg1 farg1 = c_loc(x) call swigc_FN_VPrint_MPIPlusX(farg1) end subroutine subroutine FN_VPrintFile_MPIPlusX(x, outfile) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = outfile call 
swigc_FN_VPrintFile_MPIPlusX(farg1, farg2) end subroutine function FN_VGetLocalVector_MPIPlusX(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLocalVector_MPIPlusX(farg1) call c_f_pointer(fresult, swig_result) end function function FN_VGetLocalLength_MPIPlusX(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLocalLength_MPIPlusX(farg1) swig_result = fresult end function function FN_VEnableFusedOps_MPIPlusX(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableFusedOps_MPIPlusX(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/nvector/openmp/0000755000176200001440000000000014511135055016220 5ustar liggesusersStanHeaders/src/nvector/openmp/fmod/0000755000176200001440000000000014511135055017145 5ustar liggesusersStanHeaders/src/nvector/openmp/fmod/fnvector_openmp_mod.c0000644000176200001440000006261514645137106023375 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. 
* ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # 
endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && 
!defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. 
*/ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" #include "nvector/nvector_openmp.h" SWIGEXPORT N_Vector _wrap_FN_VNew_OpenMP(int64_t const *farg1, int const *farg2, void *farg3) { N_Vector fresult ; sunindextype arg1 ; int arg2 ; SUNContext arg3 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (int)(*farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VNew_OpenMP(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VNewEmpty_OpenMP(int64_t const *farg1, int const *farg2, void *farg3) { N_Vector fresult ; sunindextype arg1 ; int arg2 ; SUNContext arg3 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (int)(*farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VNewEmpty_OpenMP(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VMake_OpenMP(int64_t const *farg1, double *farg2, int const *farg3, void *farg4) { N_Vector fresult ; sunindextype arg1 ; realtype *arg2 = (realtype *) 0 ; int arg3 ; SUNContext arg4 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (realtype *)(farg2); arg3 = (int)(*farg3); arg4 = (SUNContext)(farg4); result = (N_Vector)N_VMake_OpenMP(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLength_OpenMP(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLength_OpenMP(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VPrint_OpenMP(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VPrint_OpenMP(arg1); } SWIGEXPORT void 
_wrap_FN_VPrintFile_OpenMP(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile_OpenMP(arg1,arg2); } SWIGEXPORT int _wrap_FN_VGetVectorID_OpenMP(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = (N_Vector_ID)N_VGetVectorID_OpenMP(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FN_VCloneEmpty_OpenMP(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VCloneEmpty_OpenMP(arg1); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VClone_OpenMP(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VClone_OpenMP(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroy_OpenMP(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VDestroy_OpenMP(arg1); } SWIGEXPORT void _wrap_FN_VSpace_OpenMP(N_Vector farg1, int64_t *farg2, int64_t *farg3) { N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; sunindextype *arg3 = (sunindextype *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); arg3 = (sunindextype *)(farg3); N_VSpace_OpenMP(arg1,arg2,arg3); } SWIGEXPORT double * _wrap_FN_VGetArrayPointer_OpenMP(N_Vector farg1) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); result = (realtype *)N_VGetArrayPointer_OpenMP(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VSetArrayPointer_OpenMP(double *farg1, N_Vector farg2) { realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); N_VSetArrayPointer_OpenMP(arg1,arg2); } SWIGEXPORT void _wrap_FN_VLinearSum_OpenMP(double const *farg1, N_Vector farg2, double const *farg3, 
N_Vector farg4, N_Vector farg5) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); N_VLinearSum_OpenMP(arg1,arg2,arg3,arg4,arg5); } SWIGEXPORT void _wrap_FN_VConst_OpenMP(double const *farg1, N_Vector farg2) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); N_VConst_OpenMP(arg1,arg2); } SWIGEXPORT void _wrap_FN_VProd_OpenMP(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VProd_OpenMP(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VDiv_OpenMP(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VDiv_OpenMP(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VScale_OpenMP(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VScale_OpenMP(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VAbs_OpenMP(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VAbs_OpenMP(arg1,arg2); } SWIGEXPORT void _wrap_FN_VInv_OpenMP(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VInv_OpenMP(arg1,arg2); } SWIGEXPORT void _wrap_FN_VAddConst_OpenMP(N_Vector farg1, double const *farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; realtype arg2 ; N_Vector 
arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); N_VAddConst_OpenMP(arg1,arg2,arg3); } SWIGEXPORT double _wrap_FN_VDotProd_OpenMP(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProd_OpenMP(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNorm_OpenMP(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNorm_OpenMP(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNorm_OpenMP(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWrmsNorm_OpenMP(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNormMask_OpenMP(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWrmsNormMask_OpenMP(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMin_OpenMP(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMin_OpenMP(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWL2Norm_OpenMP(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWL2Norm_OpenMP(arg1,arg2); fresult = (realtype)(result); return fresult; } 
SWIGEXPORT double _wrap_FN_VL1Norm_OpenMP(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1Norm_OpenMP(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VCompare_OpenMP(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VCompare_OpenMP(arg1,arg2,arg3); } SWIGEXPORT int _wrap_FN_VInvTest_OpenMP(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTest_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMask_OpenMP(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMask_OpenMP(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotient_OpenMP(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotient_OpenMP(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearCombination_OpenMP(int const *farg1, double *farg2, void *farg3, N_Vector farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); result = (int)N_VLinearCombination_OpenMP(arg1,arg2,arg3,arg4); fresult = (int)(result); return 
fresult; } SWIGEXPORT int _wrap_FN_VScaleAddMulti_OpenMP(int const *farg1, double *farg2, N_Vector farg3, void *farg4, void *farg5) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); result = (int)N_VScaleAddMulti_OpenMP(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMulti_OpenMP(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMulti_OpenMP(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearSumVectorArray_OpenMP(int const *farg1, double const *farg2, void *farg3, double const *farg4, void *farg5, void *farg6) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype arg4 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)N_VLinearSumVectorArray_OpenMP(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleVectorArray_OpenMP(int const *farg1, double *farg2, void *farg3, void *farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = 
(int)N_VScaleVectorArray_OpenMP(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstVectorArray_OpenMP(int const *farg1, double const *farg2, void *farg3) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)N_VConstVectorArray_OpenMP(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormVectorArray_OpenMP(int const *farg1, void *farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VWrmsNormVectorArray_OpenMP(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormMaskVectorArray_OpenMP(int const *farg1, void *farg2, void *farg3, N_Vector farg4, double *farg5) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype *)(farg5); result = (int)N_VWrmsNormMaskVectorArray_OpenMP(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumLocal_OpenMP(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWSqrSumLocal_OpenMP(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumMaskLocal_OpenMP(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; 
N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWSqrSumMaskLocal_OpenMP(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufSize_OpenMP(N_Vector farg1, int64_t *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); result = (int)N_VBufSize_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufPack_OpenMP(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufPack_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufUnpack_OpenMP(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufUnpack_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableFusedOps_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableFusedOps_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearCombination_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearCombination_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleAddMulti_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = 
(int)N_VEnableScaleAddMulti_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMulti_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMulti_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearSumVectorArray_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearSumVectorArray_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleVectorArray_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleVectorArray_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableConstVectorArray_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableConstVectorArray_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormVectorArray_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormVectorArray_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormMaskVectorArray_OpenMP(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT void * 
_wrap_FN_VCloneVectorArray_OpenMP(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArray_OpenMP(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArrayEmpty_OpenMP(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArrayEmpty_OpenMP(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroyVectorArray_OpenMP(void *farg1, int const *farg2) { N_Vector *arg1 = (N_Vector *) 0 ; int arg2 ; arg1 = (N_Vector *)(farg1); arg2 = (int)(*farg2); N_VDestroyVectorArray_OpenMP(arg1,arg2); } StanHeaders/src/nvector/openmp/fmod/fnvector_openmp_mod.f900000644000176200001440000012017314645137106023543 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fnvector_openmp_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS public :: FN_VNew_OpenMP public :: FN_VNewEmpty_OpenMP public :: FN_VMake_OpenMP public :: FN_VGetLength_OpenMP public :: FN_VPrint_OpenMP public :: FN_VPrintFile_OpenMP public :: FN_VGetVectorID_OpenMP public :: FN_VCloneEmpty_OpenMP public :: FN_VClone_OpenMP public :: FN_VDestroy_OpenMP public :: FN_VSpace_OpenMP public :: FN_VGetArrayPointer_OpenMP public :: FN_VSetArrayPointer_OpenMP public :: FN_VLinearSum_OpenMP public :: FN_VConst_OpenMP public :: FN_VProd_OpenMP public :: FN_VDiv_OpenMP public :: FN_VScale_OpenMP public :: FN_VAbs_OpenMP public :: FN_VInv_OpenMP public :: FN_VAddConst_OpenMP public :: FN_VDotProd_OpenMP public :: FN_VMaxNorm_OpenMP public :: FN_VWrmsNorm_OpenMP public :: FN_VWrmsNormMask_OpenMP public :: FN_VMin_OpenMP public :: FN_VWL2Norm_OpenMP public :: FN_VL1Norm_OpenMP public :: FN_VCompare_OpenMP public :: FN_VInvTest_OpenMP public :: FN_VConstrMask_OpenMP public :: FN_VMinQuotient_OpenMP public :: FN_VLinearCombination_OpenMP public :: FN_VScaleAddMulti_OpenMP public :: FN_VDotProdMulti_OpenMP public :: FN_VLinearSumVectorArray_OpenMP public :: FN_VScaleVectorArray_OpenMP public :: FN_VConstVectorArray_OpenMP public :: FN_VWrmsNormVectorArray_OpenMP public :: FN_VWrmsNormMaskVectorArray_OpenMP public :: FN_VWSqrSumLocal_OpenMP public :: FN_VWSqrSumMaskLocal_OpenMP public :: FN_VBufSize_OpenMP public :: FN_VBufPack_OpenMP public :: FN_VBufUnpack_OpenMP public :: FN_VEnableFusedOps_OpenMP public :: FN_VEnableLinearCombination_OpenMP public :: FN_VEnableScaleAddMulti_OpenMP public :: FN_VEnableDotProdMulti_OpenMP public :: FN_VEnableLinearSumVectorArray_OpenMP public :: FN_VEnableScaleVectorArray_OpenMP public :: FN_VEnableConstVectorArray_OpenMP public :: FN_VEnableWrmsNormVectorArray_OpenMP public :: FN_VEnableWrmsNormMaskVectorArray_OpenMP public :: FN_VCloneVectorArray_OpenMP public :: FN_VCloneVectorArrayEmpty_OpenMP public :: FN_VDestroyVectorArray_OpenMP ! 
WRAPPER DECLARATIONS interface function swigc_FN_VNew_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VNew_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VNewEmpty_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VNewEmpty_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VMake_OpenMP(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VMake_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FN_VGetLength_OpenMP(farg1) & bind(C, name="_wrap_FN_VGetLength_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function subroutine swigc_FN_VPrint_OpenMP(farg1) & bind(C, name="_wrap_FN_VPrint_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VGetVectorID_OpenMP(farg1) & bind(C, name="_wrap_FN_VGetVectorID_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FN_VCloneEmpty_OpenMP(farg1) & bind(C, name="_wrap_FN_VCloneEmpty_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VClone_OpenMP(farg1) & bind(C, name="_wrap_FN_VClone_OpenMP") & result(fresult) use, 
intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroy_OpenMP(farg1) & bind(C, name="_wrap_FN_VDestroy_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VSpace_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSpace_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VGetArrayPointer_OpenMP(farg1) & bind(C, name="_wrap_FN_VGetArrayPointer_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VSetArrayPointer_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VSetArrayPointer_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VLinearSum_OpenMP(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VLinearSum_OpenMP") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 end subroutine subroutine swigc_FN_VConst_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VConst_OpenMP") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VProd_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VProd_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VDiv_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDiv_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VScale_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VScale_OpenMP") use, intrinsic :: 
ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VAbs_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VAbs_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VInv_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VInv_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VAddConst_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VAddConst_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VDotProd_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VDotProd_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNorm_OpenMP(farg1) & bind(C, name="_wrap_FN_VMaxNorm_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNorm_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VWrmsNorm_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNormMask_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWrmsNormMask_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VMin_OpenMP(farg1) & bind(C, name="_wrap_FN_VMin_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWL2Norm_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VWL2Norm_OpenMP") & result(fresult) 
use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1Norm_OpenMP(farg1) & bind(C, name="_wrap_FN_VL1Norm_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_FN_VCompare_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VCompare_OpenMP") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VInvTest_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VInvTest_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMask_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMask_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotient_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotient_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VLinearCombination_OpenMP(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VLinearCombination_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VScaleAddMulti_OpenMP(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VScaleAddMulti_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: 
fresult end function function swigc_FN_VDotProdMulti_OpenMP(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMulti_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VLinearSumVectorArray_OpenMP(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FN_VLinearSumVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FN_VScaleVectorArray_OpenMP(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VScaleVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VConstVectorArray_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormVectorArray_OpenMP(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VWrmsNormVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormMaskVectorArray_OpenMP(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VWrmsNormMaskVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: 
farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VWSqrSumLocal_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VWSqrSumLocal_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumMaskLocal_OpenMP(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWSqrSumMaskLocal_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VBufSize_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VBufSize_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufPack_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VBufPack_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufUnpack_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VBufUnpack_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableFusedOps_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableFusedOps_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearCombination_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearCombination_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleAddMulti_OpenMP(farg1, farg2) & bind(C, 
name="_wrap_FN_VEnableScaleAddMulti_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMulti_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMulti_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearSumVectorArray_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearSumVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleVectorArray_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableConstVectorArray_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableConstVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormVectorArray_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormMaskVectorArray_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormMaskVectorArray_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VCloneVectorArray_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArray_OpenMP") & result(fresult) use, 
intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VCloneVectorArrayEmpty_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArrayEmpty_OpenMP") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroyVectorArray_OpenMP(farg1, farg2) & bind(C, name="_wrap_FN_VDestroyVectorArray_OpenMP") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 end subroutine end interface contains ! MODULE SUBPROGRAMS function FN_VNew_OpenMP(vec_length, num_threads, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length integer(C_INT), intent(in) :: num_threads type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = vec_length farg2 = num_threads farg3 = sunctx fresult = swigc_FN_VNew_OpenMP(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FN_VNewEmpty_OpenMP(vec_length, num_threads, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length integer(C_INT), intent(in) :: num_threads type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = vec_length farg2 = num_threads farg3 = sunctx fresult = swigc_FN_VNewEmpty_OpenMP(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FN_VMake_OpenMP(vec_length, v_data, num_threads, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data integer(C_INT), intent(in) :: 
num_threads type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = vec_length farg2 = c_loc(v_data(1)) farg3 = num_threads farg4 = sunctx fresult = swigc_FN_VMake_OpenMP(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FN_VGetLength_OpenMP(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLength_OpenMP(farg1) swig_result = fresult end function subroutine FN_VPrint_OpenMP(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VPrint_OpenMP(farg1) end subroutine subroutine FN_VPrintFile_OpenMP(v, outfile) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v) farg2 = outfile call swigc_FN_VPrintFile_OpenMP(farg1, farg2) end subroutine function FN_VGetVectorID_OpenMP(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(N_Vector_ID) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetVectorID_OpenMP(farg1) swig_result = fresult end function function FN_VCloneEmpty_OpenMP(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VCloneEmpty_OpenMP(farg1) call c_f_pointer(fresult, swig_result) end function function FN_VClone_OpenMP(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult 
= swigc_FN_VClone_OpenMP(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FN_VDestroy_OpenMP(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VDestroy_OpenMP(farg1) end subroutine subroutine FN_VSpace_OpenMP(v, lrw, liw) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), dimension(*), target, intent(inout) :: lrw integer(C_INT64_T), dimension(*), target, intent(inout) :: liw type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(v) farg2 = c_loc(lrw(1)) farg3 = c_loc(liw(1)) call swigc_FN_VSpace_OpenMP(farg1, farg2, farg3) end subroutine function FN_VGetArrayPointer_OpenMP(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetArrayPointer_OpenMP(farg1) call c_f_pointer(fresult, swig_result, [1]) end function subroutine FN_VSetArrayPointer_OpenMP(v_data, v) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v_data(1)) farg2 = c_loc(v) call swigc_FN_VSetArrayPointer_OpenMP(farg1, farg2) end subroutine subroutine FN_VLinearSum_OpenMP(a, x, b, y, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: a type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = a farg2 = c_loc(x) farg3 = b farg4 = c_loc(y) farg5 = c_loc(z) call swigc_FN_VLinearSum_OpenMP(farg1, farg2, farg3, farg4, farg5) end subroutine subroutine FN_VConst_OpenMP(c, z) use, intrinsic :: 
ISO_C_BINDING
real(C_DOUBLE), intent(in) :: c
type(N_Vector), target, intent(inout) :: z
real(C_DOUBLE) :: farg1
type(C_PTR) :: farg2
farg1 = c
farg2 = c_loc(z)
call swigc_FN_VConst_OpenMP(farg1, farg2)
end subroutine
! NOTE(review): the lines above are the tail of FN_VConst_OpenMP, whose header
! lies before this chunk; they are reproduced unchanged.
! All wrappers below are SWIG-generated shims: they convert Fortran arguments
! to C-interoperable fargN values (C_PTR via c_loc, scalars copied) and forward
! to the bound C entry point swigc_FN_V*_OpenMP.

! Elementwise product z(i) = x(i)*y(i) (C impl: N_VProd_OpenMP).
subroutine FN_VProd_OpenMP(x, y, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: y
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
farg1 = c_loc(x)
farg2 = c_loc(y)
farg3 = c_loc(z)
call swigc_FN_VProd_OpenMP(farg1, farg2, farg3)
end subroutine

! Elementwise division z(i) = x(i)/y(i) (C impl: N_VDiv_OpenMP).
subroutine FN_VDiv_OpenMP(x, y, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: y
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
farg1 = c_loc(x)
farg2 = c_loc(y)
farg3 = c_loc(z)
call swigc_FN_VDiv_OpenMP(farg1, farg2, farg3)
end subroutine

! Scalar scaling z = c*x (C impl: N_VScale_OpenMP).
subroutine FN_VScale_OpenMP(c, x, z)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE), intent(in) :: c
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
real(C_DOUBLE) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
farg1 = c
farg2 = c_loc(x)
farg3 = c_loc(z)
call swigc_FN_VScale_OpenMP(farg1, farg2, farg3)
end subroutine

! Componentwise absolute value z(i) = |x(i)| (C impl: N_VAbs_OpenMP).
subroutine FN_VAbs_OpenMP(x, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(z)
call swigc_FN_VAbs_OpenMP(farg1, farg2)
end subroutine

! Componentwise inverse z(i) = 1/x(i) (C impl: N_VInv_OpenMP).
subroutine FN_VInv_OpenMP(x, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(z)
call swigc_FN_VInv_OpenMP(farg1, farg2)
end subroutine

! Scalar shift z(i) = x(i) + b (C impl: N_VAddConst_OpenMP).
subroutine FN_VAddConst_OpenMP(x, b, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE), intent(in) :: b
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3
farg1 = c_loc(x)
farg2 = b
farg3 = c_loc(z)
call swigc_FN_VAddConst_OpenMP(farg1, farg2, farg3)
end subroutine

! Dot product sum(x(i)*y(i)) (C impl: N_VDotProd_OpenMP).
function FN_VDotProd_OpenMP(x, y) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: y
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(y)
fresult = swigc_FN_VDotProd_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Max norm max|x(i)| (C impl: N_VMaxNorm_OpenMP).
function FN_VMaxNorm_OpenMP(x) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
farg1 = c_loc(x)
fresult = swigc_FN_VMaxNorm_OpenMP(farg1)
swig_result = fresult
end function

! Weighted RMS norm of x with weights w (C impl: N_VWrmsNorm_OpenMP).
function FN_VWrmsNorm_OpenMP(x, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(w)
fresult = swigc_FN_VWrmsNorm_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Masked weighted RMS norm (C impl: N_VWrmsNormMask_OpenMP).
function FN_VWrmsNormMask_OpenMP(x, w, id) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
type(N_Vector), target, intent(inout) :: id
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
farg1 = c_loc(x)
farg2 = c_loc(w)
farg3 = c_loc(id)
fresult = swigc_FN_VWrmsNormMask_OpenMP(farg1, farg2, farg3)
swig_result = fresult
end function

! Minimum component of x (C impl: N_VMin_OpenMP).
function FN_VMin_OpenMP(x) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
farg1 = c_loc(x)
fresult = swigc_FN_VMin_OpenMP(farg1)
swig_result = fresult
end function

! Weighted L2 norm (C impl: N_VWL2Norm_OpenMP).
function FN_VWL2Norm_OpenMP(x, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(w)
fresult = swigc_FN_VWL2Norm_OpenMP(farg1, farg2)
swig_result = fresult
end function

! L1 norm sum|x(i)| (C impl: N_VL1Norm_OpenMP).
function FN_VL1Norm_OpenMP(x) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
farg1 = c_loc(x)
fresult = swigc_FN_VL1Norm_OpenMP(farg1)
swig_result = fresult
end function

! Forwards to C N_VCompare_OpenMP (threshold compare; semantics in C source).
subroutine FN_VCompare_OpenMP(c, x, z)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE), intent(in) :: c
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
real(C_DOUBLE) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
farg1 = c
farg2 = c_loc(x)
farg3 = c_loc(z)
call swigc_FN_VCompare_OpenMP(farg1, farg2, farg3)
end subroutine

! Forwards to C N_VInvTest_OpenMP; returns its integer flag.
function FN_VInvTest_OpenMP(x, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(z)
fresult = swigc_FN_VInvTest_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Forwards to C N_VConstrMask_OpenMP; returns its integer flag.
function FN_VConstrMask_OpenMP(c, x, m) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: c
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: m
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
farg1 = c_loc(c)
farg2 = c_loc(x)
farg3 = c_loc(m)
fresult = swigc_FN_VConstrMask_OpenMP(farg1, farg2, farg3)
swig_result = fresult
end function

! Forwards to C N_VMinQuotient_OpenMP.
function FN_VMinQuotient_OpenMP(num, denom) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: num
type(N_Vector), target, intent(inout) :: denom
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(num)
farg2 = c_loc(denom)
fresult = swigc_FN_VMinQuotient_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Fused op: forwards coefficient array c (first-element address) and the
! opaque vector-array handle v to C N_VLinearCombination_OpenMP.
function FN_VLinearCombination_OpenMP(nvec, c, v, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), dimension(*), target, intent(inout) :: c
type(C_PTR) :: v
type(N_Vector), target, intent(inout) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
farg1 = nvec
farg2 = c_loc(c(1))
farg3 = v
farg4 = c_loc(z)
fresult = swigc_FN_VLinearCombination_OpenMP(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Fused op: forwards to C N_VScaleAddMulti_OpenMP (y, z are vector-array handles).
function FN_VScaleAddMulti_OpenMP(nvec, a, x, y, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), dimension(*), target, intent(inout) :: a
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: y
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5
farg1 = nvec
farg2 = c_loc(a(1))
farg3 = c_loc(x)
farg4 = y
farg5 = z
fresult = swigc_FN_VScaleAddMulti_OpenMP(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Fused op: forwards to C N_VDotProdMulti_OpenMP; results land in dotprods.
function FN_VDotProdMulti_OpenMP(nvec, x, y, dotprods) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: y
real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
farg1 = nvec
farg2 = c_loc(x)
farg3 = y
farg4 = c_loc(dotprods(1))
fresult = swigc_FN_VDotProdMulti_OpenMP(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Vector-array op: forwards to C N_VLinearSumVectorArray_OpenMP.
function FN_VLinearSumVectorArray_OpenMP(nvec, a, x, b, y, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), intent(in) :: a
type(C_PTR) :: x
real(C_DOUBLE), intent(in) :: b
type(C_PTR) :: y
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3
real(C_DOUBLE) :: farg4
type(C_PTR) :: farg5
type(C_PTR) :: farg6
farg1 = nvec
farg2 = a
farg3 = x
farg4 = b
farg5 = y
farg6 = z
fresult = swigc_FN_VLinearSumVectorArray_OpenMP(farg1, farg2, farg3, farg4, farg5, farg6)
swig_result = fresult
end function

! Vector-array op: forwards to C N_VScaleVectorArray_OpenMP.
function FN_VScaleVectorArray_OpenMP(nvec, c, x, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), dimension(*), target, intent(inout) :: c
type(C_PTR) :: x
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
farg1 = nvec
farg2 = c_loc(c(1))
farg3 = x
farg4 = z
fresult = swigc_FN_VScaleVectorArray_OpenMP(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Vector-array op: forwards to C N_VConstVectorArray_OpenMP.
function FN_VConstVectorArray_OpenMP(nvecs, c, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvecs
real(C_DOUBLE), intent(in) :: c
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3
farg1 = nvecs
farg2 = c
farg3 = z
fresult = swigc_FN_VConstVectorArray_OpenMP(farg1, farg2, farg3)
swig_result = fresult
end function

! Vector-array op: forwards to C N_VWrmsNormVectorArray_OpenMP; norms land in nrm.
function FN_VWrmsNormVectorArray_OpenMP(nvecs, x, w, nrm) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvecs
type(C_PTR) :: x
type(C_PTR) :: w
real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
farg1 = nvecs
farg2 = x
farg3 = w
farg4 = c_loc(nrm(1))
fresult = swigc_FN_VWrmsNormVectorArray_OpenMP(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Vector-array op: forwards to C N_VWrmsNormMaskVectorArray_OpenMP.
function FN_VWrmsNormMaskVectorArray_OpenMP(nvecs, x, w, id, nrm) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvecs
type(C_PTR) :: x
type(C_PTR) :: w
type(N_Vector), target, intent(inout) :: id
real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5
farg1 = nvecs
farg2 = x
farg3 = w
farg4 = c_loc(id)
farg5 = c_loc(nrm(1))
fresult = swigc_FN_VWrmsNormMaskVectorArray_OpenMP(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Local weighted squared sum sum((x(i)*w(i))^2) (C impl: N_VWSqrSumLocal_OpenMP).
function FN_VWSqrSumLocal_OpenMP(x, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(w)
fresult = swigc_FN_VWSqrSumLocal_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Masked local weighted squared sum (C impl: N_VWSqrSumMaskLocal_OpenMP).
function FN_VWSqrSumMaskLocal_OpenMP(x, w, id) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
type(N_Vector), target, intent(inout) :: id
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
farg1 = c_loc(x)
farg2 = c_loc(w)
farg3 = c_loc(id)
fresult = swigc_FN_VWSqrSumMaskLocal_OpenMP(farg1, farg2, farg3)
swig_result = fresult
end function

! XBraid interface: forwards to C N_VBufSize_OpenMP; size(1) receives the result.
function FN_VBufSize_OpenMP(x, size) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
integer(C_INT64_T), dimension(*), target, intent(inout) :: size
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = c_loc(size(1))
fresult = swigc_FN_VBufSize_OpenMP(farg1, farg2)
swig_result = fresult
end function

! XBraid interface: forwards to C N_VBufPack_OpenMP (buf is a raw buffer handle).
function FN_VBufPack_OpenMP(x, buf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: buf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = buf
fresult = swigc_FN_VBufPack_OpenMP(farg1, farg2)
swig_result = fresult
end function

! XBraid interface: forwards to C N_VBufUnpack_OpenMP.
function FN_VBufUnpack_OpenMP(x, buf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: buf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
farg1 = c_loc(x)
farg2 = buf
fresult = swigc_FN_VBufUnpack_OpenMP(farg1, farg2)
swig_result = fresult
end function

! The nine FN_VEnable* wrappers below share one shape: pass the vector handle
! and an int flag tf to the corresponding C enable/disable routine.
function FN_VEnableFusedOps_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableFusedOps_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableLinearCombination_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableLinearCombination_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableScaleAddMulti_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableScaleAddMulti_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableDotProdMulti_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableDotProdMulti_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableLinearSumVectorArray_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableLinearSumVectorArray_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableScaleVectorArray_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableScaleVectorArray_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableConstVectorArray_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableConstVectorArray_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableWrmsNormVectorArray_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableWrmsNormVectorArray_OpenMP(farg1, farg2)
swig_result = fresult
end function

function FN_VEnableWrmsNormMaskVectorArray_OpenMP(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableWrmsNormMaskVectorArray_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Clone an array of count vectors from template w; returns an opaque handle
! (C impl delegates to generic N_VCloneVectorArray).
function FN_VCloneVectorArray_OpenMP(count, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(C_PTR) :: swig_result
integer(C_INT), intent(in) :: count
type(N_Vector), target, intent(inout) :: w
type(C_PTR) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
farg1 = count
farg2 = c_loc(w)
fresult = swigc_FN_VCloneVectorArray_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Same as above but the clones carry no data arrays.
function FN_VCloneVectorArrayEmpty_OpenMP(count, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(C_PTR) :: swig_result
integer(C_INT), intent(in) :: count
type(N_Vector), target, intent(inout) :: w
type(C_PTR) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
farg1 = count
farg2 = c_loc(w)
fresult = swigc_FN_VCloneVectorArrayEmpty_OpenMP(farg1, farg2)
swig_result = fresult
end function

! Free an array previously created by FN_VCloneVectorArray_OpenMP.
subroutine FN_VDestroyVectorArray_OpenMP(vs, count)
use, intrinsic :: ISO_C_BINDING
type(C_PTR) :: vs
integer(C_INT), intent(in) :: count
type(C_PTR) :: farg1
integer(C_INT) :: farg2
farg1 = vs
farg2 = count
call swigc_FN_VDestroyVectorArray_OpenMP(farg1, farg2)
end subroutine

end module
StanHeaders/src/nvector/openmp/nvector_openmp.c0000644000176200001440000017746714645137106021457 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP implementation * of the NVECTOR module. * -----------------------------------------------------------------*/ #include #include #include #include #include #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x */ static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void VScaleBy_OpenMP(realtype 
a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/ N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v) { return SUNDIALS_NVEC_OPENMP; } /* ---------------------------------------------------------------------------- * Function to create a new empty vector */ N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads, SUNContext sunctx) { N_Vector v; N_VectorContent_OpenMP content; /* Create vector */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_OpenMP; v->ops->nvclone = N_VClone_OpenMP; v->ops->nvcloneempty = N_VCloneEmpty_OpenMP; v->ops->nvdestroy = N_VDestroy_OpenMP; v->ops->nvspace = N_VSpace_OpenMP; v->ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP; v->ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP; v->ops->nvgetlength = N_VGetLength_OpenMP; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_OpenMP; v->ops->nvconst = N_VConst_OpenMP; v->ops->nvprod = N_VProd_OpenMP; v->ops->nvdiv = N_VDiv_OpenMP; v->ops->nvscale = N_VScale_OpenMP; v->ops->nvabs = N_VAbs_OpenMP; v->ops->nvinv = N_VInv_OpenMP; v->ops->nvaddconst = N_VAddConst_OpenMP; v->ops->nvdotprod = N_VDotProd_OpenMP; v->ops->nvmaxnorm = N_VMaxNorm_OpenMP; v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP; v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMP; v->ops->nvmin = N_VMin_OpenMP; v->ops->nvwl2norm = N_VWL2Norm_OpenMP; v->ops->nvl1norm = N_VL1Norm_OpenMP; v->ops->nvcompare = N_VCompare_OpenMP; v->ops->nvinvtest = N_VInvTest_OpenMP; v->ops->nvconstrmask = N_VConstrMask_OpenMP; v->ops->nvminquotient = N_VMinQuotient_OpenMP; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction kernels */ v->ops->nvdotprodlocal = N_VDotProd_OpenMP; v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMP; v->ops->nvminlocal = N_VMin_OpenMP; v->ops->nvl1normlocal = N_VL1Norm_OpenMP; v->ops->nvinvtestlocal = N_VInvTest_OpenMP; v->ops->nvconstrmasklocal = N_VConstrMask_OpenMP; v->ops->nvminquotientlocal 
= N_VMinQuotient_OpenMP; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMP; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMP; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMP; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_OpenMP; v->ops->nvbufpack = N_VBufPack_OpenMP; v->ops->nvbufunpack = N_VBufUnpack_OpenMP; /* debugging functions */ v->ops->nvprint = N_VPrint_OpenMP; v->ops->nvprintfile = N_VPrintFile_OpenMP; /* Create content */ content = NULL; content = (N_VectorContent_OpenMP) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = length; content->num_threads = num_threads; content->own_data = SUNFALSE; content->data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Function to create a new vector */ N_Vector N_VNew_OpenMP(sunindextype length, int num_threads, SUNContext sunctx) { N_Vector v; realtype *data; v = NULL; v = N_VNewEmpty_OpenMP(length, num_threads, sunctx); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); } /* Attach data */ NV_OWN_DATA_OMP(v) = SUNTRUE; NV_DATA_OMP(v) = data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a vector with user data component */ N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_OpenMP(length, num_threads, sunctx); if (v == NULL) return(NULL); if (length > 0) { /* Attach data */ NV_OWN_DATA_OMP(v) = SUNFALSE; NV_DATA_OMP(v) = v_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors. 
*/ N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMP */ void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_OpenMP(N_Vector v) { return NV_LENGTH_OMP(v); } /* ---------------------------------------------------------------------------- * Function to print a vector to stdout */ void N_VPrint_OpenMP(N_Vector x) { N_VPrintFile_OpenMP(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print a vector to outfile */ void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]); #else STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Create new vector from existing vector without attaching data */ N_Vector N_VCloneEmpty_OpenMP(N_Vector w) { N_Vector v; 
N_VectorContent_OpenMP content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_OpenMP) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = NV_LENGTH_OMP(w); content->num_threads = NV_NUM_THREADS_OMP(w); content->own_data = SUNFALSE; content->data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Create new vector from existing vector and attach data */ N_Vector N_VClone_OpenMP(N_Vector w) { N_Vector v; realtype *data; sunindextype length; v = NULL; v = N_VCloneEmpty_OpenMP(w); if (v == NULL) return(NULL); length = NV_LENGTH_OMP(w); /* Create data */ if (length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); } /* Attach data */ NV_OWN_DATA_OMP(v) = SUNTRUE; NV_DATA_OMP(v) = data; } return(v); } /* ---------------------------------------------------------------------------- * Destroy vector and free vector memory */ void N_VDestroy_OpenMP(N_Vector v) { if (v == NULL) return; /* free content */ if (v->content != NULL) { /* free data array if it's owned by the vector */ if (NV_OWN_DATA_OMP(v) && NV_DATA_OMP(v) != NULL) { free(NV_DATA_OMP(v)); NV_DATA_OMP(v) = NULL; } free(v->content); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } /* ---------------------------------------------------------------------------- * Get storage requirement for N_Vector */ void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw) { *lrw = NV_LENGTH_OMP(v); *liw = 1; return; } /* 
---------------------------------------------------------------------------- * Get vector data pointer */ realtype *N_VGetArrayPointer_OpenMP(N_Vector v) { return((realtype *) NV_DATA_OMP(v)); } /* ---------------------------------------------------------------------------- * Set vector data pointer */ void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v) { if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data; return; } /* ---------------------------------------------------------------------------- * Compute linear combination z[i] = a*x[i]+b*y[i] */ void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i, N; realtype c, *xd, *yd, *zd; N_Vector v1, v2; booleantype test; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */ Vaxpy_OpenMP(a,x,y); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ Vaxpy_OpenMP(b,y,x); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_OpenMP(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? x : y; VDiff_OpenMP(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin1_OpenMP(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? 
x : y; VLin2_OpenMP(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_OpenMP(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_OpenMP(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = (a*xd[i])+(b*yd[i]); return; } /* ---------------------------------------------------------------------------- * Assigns constant value to all vector elements, z[i] = c */ void N_VConst_OpenMP(realtype c, N_Vector z) { sunindextype i, N; realtype *zd; i = 0; /* initialize to suppress clang warning */ zd = NULL; N = NV_LENGTH_OMP(z); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(z)) for (i = 0; i < N; i++) zd[i] = c; return; } /* ---------------------------------------------------------------------------- * Compute componentwise product z[i] = x[i]*y[i] */ void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]*yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise division z[i] = x[i]/y[i] */ void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; 
realtype *xd, *yd, *zd; i = 0; /* initialize to suppress clang warning */ xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]/yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaler multiplication z[i] = c*x[i] */ void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_OpenMP(c, x); return; } if (c == ONE) { VCopy_OpenMP(x, z); } else if (c == -ONE) { VNeg_OpenMP(x, z); } else { N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = c*xd[i]; } return; } /* ---------------------------------------------------------------------------- * Compute absolute value of vector components z[i] = SUNRabs(x[i]) */ void N_VAbs_OpenMP(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = SUNRabs(xd[i]); return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = 1 / x[i] */ void N_VInv_OpenMP(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; i = 0; /* initialize to suppress clang warning */ xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \ 
num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = ONE/xd[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise addition of a scalar to a vector z[i] = x[i] + b
 */

void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;

  i  = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]+b;

  return;
}

/* ----------------------------------------------------------------------------
 * Computes the dot product of two vectors, a = sum(x[i]*y[i])
 */

realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype sum, *xd, *yd;

  i   = 0; /* initialize to suppress clang warning */
  sum = ZERO;
  xd  = yd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);

  /* OpenMP reduction clause accumulates per-thread partial sums safely */
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += xd[i]*yd[i];
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Computes max norm of a vector
 */

realtype N_VMaxNorm_OpenMP(N_Vector x)
{
  sunindextype i, N;
  realtype tmax, max, *xd;

  i   = 0; /* initialize to suppress clang warning */
  max = ZERO;
  xd  = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /* max-reduction done by hand: each thread computes a private tmax over its
     chunk, then merges it into the shared max inside a critical section */
#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmax = ZERO;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]);
    }
#pragma omp critical
    {
      if (tmax > max) max = tmax;
    }
  }

  return(max);
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a vector
 */

realtype
N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
  /* wrms(x, w) = sqrt( (1/N) * sum_i (x[i]*w[i])^2 ) — delegate the weighted
     square sum and divide by the vector length before the square root */
  return(SUNRsqrt(N_VWSqrSumLocal_OpenMP(x, w)/(NV_LENGTH_OMP(x))));
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a masked vector
 */

realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  /* same as N_VWrmsNorm but only entries with id[i] > 0 contribute;
     note the divisor is still the full length N */
  return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMP(x, w, id)/(NV_LENGTH_OMP(x))));
}

/* ----------------------------------------------------------------------------
 * Finds the minimum component of a vector
 */

realtype N_VMin_OpenMP(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd;
  realtype tmin;

  i  = 0; /* initialize to suppress clang warning */
  xd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /* seed with the first element so the result is correct for N == 1 */
  min = xd[0];

  /* hand-rolled min-reduction: private tmin per thread, merged into the
     shared min under a critical section (outer test is a cheap pre-check;
     it is re-done inside the critical section to be race-free) */
#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmin = xd[0];
#pragma omp for schedule(static)
    for (i = 1; i < N; i++) {
      if (xd[i] < tmin) tmin = xd[i];
    }
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}

/* ----------------------------------------------------------------------------
 * Computes weighted L2 norm of a vector
 */

realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd, *wd;

  i   = 0; /* initialize to suppress clang warning */
  sum = ZERO;
  xd  = wd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  wd = NV_DATA_OMP(w);

#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd[i]*wd[i]);
  }

  return(SUNRsqrt(sum));
}

/* ----------------------------------------------------------------------------
 * Computes L1 norm of a vector
 */

realtype N_VL1Norm_OpenMP(N_Vector x)
{
  sunindextype i, N;
  realtype sum, *xd;

  i   = 0; /* initialize to suppress clang warning */
  sum = ZERO;
  xd  = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

#pragma omp parallel for default(none) private(i) shared(N,xd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  /* NOTE(review): the next line is garbled source — the body of the L1-norm
     loop and essentially all of N_VCompare_OpenMP appear to have been lost
     when this file was extracted from the archive. Tokens are preserved
     exactly as found; restore from upstream SUNDIALS nvector_openmp.c. */
  for (i = 0; i= c) ? ONE : ZERO; }
  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
 */

booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd, val;

  i  = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  val = ZERO;

  /* NOTE(review): val is written by multiple threads without a reduction;
     the race is benign because every write stores the same value (ONE) */
#pragma omp parallel for default(none) private(i) shared(N,val,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (xd[i] == ZERO)
      val = ONE;            /* flag the zero entry; zd[i] is left untouched */
    else
      zd[i] = ONE/xd[i];
  }

  /* SUNFALSE if any x[i] was zero, SUNTRUE otherwise */
  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}

/* ----------------------------------------------------------------------------
 * Compute constraint mask of a vector
 */

booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd, *xd, *md;
  booleantype test;

  i  = 0; /* initialize to suppress clang warning */
  cd = xd = md = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  cd = NV_DATA_OMP(c);
  md = NV_DATA_OMP(m);

  temp = ZERO;

#pragma omp parallel for default(none) private(i,test) shared(N,xd,cd,md,temp) \
  schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    md[i] = ZERO;

    /* Continue if no constraints were set for the variable */
    if (cd[i] == ZERO)
      continue;

    /* Check if a set constraint has been violated:
       |c| > 1.5 encodes a strict inequality constraint (x*c must be > 0),
       |c| > 0.5 encodes a non-strict one (x*c must be >= 0) */
    test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) ||
           (SUNRabs(cd[i]) > HALF   && xd[i]*cd[i] <  ZERO);
    if (test) {
      temp = md[i] = ONE; /* Here is a race to write to temp
                             (benign: all writers store ONE) */
    }
  }

  /* Return false if any constraint was violated */
  return (temp == ONE) ?
SUNFALSE : SUNTRUE;
}

/* ----------------------------------------------------------------------------
 * Compute minimum componentwise quotient
 * (entries with denom[i] == 0 are skipped; returns BIG_REAL if every
 *  denominator is zero)
 */

realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd, *dd, min, tmin, val;

  i  = 0; /* initialize to suppress clang warning */
  nd = dd = NULL;

  N  = NV_LENGTH_OMP(num);
  nd = NV_DATA_OMP(num);
  dd = NV_DATA_OMP(denom);

  min = BIG_REAL;

  /* hand-rolled min-reduction: per-thread tmin merged under a critical
     section (the outer pre-check is re-tested inside to stay race-free) */
#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
  num_threads(NV_NUM_THREADS_OMP(num))
  {
    tmin = BIG_REAL;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (dd[i] != ZERO) {
        val = nd[i]/dd[i];
        if (val < tmin) tmin = val;
      }
    }
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}

/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a vector
 */

realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd, *wd;

  i   = 0; /* initialize to suppress clang warning */
  sum = ZERO;
  xd  = wd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  wd = NV_DATA_OMP(w);

#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd[i]*wd[i]);
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a masked vector
 * (only entries with id[i] > 0 contribute to the sum)
 */

realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype i, N;
  realtype sum, *xd, *wd, *idd;

  i   = 0; /* initialize to suppress clang warning */
  sum = ZERO;
  xd  = wd = idd = NULL;

  N   = NV_LENGTH_OMP(x);
  xd  = NV_DATA_OMP(x);
  wd  = NV_DATA_OMP(w);
  idd = NV_DATA_OMP(id);

#pragma omp parallel for default(none) private(i) shared(N,xd,wd,idd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (idd[i] > ZERO) {
      sum += SUNSQR(xd[i]*wd[i]);
    }
  }
return(sum); } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i; sunindextype j, N; realtype* zd=NULL; realtype* xd=NULL; i = 0; /* initialize to suppress clang warning */ j = 0; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_OpenMP(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_OpenMP(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMP(z); zd = NV_DATA_OMP(z); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((X[0] == z) && (c[0] == ONE)) { #pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \ num_threads(NV_NUM_THREADS_OMP(z)) { for (i=1; i ZERO) sum += SUNSQR(xd[j] * wd[j]); } #pragma omp critical { nrm[i] += sum; } } } for (i=0; i 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_OpenMP(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LENGTH_OMP(X[0]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { #pragma omp parallel default(none) private(i,j,k,xd,yd) shared(nvec,nsum,X,Y,N,a) \ num_threads(NV_NUM_THREADS_OMP(X[0])) { for (i=0; i 1 * -------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; jops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_OpenMP; v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMP; v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMP; /* enable all vector array operations */ 
v->ops->nvlinearsumvectorarray         = N_VLinearSumVectorArray_OpenMP;
    v->ops->nvscalevectorarray             = N_VScaleVectorArray_OpenMP;
    v->ops->nvconstvectorarray             = N_VConstVectorArray_OpenMP;
    v->ops->nvwrmsnormvectorarray          = N_VWrmsNormVectorArray_OpenMP;
    v->ops->nvwrmsnormmaskvectorarray      = N_VWrmsNormMaskVectorArray_OpenMP;
    v->ops->nvscaleaddmultivectorarray     = N_VScaleAddMultiVectorArray_OpenMP;
    v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMP;
    /* enable single buffer reduction operations */
    v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMP;
  } else {
    /* disable all fused vector operations */
    v->ops->nvlinearcombination = NULL;
    v->ops->nvscaleaddmulti     = NULL;
    v->ops->nvdotprodmulti      = NULL;
    /* disable all vector array operations */
    v->ops->nvlinearsumvectorarray         = NULL;
    v->ops->nvscalevectorarray             = NULL;
    v->ops->nvconstvectorarray             = NULL;
    v->ops->nvwrmsnormvectorarray          = NULL;
    v->ops->nvwrmsnormmaskvectorarray      = NULL;
    v->ops->nvscaleaddmultivectorarray     = NULL;
    v->ops->nvlinearcombinationvectorarray = NULL;
    /* disable single buffer reduction operations */
    v->ops->nvdotprodmultilocal = NULL;
  }

  /* return success */
  return(0);
}

/* Enable (tf != 0) or disable the fused linear-combination operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearcombination = N_VLinearCombination_OpenMP;
  else
    v->ops->nvlinearcombination = NULL;

  /* return success */
  return(0);
}

/* Enable (tf != 0) or disable the fused scale-add-multi operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMP;
  else
    v->ops->nvscaleaddmulti = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the multi dot-product operation (both the fused and the
   single-buffer "local" entry point are toggled together). */
int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL
*/
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf) {
    v->ops->nvdotprodmulti      = N_VDotProdMulti_OpenMP;
    v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMP;
  } else {
    v->ops->nvdotprodmulti      = NULL;
    v->ops->nvdotprodmultilocal = NULL;
  }

  /* return success */
  return(0);
}

/* Enable (tf != 0) or disable the linear-sum vector-array operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMP;
  else
    v->ops->nvlinearsumvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable (tf != 0) or disable the scale vector-array operation on v. */
int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMP;
  else
    v->ops->nvscalevectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable (tf != 0) or disable the const vector-array operation on v. */
int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMP;
  else
    v->ops->nvconstvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable (tf != 0) or disable the WRMS-norm vector-array operation on v. */
int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMP;
  else
    v->ops->nvwrmsnormvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable (tf != 0) or disable the masked WRMS-norm vector-array operation. */
int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMP; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMP; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMP; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } StanHeaders/src/nvector/cuda/0000755000176200001440000000000014511135055015636 5ustar liggesusersStanHeaders/src/nvector/cuda/VectorKernels.cuh0000644000176200001440000001661514645137106021145 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Slaven Peles, Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- */ #ifndef _NVECTOR_CUDA_KERNELS_CUH_ #define _NVECTOR_CUDA_KERNELS_CUH_ #include #include #include "sundials_cuda_kernels.cuh" namespace sundials { namespace cuda { namespace impl { /* * Sets all elements of the vector X to constant value a. * */ template __global__ void setConstKernel(T a, T *X, I n) { GRID_STRIDE_XLOOP(I, i, n) { X[i] = a; } } /* * Computes linear sum (combination) of two vectors. * */ template __global__ void linearSumKernel(T a, const T *X, T b, const T *Y, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = a*X[i] + b*Y[i]; } } /* * Elementwise product of two vectors. * */ template __global__ void prodKernel(const T *X, const T *Y, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = X[i]*Y[i]; } } /* * Elementwise division of two vectors. * */ template __global__ void divKernel(const T *X, const T *Y, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = X[i]/Y[i]; } } /* * Scale vector with scalar value 'a'. * */ template __global__ void scaleKernel(T a, const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = a*X[i]; } } /* * Stores absolute values of vector X elements into vector Z. * */ template __global__ void absKernel(const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = abs(X[i]); } } /* * Elementwise inversion. * */ template __global__ void invKernel(const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = 1.0/(X[i]); } } /* * Add constant 'c' to each vector element. * */ template __global__ void addConstKernel(T a, const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = a + X[i]; } } /* * Compare absolute values of vector 'X' with constant 'c'. * */ template __global__ void compareKernel(T c, const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = (abs(X[i]) >= c) ? 1.0 : 0.0; } } /* * Dot product of two vectors. 
* */ template class GridReducer> __global__ void dotProdKernel(const T *x, const T *y, T *out, I n, unsigned int *device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { sum += x[i] * y[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Finds max norm the vector. * */ template class GridReducer> __global__ void maxNormKernel(const T *x, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::maximum; const T Id = op::identity(); T maximum = Id; GRID_STRIDE_XLOOP(I, i, n) { maximum = max(abs(x[i]), maximum); } GridReducer{}(maximum, Id, out, device_count); } /* * Weighted L2 norm squared. * */ template class GridReducer> __global__ void wL2NormSquareKernel(const T *x, const T *w, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { sum += x[i] * w[i] * x[i] * w[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Weighted L2 norm squared with mask. Vector id specifies the mask. * */ template class GridReducer> __global__ void wL2NormSquareMaskKernel(const T *x, const T *w, const T *id, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { if(id[i] > 0.0) sum += x[i] * w[i] * x[i] * w[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Finds min value in the vector. 
* */ template class GridReducer> __global__ void findMinKernel(T MAX_VAL, const T *x, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::minimum; const T Id = op::identity(); T minimum = Id; GRID_STRIDE_XLOOP(I, i, n) { minimum = min(x[i], minimum); } GridReducer{}(minimum, Id, out, device_count); } /* * Computes L1 norm of vector * */ template class GridReducer> __global__ void L1NormKernel(const T *x, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { sum += abs(x[i]); } GridReducer{}(sum, Id, out, device_count); } /* * Vector inverse z[i] = 1/x[i] with check for zeros. Reduction is performed * to flag the result if any x[i] = 0. * */ template class GridReducer> __global__ void invTestKernel(const T *x, T *z, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T flag = Id; GRID_STRIDE_XLOOP(I, i, n) { if (x[i] == static_cast(0.0)) flag += 1.0; else z[i] = 1.0/x[i]; } GridReducer{}(flag, Id, out, device_count); } /* * Checks if inequality constraints are satisfied. Constraint check * results are stored in vector 'm'. A sum reduction over all elements * of 'm' is performed to find if any of the constraints is violated. * If all constraints are satisfied sum == 0. * */ template class GridReducer> __global__ void constrMaskKernel(const T *c, const T *x, T *m, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { // test = true if constraints violated bool test = (std::abs(c[i]) > 1.5 && c[i]*x[i] <= 0.0) || (std::abs(c[i]) > 0.5 && c[i]*x[i] < 0.0); m[i] = test ? 1.0 : 0.0; sum = m[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Finds minimum component-wise quotient. 
* */ template class GridReducer> __global__ void minQuotientKernel(const T MAX_VAL, const T *num, const T *den, T *min_quotient, I n, unsigned int* device_count) { using op = sundials::reductions::impl::minimum; const T Id = op::identity(); T minimum = Id; T quotient = 0.0; GRID_STRIDE_XLOOP(I, i, n) { quotient = (den[i] == static_cast(0.0)) ? Id : num[i]/den[i]; minimum = min(quotient, minimum); } GridReducer{}(minimum, Id, min_quotient, device_count); } } // namespace impl } // namespace cuda } // namespace sundials #endif // _NVECTOR_CUDA_KERNELS_CUH_ StanHeaders/src/nvector/cuda/VectorArrayKernels.cuh0000644000176200001440000001232414645137106022135 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): David Gardner, Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- */ #ifndef _NVECTOR_CUDA_ARRAY_KERNELS_CUH_ #define _NVECTOR_CUDA_ARRAY_KERNELS_CUH_ #include #include #include "sundials_cuda_kernels.cuh" namespace sundials { namespace cuda { namespace impl { /* * ----------------------------------------------------------------------------- * fused vector operation kernels * ----------------------------------------------------------------------------- */ /* * Computes the linear combination of nv vectors */ template __global__ void linearCombinationKernel(int nv, T* c, T** xd, T* zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { zd[i] = c[0]*xd[0][i]; for (int j=1; j __global__ void scaleAddMultiKernel(int nv, T* c, T* xd, T** yd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j class GridReducer> __global__ void dotProdMultiKernel(int nv, const T* xd, T** yd, T* out, I n) { // REQUIRES nv blocks (i.e. gridDim.x == nv) using op = sundials::reductions::impl::plus; constexpr T Id = op::identity(); const I k = blockIdx.x; // Initialize to zero. 
T sum = Id; for (I i = threadIdx.x; i < n; i += blockDim.x) { // each thread computes n/blockDim.x elements sum += xd[i] * yd[k][i]; } GridReducer{}(sum, Id, &out[k], nullptr); } /* * ----------------------------------------------------------------------------- * vector array operation kernels * ----------------------------------------------------------------------------- */ /* * Computes the linear sum of multiple vectors */ template __global__ void linearSumVectorArrayKernel(int nv, T a, T** xd, T b, T** yd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j __global__ void scaleVectorArrayKernel(int nv, T* c, T** xd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j __global__ void constVectorArrayKernel(int nv, T c, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j class GridReducer> __global__ void wL2NormSquareVectorArrayKernel(int nv, T** xd, T** wd, T* out, I n) { // REQUIRES nv blocks (i.e. gridDim.x == nv) using op = sundials::reductions::impl::plus; constexpr T Id = op::identity(); const I k = blockIdx.x; // Initialize to zero. T sum = 0.0; for (I i = threadIdx.x; i < n; i += blockDim.x) { // each thread computes n/blockDim.x elements sum += xd[k][i] * wd[k][i] * xd[k][i] * wd[k][i]; } GridReducer{}(sum, Id, &out[k], nullptr); } /* * Masked WRMS norm of nv vectors. * */ template class GridReducer> __global__ void wL2NormSquareMaskVectorArrayKernel(int nv, T** xd, T** wd, T* id, T* out, I n) { // REQUIRES nv blocks (i.e. gridDim.x == nv) using op = sundials::reductions::impl::plus; constexpr T Id = op::identity(); const I k = blockIdx.x; // Initialize to zero. 
T sum = 0.0; for (I i = threadIdx.x; i < n; i += blockDim.x) { // each thread computes n/blockDim.x elements if (id[i] > 0.0) sum += xd[k][i] * wd[k][i] * xd[k][i] * wd[k][i]; } GridReducer{}(sum, Id, &out[k], nullptr); } /* * Computes the scaled sum of a vector array with multiple other vector arrays */ template __global__ void scaleAddMultiVectorArrayKernel(int nv, int ns, T* c, T** xd, T** yd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int k=0; k __global__ void linearCombinationVectorArrayKernel(int nv, int ns, T* c, T** xd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int k=0; k #include #include #include #include #include #include "VectorArrayKernels.cuh" #include "VectorKernels.cuh" #include "sundials_cuda.h" #include "sundials_debug.h" #define ZERO RCONST(0.0) #define HALF RCONST(0.5) using namespace sundials; using namespace sundials::cuda; using namespace sundials::cuda::impl; /* * Private function definitions */ // Allocate vector data static int AllocateData(N_Vector v); // Reduction buffer functions static int InitializeDeviceCounter(N_Vector v); static int FreeDeviceCounter(N_Vector v); static int InitializeReductionBuffer(N_Vector v, realtype value, size_t n = 1); static void FreeReductionBuffer(N_Vector v); static int CopyReductionBufferFromDevice(N_Vector v, size_t n = 1); // Fused operation buffer functions static int FusedBuffer_Init(N_Vector v, int nreal, int nptr); static int FusedBuffer_CopyRealArray(N_Vector v, realtype *r_data, int nval, realtype **shortcut); static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut); static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut); static int FusedBuffer_CopyToDevice(N_Vector v); static int FusedBuffer_Free(N_Vector v); // Kernel launch parameters static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, cudaStream_t& stream, size_t n = 0); static 
int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, cudaStream_t& stream, bool& atomic, size_t n = 0); static void PostKernelLaunch(); /* * Macro definitions */ // Macros to access vector content #define NVEC_CUDA_CONTENT(x) ((N_VectorContent_Cuda)(x->content)) #define NVEC_CUDA_MEMSIZE(x) (NVEC_CUDA_CONTENT(x)->length * sizeof(realtype)) #define NVEC_CUDA_MEMHELP(x) (NVEC_CUDA_CONTENT(x)->mem_helper) #define NVEC_CUDA_HDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->host_data->ptr) #define NVEC_CUDA_DDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->device_data->ptr) #define NVEC_CUDA_STREAM(x) (NVEC_CUDA_CONTENT(x)->stream_exec_policy->stream()) // Macros to access vector private content #define NVEC_CUDA_PRIVATE(x) ((N_PrivateVectorContent_Cuda)(NVEC_CUDA_CONTENT(x)->priv)) #define NVEC_CUDA_HBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_host->ptr) #define NVEC_CUDA_DBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_dev->ptr) #define NVEC_CUDA_DCOUNTERp(x) ((unsigned int*) NVEC_CUDA_PRIVATE(x)->device_counter->ptr) /* * Private structure definition */ struct _N_PrivateVectorContent_Cuda { booleantype use_managed_mem; /* do data pointers use managed memory */ // reduction workspace SUNMemory device_counter; // device memory for a counter (used in LDS reductions) SUNMemory reduce_buffer_dev; // device memory for reductions SUNMemory reduce_buffer_host; // host memory for reductions size_t reduce_buffer_bytes; // current size of reduction buffers // fused op workspace SUNMemory fused_buffer_dev; // device memory for fused ops SUNMemory fused_buffer_host; // host memory for fused ops size_t fused_buffer_bytes; // current size of the buffers size_t fused_buffer_offset; // current offset into the buffer }; typedef struct _N_PrivateVectorContent_Cuda *N_PrivateVectorContent_Cuda; /* Default policies to clone */ ThreadDirectExecPolicy DEFAULT_STREAMING_EXECPOLICY(256); BlockReduceAtomicExecPolicy 
DEFAULT_REDUCTION_EXECPOLICY(256); extern "C" { N_Vector N_VNewEmpty_Cuda(SUNContext sunctx) { N_Vector v; /* Create vector */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Cuda; v->ops->nvclone = N_VClone_Cuda; v->ops->nvcloneempty = N_VCloneEmpty_Cuda; v->ops->nvdestroy = N_VDestroy_Cuda; v->ops->nvspace = N_VSpace_Cuda; v->ops->nvgetlength = N_VGetLength_Cuda; v->ops->nvgetarraypointer = N_VGetHostArrayPointer_Cuda; v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_Cuda; v->ops->nvsetarraypointer = N_VSetHostArrayPointer_Cuda; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Cuda; v->ops->nvconst = N_VConst_Cuda; v->ops->nvprod = N_VProd_Cuda; v->ops->nvdiv = N_VDiv_Cuda; v->ops->nvscale = N_VScale_Cuda; v->ops->nvabs = N_VAbs_Cuda; v->ops->nvinv = N_VInv_Cuda; v->ops->nvaddconst = N_VAddConst_Cuda; v->ops->nvdotprod = N_VDotProd_Cuda; v->ops->nvmaxnorm = N_VMaxNorm_Cuda; v->ops->nvmin = N_VMin_Cuda; v->ops->nvl1norm = N_VL1Norm_Cuda; v->ops->nvinvtest = N_VInvTest_Cuda; v->ops->nvconstrmask = N_VConstrMask_Cuda; v->ops->nvminquotient = N_VMinQuotient_Cuda; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Cuda; v->ops->nvwrmsnorm = N_VWrmsNorm_Cuda; v->ops->nvwl2norm = N_VWL2Norm_Cuda; v->ops->nvcompare = N_VCompare_Cuda; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_Cuda; v->ops->nvmaxnormlocal = N_VMaxNorm_Cuda; v->ops->nvminlocal = N_VMin_Cuda; v->ops->nvl1normlocal = N_VL1Norm_Cuda; v->ops->nvinvtestlocal = N_VInvTest_Cuda; v->ops->nvconstrmasklocal = N_VConstrMask_Cuda; v->ops->nvminquotientlocal = N_VMinQuotient_Cuda; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Cuda; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Cuda; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Cuda; 
/* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_Cuda; v->ops->nvbufpack = N_VBufPack_Cuda; v->ops->nvbufunpack = N_VBufUnpack_Cuda; /* print operation for debugging */ v->ops->nvprint = N_VPrint_Cuda; v->ops->nvprintfile = N_VPrintFile_Cuda; /* Create content */ v->content = (N_VectorContent_Cuda) malloc(sizeof(_N_VectorContent_Cuda)); if (v->content == NULL) { N_VDestroy(v); return(NULL); } NVEC_CUDA_CONTENT(v)->priv = malloc(sizeof(_N_PrivateVectorContent_Cuda)); if (NVEC_CUDA_CONTENT(v)->priv == NULL) { N_VDestroy(v); return(NULL); } // Initialize content NVEC_CUDA_CONTENT(v)->length = 0; NVEC_CUDA_CONTENT(v)->host_data = NULL; NVEC_CUDA_CONTENT(v)->device_data = NULL; NVEC_CUDA_CONTENT(v)->stream_exec_policy = NULL; NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NULL; NVEC_CUDA_CONTENT(v)->mem_helper = NULL; NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE; // Initialize private content NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE; NVEC_CUDA_PRIVATE(v)->device_counter = NULL; NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL; NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL; NVEC_CUDA_PRIVATE(v)->reduce_buffer_bytes = 0; NVEC_CUDA_PRIVATE(v)->fused_buffer_dev = NULL; NVEC_CUDA_PRIVATE(v)->fused_buffer_host = NULL; NVEC_CUDA_PRIVATE(v)->fused_buffer_bytes = 0; NVEC_CUDA_PRIVATE(v)->fused_buffer_offset = 0; return(v); } N_Vector N_VNew_Cuda(sunindextype length, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); NVEC_CUDA_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if 
(AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VNewWithMemHelp_Cuda(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, SUNContext sunctx) { N_Vector v; if (helper == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper is NULL\n"); return(NULL); } if (!SUNMemoryHelper_ImplementsRequiredOps(helper)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper doesn't implement all required ops\n"); return(NULL); } v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->mem_helper = helper; NVEC_CUDA_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = use_managed_mem; if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VNewManaged_Cuda(sunindextype length, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VMake_Cuda(sunindextype length, realtype 
*h_vdata, realtype *d_vdata, SUNContext sunctx) { N_Vector v; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(h_vdata, SUNMEMTYPE_HOST); NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap(d_vdata, SUNMEMTYPE_DEVICE); NVEC_CUDA_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_CUDA_CONTENT(v)->device_data == NULL || NVEC_CUDA_CONTENT(v)->host_data == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VMakeManaged_Cuda(sunindextype length, realtype *vdata, SUNContext sunctx) { N_Vector v; if (vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Cuda(sunctx); if (v == NULL) return(NULL); NVEC_CUDA_CONTENT(v)->length = length; NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(vdata, SUNMEMTYPE_UVM); NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data); NVEC_CUDA_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda(sunctx); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_CUDA_CONTENT(v)->device_data == NULL || 
NVEC_CUDA_CONTENT(v)->host_data == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); return(NULL); } return(v); } /* ---------------------------------------------------------------------------- * Set pointer to the raw host data. Does not free the existing pointer. */ void N_VSetHostArrayPointer_Cuda(realtype* h_vdata, N_Vector v) { if (N_VIsManagedMemory_Cuda(v)) { if (NVEC_CUDA_CONTENT(v)->host_data) { NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata; NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) h_vdata; } else { NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_UVM); NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data); } } else { if (NVEC_CUDA_CONTENT(v)->host_data) { NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata; } else { NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_HOST); } } } /* ---------------------------------------------------------------------------- * Set pointer to the raw device data */ void N_VSetDeviceArrayPointer_Cuda(realtype* d_vdata, N_Vector v) { if (N_VIsManagedMemory_Cuda(v)) { if (NVEC_CUDA_CONTENT(v)->device_data) { NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata; NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) d_vdata; } else { NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_UVM); NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->device_data); } } else { if (NVEC_CUDA_CONTENT(v)->device_data) { NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata; } else { NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_DEVICE); } } } /* ---------------------------------------------------------------------------- * Return a flag indicating if the memory for the vector data is managed */ booleantype N_VIsManagedMemory_Cuda(N_Vector x) { return 
NVEC_CUDA_PRIVATE(x)->use_managed_mem; } int N_VSetKernelExecPolicy_Cuda(N_Vector x, SUNCudaExecPolicy* stream_exec_policy, SUNCudaExecPolicy* reduce_exec_policy) { if (x == NULL) return(-1); /* Delete the old policies */ delete NVEC_CUDA_CONTENT(x)->stream_exec_policy; delete NVEC_CUDA_CONTENT(x)->reduce_exec_policy; /* Reset the policy if it is null */ if (stream_exec_policy == NULL) NVEC_CUDA_CONTENT(x)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); else NVEC_CUDA_CONTENT(x)->stream_exec_policy = stream_exec_policy->clone(); if (reduce_exec_policy == NULL) NVEC_CUDA_CONTENT(x)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); else NVEC_CUDA_CONTENT(x)->reduce_exec_policy = reduce_exec_policy->clone(); return(0); } /* ---------------------------------------------------------------------------- * Copy vector data to the device */ void N_VCopyToDevice_Cuda(N_Vector x) { int copy_fail; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), NVEC_CUDA_CONTENT(x)->device_data, NVEC_CUDA_CONTENT(x)->host_data, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyToDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(x))); } /* ---------------------------------------------------------------------------- * Copy vector data from the device to the host */ void N_VCopyFromDevice_Cuda(N_Vector x) { int copy_fail; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), NVEC_CUDA_CONTENT(x)->host_data, NVEC_CUDA_CONTENT(x)->device_data, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyFromDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(x))); } /* 
---------------------------------------------------------------------------- * Function to print the a CUDA-based vector to stdout */ void N_VPrint_Cuda(N_Vector x) { N_VPrintFile_Cuda(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print the a CUDA-based vector to outfile */ void N_VPrintFile_Cuda(N_Vector x, FILE *outfile) { sunindextype i; #ifdef SUNDIALS_DEBUG_PRINTVEC N_VCopyFromDevice_Cuda(x); #endif for (i = 0; i < NVEC_CUDA_CONTENT(x)->length; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%35.32Le\n", NVEC_CUDA_HDATAp(x)[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%19.16e\n", NVEC_CUDA_HDATAp(x)[i]); #else fprintf(outfile, "%11.8e\n", NVEC_CUDA_HDATAp(x)[i]); #endif } fprintf(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Cuda(N_Vector w) { N_Vector v; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty_Cuda(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Set content */ NVEC_CUDA_CONTENT(v)->length = NVEC_CUDA_CONTENT(w)->length; NVEC_CUDA_PRIVATE(v)->use_managed_mem = NVEC_CUDA_PRIVATE(w)->use_managed_mem; return(v); } N_Vector N_VClone_Cuda(N_Vector w) { N_Vector v; v = NULL; v = N_VCloneEmpty_Cuda(w); if (v == NULL) return(NULL); NVEC_CUDA_MEMHELP(v) = SUNMemoryHelper_Clone(NVEC_CUDA_MEMHELP(w)); NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE; NVEC_CUDA_CONTENT(v)->stream_exec_policy = NVEC_CUDA_CONTENT(w)->stream_exec_policy->clone(); NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NVEC_CUDA_CONTENT(w)->reduce_exec_policy->clone(); if (NVEC_CUDA_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: SUNMemoryHelper_Clone returned NULL\n"); N_VDestroy(v); return(NULL); } if 
(AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } void N_VDestroy_Cuda(N_Vector v) { N_VectorContent_Cuda vc; N_PrivateVectorContent_Cuda vcp; if (v == NULL) return; /* free ops structure */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } /* extract content */ vc = NVEC_CUDA_CONTENT(v); if (vc == NULL) { free(v); v = NULL; return; } /* free private content */ vcp = (N_PrivateVectorContent_Cuda) vc->priv; if (vcp != NULL) { /* free items in private content */ FreeDeviceCounter(v); FreeReductionBuffer(v); FusedBuffer_Free(v); free(vcp); vc->priv = NULL; } /* free items in content */ if (NVEC_CUDA_MEMHELP(v)) { SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vc->host_data, (void*) NVEC_CUDA_STREAM(v)); vc->host_data = NULL; SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vc->device_data, (void*) NVEC_CUDA_STREAM(v)); vc->device_data = NULL; if (vc->own_helper) SUNMemoryHelper_Destroy(vc->mem_helper); vc->mem_helper = NULL; } /* we can delete the exec policies now that we are done with the streams */ delete vc->stream_exec_policy; delete vc->reduce_exec_policy; /* free content struct */ free(vc); /* free vector */ free(v); return; } void N_VSpace_Cuda(N_Vector X, sunindextype *lrw, sunindextype *liw) { *lrw = NVEC_CUDA_CONTENT(X)->length; *liw = 2; } void N_VConst_Cuda(realtype a, N_Vector X) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConst_Cuda: GetKernelParameters returned nonzero\n"); } setConstKernel<<>> ( a, NVEC_CUDA_DDATAp(X), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VLinearSum_Cuda(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSum_Cuda: GetKernelParameters 
returned nonzero\n"); } linearSumKernel<<>> ( a, NVEC_CUDA_DDATAp(X), b, NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VProd_Cuda(N_Vector X, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VProd_Cuda: GetKernelParameters returned nonzero\n"); } prodKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VDiv_Cuda(N_Vector X, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDiv_Cuda: GetKernelParameters returned nonzero\n"); } divKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VScale_Cuda(realtype a, N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScale_Cuda: GetKernelParameters returned nonzero\n"); } scaleKernel<<>> ( a, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VAbs_Cuda(N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VAbs_Cuda: GetKernelParameters returned nonzero\n"); } absKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VInv_Cuda(N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInv_Cuda: GetKernelParameters returned nonzero\n"); } invKernel<<>> ( 
NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } void N_VAddConst_Cuda(N_Vector X, realtype b, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VAddConst_Cuda: GetKernelParameters returned nonzero\n"); } addConstKernel<<>> ( b, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } realtype N_VDotProd_Cuda(N_Vector X, N_Vector Y) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: GetKernelParameters returned nonzero\n"); } // When using atomic reductions, we only need one output value const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { dotProdKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { dotProdKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Y), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VMaxNorm_Cuda(N_Vector X) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Cuda: GetKernelParameters returned nonzero\n"); } // When using atomic reductions, we only need one output value const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { maxNormKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { maxNormKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Finish reduction on CPU if there are less than two blocks of data left. CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWSqrSumLocal_Cuda(N_Vector X, N_Vector W) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Cuda: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { wL2NormSquareKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(W), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { wL2NormSquareKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(W), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWrmsNorm_Cuda(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Cuda(X, W); return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length); } realtype N_VWSqrSumMaskLocal_Cuda(N_Vector X, N_Vector W, N_Vector Id) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { 
SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Cuda: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { wL2NormSquareMaskKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(W), NVEC_CUDA_DDATAp(Id), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { wL2NormSquareMaskKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(W), NVEC_CUDA_DDATAp(Id), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWrmsNormMask_Cuda(N_Vector X, N_Vector W, N_Vector Id) { const realtype sum = N_VWSqrSumMaskLocal_Cuda(X, W, Id); return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length); } realtype N_VMin_Cuda(N_Vector X) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = std::numeric_limits::max(); if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Cuda: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { findMinKernel<<>> ( gpu_result, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { findMinKernel<<>> ( gpu_result, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWL2Norm_Cuda(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Cuda(X, W); return std::sqrt(sum); } realtype N_VL1Norm_Cuda(N_Vector X) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Cuda: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { L1NormKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { L1NormKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return gpu_result; } void N_VCompare_Cuda(realtype c, N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCompare_Cuda: GetKernelParameters returned nonzero\n"); } compareKernel<<>> ( c, NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_CONTENT(X)->length ); PostKernelLaunch(); } booleantype N_VInvTest_Cuda(N_Vector X, N_Vector Z) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Cuda: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { invTestKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { invTestKernel<<>> ( NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(Z), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return (gpu_result < HALF); } booleantype N_VConstrMask_Cuda(N_Vector C, N_Vector X, N_Vector M) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Cuda: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { constrMaskKernel<<>> ( NVEC_CUDA_DDATAp(C), NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(M), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, nullptr ); } else { constrMaskKernel<<>> ( NVEC_CUDA_DDATAp(C), NVEC_CUDA_DDATAp(X), NVEC_CUDA_DDATAp(M), NVEC_CUDA_DBUFFERp(X), NVEC_CUDA_CONTENT(X)->length, NVEC_CUDA_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_CUDA_HBUFFERp(X)[0]; return (gpu_result < HALF); } realtype N_VMinQuotient_Cuda(N_Vector num, N_Vector denom) { bool atomic; size_t grid, block, shMemSize; cudaStream_t stream; realtype gpu_result = std::numeric_limits::max();; if (GetKernelParameters(num, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Cuda: GetKernelParameters returned nonzero\n"); } const 
size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(num, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Cuda: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { minQuotientKernel<<>> ( gpu_result, NVEC_CUDA_DDATAp(num), NVEC_CUDA_DDATAp(denom), NVEC_CUDA_DBUFFERp(num), NVEC_CUDA_CONTENT(num)->length, nullptr ); } else { minQuotientKernel<<>> ( gpu_result, NVEC_CUDA_DDATAp(num), NVEC_CUDA_DDATAp(denom), NVEC_CUDA_DBUFFERp(num), NVEC_CUDA_CONTENT(num)->length, NVEC_CUDA_DCOUNTERp(num) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(num); gpu_result = NVEC_CUDA_HBUFFERp(num)[0]; return gpu_result; } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector z) { // Fused op workspace shortcuts realtype* cdata = NULL; realtype** xdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(z, nvec, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(z, c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(z, X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(z)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters and launch size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Cuda: GetKernelParameters returned nonzero\n"); return -1; } 
linearCombinationKernel<<>> ( nvec, cdata, xdata, NVEC_CUDA_DDATAp(z), NVEC_CUDA_CONTENT(z)->length ); PostKernelLaunch(); return 0; } int N_VScaleAddMulti_Cuda(int nvec, realtype* c, N_Vector x, N_Vector* Y, N_Vector* Z) { // Shortcuts to the fused op workspace realtype* cdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(x, nvec, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(x, c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(x)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(x, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Cuda: GetKernelParameters returned nonzero\n"); return -1; } scaleAddMultiKernel<<>> ( nvec, cdata, NVEC_CUDA_DDATAp(x), ydata, zdata, NVEC_CUDA_CONTENT(x)->length ); PostKernelLaunch(); return 0; } int N_VDotProdMulti_Cuda(int nvec, N_Vector x, N_Vector* Y, realtype* dots) { // Fused op workspace shortcuts realtype** ydata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(x, 0, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(x, Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: 
FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(x)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(x, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProdMulti_Cuda: GetKernelParameters returned nonzero\n"); return -1; } grid = nvec; if (InitializeReductionBuffer(x, ZERO, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: InitializeReductionBuffer returned nonzero\n"); } dotProdMultiKernel<<>> ( nvec, NVEC_CUDA_DDATAp(x), ydata, NVEC_CUDA_DBUFFERp(x), NVEC_CUDA_CONTENT(x)->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(x, nvec); for (int i = 0; i < nvec; ++i) { dots[i] = NVEC_CUDA_HBUFFERp(x)[i]; } return 0; } /* * ----------------------------------------------------------------------------- * vector array operations * ----------------------------------------------------------------------------- */ int N_VLinearSumVectorArray_Cuda(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { // Shortcuts to the fused op workspace realtype** xdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], 0, 3 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Y, nvec, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: 
FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinaerSumVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } linearSumVectorArrayKernel<<>> ( nvec, a, xdata, b, ydata, zdata, NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } int N_VScaleVectorArray_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { // Shortcuts to the fused op workspace arrays realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], nvec, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } scaleVectorArrayKernel<<>> ( nvec, cdata, xdata, 
zdata, NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } int N_VConstVectorArray_Cuda(int nvec, realtype c, N_Vector* Z) { // Shortcuts to the fused op workspace arrays realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], 0, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } constVectorArrayKernel<<>> ( nvec, c, zdata, NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } int N_VWrmsNormVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W, realtype* norms) { // Fused op workspace shortcuts realtype** xdata = NULL; realtype** wdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(W[0], 0, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], W, nvec, &wdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(W[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } if 
(InitializeReductionBuffer(W[0], ZERO, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: InitializeReductionBuffer returned nonzero\n"); } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(W[0], true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } grid = nvec; wL2NormSquareVectorArrayKernel<<>> ( nvec, xdata, wdata, NVEC_CUDA_DBUFFERp(W[0]), NVEC_CUDA_CONTENT(W[0])->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(W[0], nvec); for (int i = 0; i < nvec; ++i) { norms[i] = std::sqrt(NVEC_CUDA_HBUFFERp(W[0])[i] / NVEC_CUDA_CONTENT(W[0])->length); } return 0; } int N_VWrmsNormMaskVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* norms) { // Fused op workspace shortcuts realtype** xdata = NULL; realtype** wdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(W[0], 0, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(W[0], W, nvec, &wdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(W[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } if (InitializeReductionBuffer(W[0], ZERO, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWrmsNormVectorArray_Cuda: InitializeReductionBuffer returned nonzero\n"); } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(W[0], true, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR 
in N_VWrmsNormMaskVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } grid = nvec; wL2NormSquareMaskVectorArrayKernel<<>> ( nvec, xdata, wdata, NVEC_CUDA_DDATAp(id), NVEC_CUDA_DBUFFERp(W[0]), NVEC_CUDA_CONTENT(W[0])->length ); PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(W[0], nvec); for (int i = 0; i < nvec; ++i) { norms[i] = std::sqrt(NVEC_CUDA_HBUFFERp(W[0])[i] / NVEC_CUDA_CONTENT(W[0])->length); } return 0; } int N_VScaleAddMultiVectorArray_Cuda(int nvec, int nsum, realtype* c, N_Vector* X, N_Vector** Y, N_Vector** Z) { // Shortcuts to the fused op workspace realtype* cdata = NULL; realtype** xdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(X[0], nsum, nvec + 2 * nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(X[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(X[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Y, nvec, nsum, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Cuda: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Z, nvec, nsum, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Cuda: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(X[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(X[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in 
N_VScaleAddMultiVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } scaleAddMultiVectorArrayKernel<<>> ( nvec, nsum, cdata, xdata, ydata, zdata, NVEC_CUDA_CONTENT(X[0])->length ); PostKernelLaunch(); return 0; } int N_VLinearCombinationVectorArray_Cuda(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { // Shortcuts to the fused op workspace arrays realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; // Setup the fused op workspace if (FusedBuffer_Init(Z[0], nsum, nvec + nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(Z[0], X, nvec, nsum, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } // Set kernel parameters size_t grid, block, shMemSize; cudaStream_t stream; if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Cuda: GetKernelParameters returned nonzero\n"); return -1; } linearCombinationVectorArrayKernel<<>> ( nvec, nsum, cdata, xdata, zdata, NVEC_CUDA_CONTENT(Z[0])->length ); PostKernelLaunch(); return 0; } /* * ----------------------------------------------------------------- * OPTIONAL XBraid interface operations * ----------------------------------------------------------------- */ int 
N_VBufSize_Cuda(N_Vector x, sunindextype *size) { if (x == NULL) return(-1); *size = (sunindextype)NVEC_CUDA_MEMSIZE(x); return(0); } int N_VBufPack_Cuda(N_Vector x, void *buf) { int copy_fail = 0; cudaError_t cuerr; if (x == NULL || buf == NULL) return(-1); SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return(-1); copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), buf_mem, NVEC_CUDA_CONTENT(x)->device_data, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); /* we synchronize with respect to the host, but only in this stream */ cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(x)); SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem, (void*) NVEC_CUDA_STREAM(x)); return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0); } int N_VBufUnpack_Cuda(N_Vector x, void *buf) { int copy_fail = 0; cudaError_t cuerr; if (x == NULL || buf == NULL) return(-1); SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return(-1); copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x), NVEC_CUDA_CONTENT(x)->device_data, buf_mem, NVEC_CUDA_MEMSIZE(x), (void*) NVEC_CUDA_STREAM(x)); /* we synchronize with respect to the host, but only in this stream */ cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(x)); SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem, (void*) NVEC_CUDA_STREAM(x)); return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? 
-1 : 0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_Cuda(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Cuda; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Cuda; v->ops->nvdotprodmulti = N_VDotProdMulti_Cuda; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Cuda; v->ops->nvscalevectorarray = N_VScaleVectorArray_Cuda; v->ops->nvconstvectorarray = N_VConstVectorArray_Cuda; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Cuda; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Cuda; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Cuda; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Cuda; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Cuda; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvlinearcombination = tf ? 
N_VLinearCombination_Cuda : NULL; return 0; } int N_VEnableScaleAddMulti_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_Cuda : NULL; return 0; } int N_VEnableDotProdMulti_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_Cuda : NULL; v->ops->nvdotprodmultilocal = tf ? N_VDotProdMulti_Cuda : NULL; return 0; } int N_VEnableLinearSumVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_Cuda : NULL; return 0; } int N_VEnableScaleVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_Cuda : NULL; return 0; } int N_VEnableConstVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_Cuda : NULL; return 0; } int N_VEnableWrmsNormVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_Cuda : NULL; return 0; } int N_VEnableWrmsNormMaskVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_Cuda : NULL; return 0; } int N_VEnableScaleAddMultiVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_Cuda : NULL; return 0; } int N_VEnableLinearCombinationVectorArray_Cuda(N_Vector v, booleantype tf) { if (v == NULL) return -1; if (v->ops == NULL) return -1; v->ops->nvlinearcombinationvectorarray = tf ? 
N_VLinearCombinationVectorArray_Cuda : NULL; return 0; } } // extern "C" /* * Private helper functions. */ static int AllocateData(N_Vector v) { int alloc_fail = 0; N_VectorContent_Cuda vc = NVEC_CUDA_CONTENT(v); N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); if (N_VGetLength_Cuda(v) == 0) return(0); if (vcp->use_managed_mem) { alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data), NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_UVM, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed for SUNMEMTYPE_UVM\n"); } vc->host_data = SUNMemoryHelper_Alias(vc->device_data); } else { alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->host_data), NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_HOST, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); } alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data), NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_DEVICE, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); } } return(alloc_fail ? -1 : 0); } /* * Initializes the internal buffer used for reductions. * If the buffer is already allocated, it will only be reallocated * if it is no longer large enough. This may occur if the length * of the vector is increased. The buffer is initialized to the * value given. 
*/ static int InitializeReductionBuffer(N_Vector v, realtype value, size_t n) { int alloc_fail = 0; int copy_fail = 0; booleantype alloc_mem = SUNFALSE; size_t bytes = n * sizeof(realtype); // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check if the existing reduction memory is not large enough if (vcp->reduce_buffer_bytes < bytes) { FreeReductionBuffer(v); alloc_mem = SUNTRUE; } if (alloc_mem) { // Allocate pinned memory on the host alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->reduce_buffer_host), bytes, SUNMEMTYPE_PINNED, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("WARNING in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n"); // If pinned alloc failed, allocate plain host memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->reduce_buffer_host), bytes, SUNMEMTYPE_HOST, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); } } // Allocate device memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->reduce_buffer_dev), bytes, SUNMEMTYPE_DEVICE, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); } } if (!alloc_fail) { // Store the size of the reduction memory buffer vcp->reduce_buffer_bytes = bytes; // Initialize the host memory with the value for (int i = 0; i < n; ++i) ((realtype*)vcp->reduce_buffer_host->ptr)[i] = value; // Initialize the device memory with the value copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_dev, vcp->reduce_buffer_host, bytes, (void*) NVEC_CUDA_STREAM(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_CopyAsync failed\n"); } } 
return((alloc_fail || copy_fail) ? -1 : 0); } /* Free the reduction buffer */ static void FreeReductionBuffer(N_Vector v) { N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); if (vcp == NULL) return; // Free device mem if (vcp->reduce_buffer_dev != NULL) SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_dev, (void*) NVEC_CUDA_STREAM(v)); vcp->reduce_buffer_dev = NULL; // Free host mem if (vcp->reduce_buffer_host != NULL) SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_host, (void*) NVEC_CUDA_STREAM(v)); vcp->reduce_buffer_host = NULL; // Reset allocated memory size vcp->reduce_buffer_bytes = 0; } /* Copy the reduction buffer from the device to the host. */ static int CopyReductionBufferFromDevice(N_Vector v, size_t n) { int copy_fail; cudaError_t cuerr; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v), NVEC_CUDA_PRIVATE(v)->reduce_buffer_host, NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev, n * sizeof(realtype), (void*) NVEC_CUDA_STREAM(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in CopyReductionBufferFromDevice: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(v)); return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? 
-1 : 0); } static int FusedBuffer_Init(N_Vector v, int nreal, int nptr) { int alloc_fail = 0; booleantype alloc_mem = SUNFALSE; // pad buffer with single precision data #if defined(SUNDIALS_SINGLE_PRECISION) size_t bytes = nreal * 2 * sizeof(realtype) + nptr * sizeof(realtype*); #elif defined(SUNDIALS_DOUBLE_PRECISION) size_t bytes = nreal * sizeof(realtype) + nptr * sizeof(realtype*); #else #error Incompatible precision for CUDA #endif // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check if the existing memory is not large enough if (vcp->fused_buffer_bytes < bytes) { FusedBuffer_Free(v); alloc_mem = SUNTRUE; } if (alloc_mem) { // Allocate pinned memory on the host alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_PINNED, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("WARNING in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n"); // If pinned alloc failed, allocate plain host memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_HOST, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); return -1; } } // Allocate device memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vcp->fused_buffer_dev), bytes, SUNMEMTYPE_DEVICE, (void*) NVEC_CUDA_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); return -1; } // Store the size of the fused op buffer vcp->fused_buffer_bytes = bytes; } // Reset the buffer offset vcp->fused_buffer_offset = 0; return 0; } static int FusedBuffer_CopyRealArray(N_Vector v, realtype *rdata, int nval, realtype **shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = 
NVEC_CUDA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyRealArray: Buffer offset is exceedes the buffer size\n"); return -1; } realtype* h_buffer = (realtype*) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nval; j++) { h_buffer[j] = rdata[j]; } // Set shortcut to the device buffer and update offset *shortcut = (realtype*) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); // accounting for buffer padding #if defined(SUNDIALS_SINGLE_PRECISION) vcp->fused_buffer_offset += nval * 2 * sizeof(realtype); #elif defined(SUNDIALS_DOUBLE_PRECISION) vcp->fused_buffer_offset += nval * sizeof(realtype); #else #error Incompatible precision for CUDA #endif return 0; } static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyPtrArray1D: Buffer offset is exceedes the buffer size\n"); return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { h_buffer[j] = NVEC_CUDA_DDATAp(X[j]); } // Set shortcut to the device buffer and update offset *shortcut = (realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); vcp->fused_buffer_offset += nvec * sizeof(realtype*); return 0; } static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Check buffer space and fill the host buffer if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in 
FusedBuffer_CopyPtrArray2D: Buffer offset is exceedes the buffer size\n"); return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { for (int k = 0; k < nsum; k++) { h_buffer[j * nsum + k] = NVEC_CUDA_DDATAp(X[k][j]); } } // Set shortcut to the device buffer and update offset *shortcut = (realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); // Update the offset vcp->fused_buffer_offset += nvec * nsum * sizeof(realtype*); return 0; } static int FusedBuffer_CopyToDevice(N_Vector v) { // Get the vector private memory structure N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); // Copy the fused buffer to the device int copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v), vcp->fused_buffer_dev, vcp->fused_buffer_host, vcp->fused_buffer_offset, (void*) NVEC_CUDA_STREAM(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyToDevice: SUNMemoryHelper_CopyAsync failed\n"); return -1; } // Synchronize with respect to the host, but only in this stream SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(v))); return 0; } static int FusedBuffer_Free(N_Vector v) { N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v); if (vcp == NULL) return 0; if (vcp->fused_buffer_host) { SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->fused_buffer_host, (void*) NVEC_CUDA_STREAM(v)); vcp->fused_buffer_host = NULL; } if (vcp->fused_buffer_dev) { SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->fused_buffer_dev, (void*) NVEC_CUDA_STREAM(v)); vcp->fused_buffer_dev = NULL; } vcp->fused_buffer_bytes = 0; vcp->fused_buffer_offset = 0; return 0; } static int InitializeDeviceCounter(N_Vector v) { int retval = 0; if (NVEC_CUDA_PRIVATE(v)->device_counter == NULL) { retval = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(NVEC_CUDA_PRIVATE(v)->device_counter), sizeof(unsigned int), SUNMEMTYPE_DEVICE, (void*) NVEC_CUDA_STREAM(v)); } 
cudaMemsetAsync(NVEC_CUDA_DCOUNTERp(v), 0, sizeof(unsigned int), *NVEC_CUDA_STREAM(v)); return retval; } static int FreeDeviceCounter(N_Vector v) { int retval = 0; if (NVEC_CUDA_PRIVATE(v)->device_counter) retval = SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), NVEC_CUDA_PRIVATE(v)->device_counter, (void*) NVEC_CUDA_STREAM(v)); return retval; } /* Get the kernel launch parameters based on the kernel type (reduction or not), * using the appropriate kernel execution policy. */ static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, cudaStream_t& stream, bool& atomic, size_t n) { n = (n == 0) ? NVEC_CUDA_CONTENT(v)->length : n; if (reduction) { SUNCudaExecPolicy* reduce_exec_policy = NVEC_CUDA_CONTENT(v)->reduce_exec_policy; grid = reduce_exec_policy->gridSize(n); block = reduce_exec_policy->blockSize(); shMemSize = 0; stream = *(reduce_exec_policy->stream()); atomic = reduce_exec_policy->atomic(); if (!atomic) { if (InitializeDeviceCounter(v)) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("SUNMemoryHelper_Alloc returned nonzero\n"); #endif return(-1); } } if (block % sundials::cuda::WARP_SIZE) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the block size must be a multiple must be of the CUDA warp size"); #endif return(-1); } } else { SUNCudaExecPolicy* stream_exec_policy = NVEC_CUDA_CONTENT(v)->stream_exec_policy; grid = stream_exec_policy->gridSize(n); block = stream_exec_policy->blockSize(); shMemSize = 0; stream = *(stream_exec_policy->stream()); atomic = false; } if (grid == 0) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the grid size must be > 0"); #endif return(-1); } if (block == 0) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the block size must be > 0"); #endif return(-1); } return(0); } static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, cudaStream_t& stream, size_t n) { bool atomic; return GetKernelParameters(v, 
reduction, grid, block, shMemSize, stream, atomic, n); } /* Should be called after a kernel launch. * If SUNDIALS_DEBUG_CUDA_LASTERROR is not defined, then the function does nothing. * If it is defined, the function will synchronize and check the last CUDA error. */ static void PostKernelLaunch() { #ifdef SUNDIALS_DEBUG_CUDA_LASTERROR cudaDeviceSynchronize(); SUNDIALS_CUDA_VERIFY(cudaGetLastError()); #endif } StanHeaders/src/nvector/pthreads/0000755000176200001440000000000014511135055016534 5ustar liggesusersStanHeaders/src/nvector/pthreads/nvector_pthreads.c0000644000176200001440000042754114645137106022276 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a POSIX Threads (Pthreads) * implementation of the NVECTOR package using a LOCAL array of * structures to pass data to threads. 
* -----------------------------------------------------------------*/ #include #include #include #include #include /* define NAN */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_Pthreads(N_Vector x, N_Vector z); /* z=x */ static void VSum_Pthreads(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_Pthreads(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_Pthreads(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_Pthreads(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_Pthreads(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_Pthreads(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_Pthreads(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_Pthreads(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void VScaleBy_Pthreads(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_Pthreads(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_Pthreads(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_Pthreads(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_Pthreads(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_Pthreads(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_Pthreads(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_Pthreads(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* Pthread companion functions for vector operations */ static void 
*N_VLinearSum_PT(void *thread_data); static void *N_VConst_PT(void *thread_data); static void *N_VProd_PT(void *thread_data); static void *N_VDiv_PT(void *thread_data); static void *N_VScale_PT(void *thread_data); static void *N_VAbs_PT(void *thread_data); static void *N_VInv_PT(void *thread_data); static void *N_VAddConst_PT(void *thread_data); static void *N_VCompare_PT(void *thread_data); static void *N_VDotProd_PT(void *thread_data); static void *N_VMaxNorm_PT(void *thread_data); static void *N_VWSqrSum_PT(void *thread_data); static void *N_VMin_PT(void *thread_data); static void *N_VWL2Norm_PT(void *thread_data); static void *N_VL1Norm_PT(void *thread_data); static void *N_VInvTest_PT(void *thread_data); static void *N_VWSqrSumMask_PT(void *thread_data); static void *N_VConstrMask_PT(void *thread_data); static void *N_VMinQuotient_PT(void *thread_data); /* Pthread companion functions special cases of vector operations */ static void *VCopy_PT(void *thread_data); static void *VSum_PT(void *thread_data); static void *VDiff_PT(void *thread_data); static void *VNeg_PT(void *thread_data); static void *VScaleSum_PT(void *thread_data); static void *VScaleDiff_PT(void *thread_data); static void *VLin1_PT(void *thread_data); static void *VLin2_PT(void *thread_data); static void *VScaleBy_PT(void *thread_data); static void *Vaxpy_PT(void *thread_data); /* Pthread companion functions for fused vector operations */ static void *N_VLinearCombination_PT(void *thread_data); static void *N_VScaleAddMulti_PT(void *thread_data); static void *N_VDotProdMulti_PT(void *thread_data); /* Pthread companion functions for vector array operations */ static void *N_VLinearSumVectorArray_PT(void *thread_data); static void *N_VScaleVectorArray_PT(void *thread_data); static void *N_VConstVectorArray_PT(void *thread_data); static void *N_VWrmsNormVectorArray_PT(void *thread_data); static void *N_VWrmsNormMaskVectorArray_PT(void *thread_data); static void *N_VScaleAddMultiVectorArray_PT(void 
*thread_data); static void *N_VLinearCombinationVectorArray_PT(void *thread_data); /* Pthread companion functions special cases of vector array operations */ static void *VSumVectorArray_PT(void *thread_data); static void *VDiffVectorArray_PT(void *thread_data); static void *VScaleSumVectorArray_PT(void *thread_data); static void *VScaleDiffVectorArray_PT(void *thread_data); static void *VLin1VectorArray_PT(void *thread_data); static void *VLin2VectorArray_PT(void *thread_data); static void *VaxpyVectorArray_PT(void *thread_data); /* Pthread companion functions for XBraid interface operations */ static void *VBufPack_PT(void *thread_data); static void *VBufUnpack_PT(void *thread_data); /* Function to determine loop values for threads */ static void N_VSplitLoop(int myid, int *nthreads, sunindextype *N, sunindextype *start, sunindextype *end); /* Function to initialize thread data */ static void N_VInitThreadData(Pthreads_Data *thread_data); /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/

/* Return the vector-implementation identifier for this module. */
N_Vector_ID N_VGetVectorID_Pthreads(N_Vector v)
{
  return SUNDIALS_NVEC_PTHREADS;
}

/* ----------------------------------------------------------------------------
 * Function to create a new empty vector.
 *
 * Allocates an N_Vector with the complete Pthreads operations table attached
 * but with no data array: content->data is NULL and own_data is SUNFALSE,
 * so the caller (or a sibling constructor) must attach storage before use.
 * Returns NULL if any allocation fails.
 */

N_Vector N_VNewEmpty_Pthreads(sunindextype length, int num_threads, SUNContext sunctx)
{
  N_Vector v;
  N_VectorContent_Pthreads content;

  /* Create an empty vector object */
  v = NULL;
  v = N_VNewEmpty(sunctx);
  if (v == NULL) return(NULL);

  /* Attach operations */

  /* constructors, destructors, and utility operations */
  v->ops->nvgetvectorid     = N_VGetVectorID_Pthreads;
  v->ops->nvclone           = N_VClone_Pthreads;
  v->ops->nvcloneempty      = N_VCloneEmpty_Pthreads;
  v->ops->nvdestroy         = N_VDestroy_Pthreads;
  v->ops->nvspace           = N_VSpace_Pthreads;
  v->ops->nvgetarraypointer = N_VGetArrayPointer_Pthreads;
  v->ops->nvsetarraypointer = N_VSetArrayPointer_Pthreads;
  v->ops->nvgetlength       = N_VGetLength_Pthreads;

  /* standard vector operations */
  v->ops->nvlinearsum    = N_VLinearSum_Pthreads;
  v->ops->nvconst        = N_VConst_Pthreads;
  v->ops->nvprod         = N_VProd_Pthreads;
  v->ops->nvdiv          = N_VDiv_Pthreads;
  v->ops->nvscale        = N_VScale_Pthreads;
  v->ops->nvabs          = N_VAbs_Pthreads;
  v->ops->nvinv          = N_VInv_Pthreads;
  v->ops->nvaddconst     = N_VAddConst_Pthreads;
  v->ops->nvdotprod      = N_VDotProd_Pthreads;
  v->ops->nvmaxnorm      = N_VMaxNorm_Pthreads;
  v->ops->nvwrmsnormmask = N_VWrmsNormMask_Pthreads;
  v->ops->nvwrmsnorm     = N_VWrmsNorm_Pthreads;
  v->ops->nvmin          = N_VMin_Pthreads;
  v->ops->nvwl2norm      = N_VWL2Norm_Pthreads;
  v->ops->nvl1norm       = N_VL1Norm_Pthreads;
  v->ops->nvcompare      = N_VCompare_Pthreads;
  v->ops->nvinvtest      = N_VInvTest_Pthreads;
  v->ops->nvconstrmask   = N_VConstrMask_Pthreads;
  v->ops->nvminquotient  = N_VMinQuotient_Pthreads;

  /* fused and vector array operations are disabled (NULL) by default */

  /* local reduction operations: for a single-node vector these coincide
     with the global reductions above */
  v->ops->nvdotprodlocal     = N_VDotProd_Pthreads;
  v->ops->nvmaxnormlocal     = N_VMaxNorm_Pthreads;
  v->ops->nvminlocal         = N_VMin_Pthreads;
  v->ops->nvl1normlocal      = N_VL1Norm_Pthreads;
  v->ops->nvinvtestlocal     = N_VInvTest_Pthreads;
  v->ops->nvconstrmasklocal  = N_VConstrMask_Pthreads;
  v->ops->nvminquotientlocal = N_VMinQuotient_Pthreads;
  v->ops->nvwsqrsumlocal     = N_VWSqrSumLocal_Pthreads;
  v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Pthreads;

  /* single buffer reduction operations */
  v->ops->nvdotprodmultilocal = N_VDotProdMulti_Pthreads;

  /* XBraid interface operations */
  v->ops->nvbufsize   = N_VBufSize_Pthreads;
  v->ops->nvbufpack   = N_VBufPack_Pthreads;
  v->ops->nvbufunpack = N_VBufUnpack_Pthreads;

  /* debugging functions */
  v->ops->nvprint     = N_VPrint_Pthreads;
  v->ops->nvprintfile = N_VPrintFile_Pthreads;

  /* Create content */
  content = NULL;
  content = (N_VectorContent_Pthreads) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }

  /* Attach content */
  v->content = content;

  /* Initialize content: no data attached yet, so own_data is SUNFALSE */
  content->length      = length;
  content->num_threads = num_threads;
  content->own_data    = SUNFALSE;
  content->data        = NULL;

  return(v);
}

/* ----------------------------------------------------------------------------
 * Function to create a new vector.
 *
 * Builds an empty Pthreads vector and, for length > 0, allocates a data
 * array that the vector owns (own_data = SUNTRUE, so N_VDestroy frees it).
 * Returns NULL if any allocation fails.
 */

N_Vector N_VNew_Pthreads(sunindextype length, int num_threads, SUNContext sunctx)
{
  N_Vector v;
  realtype *data;

  v = NULL;
  v = N_VNewEmpty_Pthreads(length, num_threads, sunctx);
  if (v == NULL) return(NULL);

  /* Create data */
  if (length > 0) {

    /* Allocate memory */
    data = NULL;
    data = (realtype *) malloc(length * sizeof(realtype));
    if(data == NULL) { N_VDestroy_Pthreads(v); return(NULL); }

    /* Attach data; the vector owns (and will free) this buffer */
    NV_OWN_DATA_PT(v) = SUNTRUE;
    NV_DATA_PT(v)     = data;

  }

  return(v);
}

/* ----------------------------------------------------------------------------
 * Function to create a vector with user data component.
 *
 * Wraps the caller-supplied array v_data without copying it; own_data stays
 * SUNFALSE, so the caller retains ownership and must free v_data itself.
 */

N_Vector N_VMake_Pthreads(sunindextype length, int num_threads, realtype *v_data, SUNContext sunctx)
{
  N_Vector v;

  v = NULL;
  v = N_VNewEmpty_Pthreads(length, num_threads, sunctx);
  if (v == NULL) return(NULL);

  if (length > 0) {
    /* Attach data: not owned, never freed by this module */
    NV_OWN_DATA_PT(v) = SUNFALSE;
    NV_DATA_PT(v)     = v_data;
  }

  return(v);
}

/*
---------------------------------------------------------------------------- * Function to create an array of new vectors. */ N_Vector* N_VCloneVectorArray_Pthreads(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector* N_VCloneVectorArrayEmpty_Pthreads(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_Pthreads */ void N_VDestroyVectorArray_Pthreads(N_Vector* vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_Pthreads(N_Vector v) { return NV_LENGTH_PT(v); } /* ---------------------------------------------------------------------------- * Function to print a vector to stdout */ void N_VPrint_Pthreads(N_Vector x) { N_VPrintFile_Pthreads(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print a vector to outfile */ void N_VPrintFile_Pthreads(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_PT(x); xd = NV_DATA_PT(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]); #else STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* 
----------------------------------------------------------------------------
 * Create new vector from existing vector without attaching data.
 *
 * Copies w's operations table and content metadata (length, num_threads)
 * into a fresh vector; the clone has data == NULL and own_data == SUNFALSE.
 * Returns NULL if w is NULL or any allocation fails.
 */

N_Vector N_VCloneEmpty_Pthreads(N_Vector w)
{
  N_Vector v;
  N_VectorContent_Pthreads content;

  if (w == NULL) return(NULL);

  /* Create vector */
  v = NULL;
  v = N_VNewEmpty(w->sunctx);
  if (v == NULL) return(NULL);

  /* Attach operations (N_VCopyOps returns nonzero on failure) */
  if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }

  /* Create content */
  content = NULL;
  content = (N_VectorContent_Pthreads) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }

  /* Attach content */
  v->content = content;

  /* Initialize content: metadata from w, but no data array attached */
  content->length      = NV_LENGTH_PT(w);
  content->num_threads = NV_NUM_THREADS_PT(w);
  content->own_data    = SUNFALSE;
  content->data        = NULL;

  return(v);
}

/* ----------------------------------------------------------------------------
 * Create new vector from existing vector and attach data.
 *
 * Like N_VCloneEmpty_Pthreads but also allocates a data array of w's length
 * (for length > 0), which the clone owns. Element values are NOT copied from
 * w. Returns NULL if any allocation fails.
 */

N_Vector N_VClone_Pthreads(N_Vector w)
{
  N_Vector v;
  realtype *data;
  sunindextype length;

  v = NULL;
  v = N_VCloneEmpty_Pthreads(w);
  if (v == NULL) return(NULL);

  length = NV_LENGTH_PT(w);

  /* Create data */
  if (length > 0) {

    /* Allocate memory */
    data = NULL;
    data = (realtype *) malloc(length * sizeof(realtype));
    if(data == NULL) { N_VDestroy_Pthreads(v); return(NULL); }

    /* Attach data; the clone owns (and will free) this buffer */
    NV_OWN_DATA_PT(v) = SUNTRUE;
    NV_DATA_PT(v)     = data;

  }

  return(v);
}

/* ----------------------------------------------------------------------------
 * Destroy vector and free vector memory.
 *
 * Frees, in order: the data array (only if this vector owns it), the content
 * struct, the ops struct, and finally the vector itself. Safe to call with
 * NULL.
 */

void N_VDestroy_Pthreads(N_Vector v)
{
  if (v == NULL) return;

  /* free content */
  if (v->content != NULL) {
    /* free the data array only when own_data is set (see N_VMake_Pthreads) */
    if (NV_OWN_DATA_PT(v) && NV_DATA_PT(v) != NULL) {
      free(NV_DATA_PT(v));
      NV_DATA_PT(v) = NULL;
    }
    free(v->content);
    v->content = NULL;
  }

  /* free ops and vector */
  if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
  free(v); v = NULL;

  return;
}

/* ----------------------------------------------------------------------------
 * Get storage requirement for vector
 */

void
N_VSpace_Pthreads(N_Vector v, sunindextype *lrw, sunindextype *liw) { *lrw = NV_LENGTH_PT(v); *liw = 1; return; } /* ---------------------------------------------------------------------------- * Get vector data pointer */ realtype *N_VGetArrayPointer_Pthreads(N_Vector v) { return((realtype *) NV_DATA_PT(v)); } /* ---------------------------------------------------------------------------- * Set vector data pointer */ void N_VSetArrayPointer_Pthreads(realtype *v_data, N_Vector v) { if (NV_LENGTH_PT(v) > 0) NV_DATA_PT(v) = v_data; return; } /* ---------------------------------------------------------------------------- * Compute linear sum z[i] = a*x[i]+b*y[i] */ void N_VLinearSum_Pthreads(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; realtype c; N_Vector v1, v2; booleantype test; if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */ Vaxpy_Pthreads(a,x,y); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ Vaxpy_Pthreads(b,y,x); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_Pthreads(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? x : y; VDiff_Pthreads(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin1_Pthreads(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? 
x : y; VLin2_Pthreads(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_Pthreads(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_Pthreads(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = (pthread_t *) malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; b = my_data->c2; xd = my_data->v1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute linear sum */ for (i = start; i < end; i++){ zd[i] = (a*xd[i])+(b*yd[i]); } /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Assigns constant value to all vector elements, z[i] = c */ void N_VConst_Pthreads(realtype c, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(z); nthreads = NV_NUM_THREADS_PT(z); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; zd = my_data->v1; start = my_data->start; end = my_data->end; /* assign constant values */ for (i = start; i < end; i++) zd[i] = c; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute componentwise product 
z[i] = x[i]*y[i] */ void N_VProd_Pthreads(N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute componentwise product */ for (i = start; i < end; i++) zd[i] = xd[i]*yd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute componentwise division z[i] = x[i]/y[i] */ void N_VDiv_Pthreads(N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute componentwise division */ for (i = start; i < end; i++) zd[i] = xd[i]/yd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute scaler multiplication z[i] = c*x[i] */ void N_VScale_Pthreads(realtype c, N_Vector x, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_Pthreads(c, x); return; } if (c == ONE) { VCopy_Pthreads(x, 
z); } else if (c == -ONE) { VNeg_Pthreads(x, z); } else { /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; zd = my_data->v2; start = my_data->start; end = my_data->end; /* compute scaler multiplication */ for (i = start; i < end; i++) zd[i] = c*xd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute absolute value of vector components z[i] = SUNRabs(x[i]) */ void N_VAbs_Pthreads(N_Vector x, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; zd = my_data->v2; start = my_data->start; end = my_data->end; /* compute absolute value of components */ for (i = start; i < end; i++) zd[i] = SUNRabs(xd[i]); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = 1 / x[i] */ void N_VInv_Pthreads(N_Vector x, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ 
pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; zd = my_data->v2; start = my_data->start; end = my_data->end; /* compute componentwise inverse */ for (i = start; i < end; i++) zd[i] = ONE/xd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b */ void N_VAddConst_Pthreads(N_Vector x, realtype b, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; zd = my_data->v2; start = my_data->start; end = my_data->end; /* compute componentwise constant addition */ for (i = start; i < end; i++) zd[i] = xd[i] + b; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Computes the dot product of two vectors, a = sum(x[i]*y[i]) */ realtype N_VDotProd_Pthreads(N_Vector x, N_Vector y) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype sum = ZERO; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for (i=0; iv1; yd = my_data->v2; global_sum = my_data->global_val; 
global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* compute dot product */ local_sum = ZERO; for (i = start; i < end; i++) local_sum += xd[i] * yd[i]; /* update global sum */ pthread_mutex_lock(global_mutex); *global_sum += local_sum; pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Computes max norm of the vector */ realtype N_VMaxNorm_Pthreads(N_Vector x) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype max = ZERO; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for (i=0; iv1; global_max = my_data->global_val; global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* find local max */ local_max = ZERO; for (i = start; i < end; i++) if (SUNRabs(xd[i]) > local_max) local_max = SUNRabs(xd[i]); /* update global max */ pthread_mutex_lock(global_mutex); if (local_max > *global_max) { *global_max = local_max; } pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a vector */ realtype N_VWrmsNorm_Pthreads(N_Vector x, N_Vector w) { return(SUNRsqrt(N_VWSqrSumLocal_Pthreads(x, w)/(NV_LENGTH_PT(x)))); } /* ---------------------------------------------------------------------------- * Computes weighted square sum of a vector */ realtype N_VWSqrSumLocal_Pthreads(N_Vector x, N_Vector w) { sunindextype N; int i, nthreads; pthread_t 
*threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype sum = ZERO; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for (i=0; iv1; wd = my_data->v2; global_sum = my_data->global_val; global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* compute wrms norm */ local_sum = ZERO; for (i = start; i < end; i++) local_sum += SUNSQR(xd[i] * wd[i]); /* update global sum */ pthread_mutex_lock(global_mutex); *global_sum += local_sum; pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a masked vector */ realtype N_VWrmsNormMask_Pthreads(N_Vector x, N_Vector w, N_Vector id) { return(SUNRsqrt(N_VWSqrSumMaskLocal_Pthreads(x, w, id)/(NV_LENGTH_PT(x)))); } /* ---------------------------------------------------------------------------- * Computes weighted square sum of a masked vector */ realtype N_VWSqrSumMaskLocal_Pthreads(N_Vector x, N_Vector w, N_Vector id) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype sum = ZERO; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for 
(i=0; iv1; wd = my_data->v2; idd = my_data->v3; global_sum = my_data->global_val; global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* compute wrms norm with mask */ local_sum = ZERO; for (i = start; i < end; i++) { if (idd[i] > ZERO) local_sum += SUNSQR(xd[i]*wd[i]); } /* update global sum */ pthread_mutex_lock(global_mutex); *global_sum += local_sum; pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Finds the minimun component of a vector */ realtype N_VMin_Pthreads(N_Vector x) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype min; /* initialize global min */ min = NV_Ith_PT(x,0); /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for (i=0; iv1; global_min = my_data->global_val; global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* find local min */ local_min = *global_min; for (i = start; i < end; i++) { if (xd[i] < local_min) local_min = xd[i]; } /* update global min */ pthread_mutex_lock(global_mutex); if (local_min < *global_min) *global_min = local_min; pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Computes weighted L2 norm of a vector */ realtype N_VWL2Norm_Pthreads(N_Vector x, N_Vector w) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype sum = ZERO; /* 
allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for (i=0; iv1; wd = my_data->v2; global_sum = my_data->global_val; global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* compute WL2 norm */ local_sum = ZERO; for (i = start; i < end; i++) local_sum += SUNSQR(xd[i]*wd[i]); /* update global sum */ pthread_mutex_lock(global_mutex); *global_sum += local_sum; pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Computes L1 norm of a vector */ realtype N_VL1Norm_Pthreads(N_Vector x) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype sum = ZERO; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for (i=0; iv1; global_sum = my_data->global_val; global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* compute L1 norm */ local_sum = ZERO; for (i = start; i < end; i++) local_sum += SUNRabs(xd[i]); /* update global sum */ pthread_mutex_lock(global_mutex); *global_sum += local_sum; pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * 
Compare vector component values to a scaler */ void N_VCompare_Pthreads(realtype c, N_Vector x, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; zd = my_data->v2; start = my_data->start; end = my_data->end; /* compare component to scaler */ for (i = start; i < end; i++) zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = ONE/x[i] and check if x[i] == ZERO */ booleantype N_VInvTest_Pthreads(N_Vector x, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; realtype val = ZERO; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; i ZERO) return (SUNFALSE); else return (SUNTRUE); } /* ---------------------------------------------------------------------------- * Pthread companion function to N_VInvTest */ static void *N_VInvTest_PT(void *thread_data) { sunindextype i, start, end; realtype *xd, *zd; realtype local_val, *global_val; Pthreads_Data *my_data; /* extract thread data */ my_data = (Pthreads_Data *) thread_data; xd = my_data->v1; zd = my_data->v2; global_val = my_data->global_val; start = my_data->start; end = my_data->end; /* 
compute inverse with check for divide by ZERO */ local_val = ZERO; for (i = start; i < end; i++) { if (xd[i] == ZERO) local_val = ONE; else zd[i] = ONE/xd[i]; } /* update global val */ if (local_val > ZERO) { *global_val = local_val; } /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute constraint mask of a vector */ booleantype N_VConstrMask_Pthreads(N_Vector c, N_Vector x, N_Vector m) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; realtype val = ZERO; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; i ZERO) return(SUNFALSE); else return(SUNTRUE); } /* ---------------------------------------------------------------------------- * Pthread companion function to N_VConstrMask */ static void *N_VConstrMask_PT(void *thread_data) { sunindextype i, start, end; realtype *cd, *xd, *md; realtype local_val, *global_val; Pthreads_Data *my_data; /* extract thread data */ my_data = (Pthreads_Data *) thread_data; cd = my_data->v1; xd = my_data->v2; md = my_data->v3; global_val = my_data->global_val; start = my_data->start; end = my_data->end; /* compute constraint mask */ local_val = ZERO; for (i = start; i < end; i++) { md[i] = ZERO; /* Continue if no constraints were set for the variable */ if (cd[i] == ZERO) continue; /* Check if a set constraint has been violated */ if ((SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) || (SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO)) { local_val = md[i] = ONE; } } /* update global val */ if (local_val > ZERO) { *global_val = local_val; } /* exit */ pthread_exit(NULL); } /* 
---------------------------------------------------------------------------- * Compute minimum componentwise quotient */ realtype N_VMinQuotient_Pthreads(N_Vector num, N_Vector denom) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; realtype min = BIG_REAL; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(num); nthreads = NV_NUM_THREADS_PT(num); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* lock for reduction */ pthread_mutex_init(&global_mutex, NULL); for (i=0; iv1; dd = my_data->v2; global_min = my_data->global_val; global_mutex = my_data->global_mutex; start = my_data->start; end = my_data->end; /* compute minimum quotient */ local_min = BIG_REAL; for (i = start; i < end; i++) { if (dd[i] == ZERO) continue; local_min = SUNMIN(local_min, nd[i]/dd[i]); } /* update global min */ pthread_mutex_lock(global_mutex); if (local_min < *global_min) *global_min = local_min; pthread_mutex_unlock(global_mutex); /* exit */ pthread_exit(NULL); } /* * ----------------------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------------------- */ /* ----------------------------------------------------------------------------- * Compute the linear combination z = c[i]*X[i] */ int N_VLinearCombination_Pthreads(int nvec, realtype* c, N_Vector* X, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_Pthreads(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_Pthreads(c[0], X[0], 
c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LENGTH_PT(z); nthreads = NV_NUM_THREADS_PT(z); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; istart; end = my_data->end; c = my_data->cvals; zd = NV_DATA_PT(my_data->x1); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((my_data->Y1[0] == my_data->x1) && (c[0] == ONE)) { for (i=1; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); for (j=start; jY1[0] == my_data->x1) { for (j=start; jnvec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); for (j=start; jY1[0]); for (j=start; jnvec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); for (j=start; jstart; end = my_data->end; a = my_data->cvals; xd = NV_DATA_PT(my_data->x1); /* * Y[i][j] += a[i] * x[j] */ if (my_data->Y1 == my_data->Y2) { for (i=0; invec; i++) { yd = NV_DATA_PT(my_data->Y1[i]); for (j=start; jnvec; i++) { yd = NV_DATA_PT(my_data->Y1[i]); zd = NV_DATA_PT(my_data->Y2[i]); for (j=start; jstart; end = my_data->end; lock = my_data->global_mutex; xd = NV_DATA_PT(my_data->x1); dotprods = my_data->cvals; /* compute multiple dot products */ for (i=0; invec; i++) { yd = NV_DATA_PT(my_data->Y1[i]); sum = ZERO; for (j=start; jstart; end = my_data->end; a = my_data->c1; b = my_data->c2; /* compute linear sum for each vector pair in vector arrays */ for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); zd = NV_DATA_PT(my_data->Y3[i]); for (j=start; jstart; end = my_data->end; c = my_data->cvals; /* * X[i] *= c[i] */ if (my_data->Y1 == my_data->Y2) { for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); for (j=start; jnvec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); zd = NV_DATA_PT(my_data->Y2[i]); for (j=start; jstart; end = my_data->end; /* set each vector in the vector array to a constant */ for (i=0; invec; i++) { zd = 
NV_DATA_PT(my_data->Y1[i]); for (j=start; jc1; } } /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute the weighted root mean square norm of multiple vectors */ int N_VWrmsNormVectorArray_Pthreads(int nvec, N_Vector* X, N_Vector* W, realtype* nrm) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; pthread_mutex_t global_mutex; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNorm_Pthreads(X[0], W[0]); return(0); } /* initialize output array */ for (i=0; istart; end = my_data->end; lock = my_data->global_mutex; nrm = my_data->cvals; /* compute the WRMS norm for each vector in the vector array */ for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); wd = NV_DATA_PT(my_data->Y2[i]); sum = ZERO; for (j=start; jstart; end = my_data->end; lock = my_data->global_mutex; nrm = my_data->cvals; idd = NV_DATA_PT(my_data->x1); /* compute the WRMS norm for each vector in the vector array */ for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); wd = NV_DATA_PT(my_data->Y2[i]); sum = ZERO; for (j=start; j ZERO) sum += SUNSQR(xd[j] * wd[j]); } /* update global sum */ pthread_mutex_lock(lock); nrm[i] += sum; pthread_mutex_unlock(lock); } /* exit */ pthread_exit(NULL); } /* ----------------------------------------------------------------------------- * Scale and add a vector to multiple vectors Z = Y + a*X */ int N_VScaleAddMultiVectorArray_Pthreads(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { sunindextype N; int i, j, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; int retval; N_Vector* YY; N_Vector* ZZ; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called 
N_VLinearSum */ if (nsum == 1) { N_VLinearSum_Pthreads(a[0], X[0], ONE, Y[0][0], Z[0][0]); return(0); } /* should have called N_VScaleAddMulti */ YY = (N_Vector*) malloc(nsum * sizeof(N_Vector)); ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector)); for (j=0; j 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_Pthreads(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length and data array */ N = NV_LENGTH_PT(X[0]); nthreads = NV_NUM_THREADS_PT(X[0]); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; istart; end = my_data->end; a = my_data->cvals; /* * Y[i][j] += a[i] * x[j] */ if (my_data->ZZ1 == my_data->ZZ2) { for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); for (j=0; jnsum; j++){ yd = NV_DATA_PT(my_data->ZZ1[j][i]); for (k=start; knvec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); for (j=0; jnsum; j++){ yd = NV_DATA_PT(my_data->ZZ1[j][i]); zd = NV_DATA_PT(my_data->ZZ2[j][i]); for (k=start; k 1 * -------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; jstart; end = my_data->end; c = my_data->cvals; /* * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1 */ if ((my_data->ZZ1[0] == my_data->Y1) && (c[0] == ONE)) { for (j=0; jnvec; j++) { zd = NV_DATA_PT(my_data->Y1[j]); for (i=1; insum; i++) { xd = NV_DATA_PT(my_data->ZZ1[i][j]); for (k=start; kZZ1[0] == my_data->Y1) { for (j=0; jnvec; j++) { zd = NV_DATA_PT(my_data->Y1[j]); for (k=start; knsum; i++) { xd = NV_DATA_PT(my_data->ZZ1[i][j]); for (k=start; knvec; j++) { xd = NV_DATA_PT(my_data->ZZ1[0][j]); zd = NV_DATA_PT(my_data->Y1[j]); for 
(k=start; knsum; i++) { xd = NV_DATA_PT(my_data->ZZ1[i][j]); for (k=start; kv1; bd = my_data->v2; start = my_data->start; end = my_data->end; /* pack the buffer */ for (i = start; i < end; i++) bd[i] = xd[i]; /* exit */ pthread_exit(NULL); } /* ----------------------------------------------------------------------------- * Unpack butter */ int N_VBufUnpack_Pthreads(N_Vector x, void *buf) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; if (x == NULL || buf == NULL) return(-1); /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; bd = my_data->v2; start = my_data->start; end = my_data->end; /* unpack the buffer */ for (i = start; i < end; i++) xd[i] = bd[i]; /* exit */ pthread_exit(NULL); } /* * ----------------------------------------------------------------- * private functions for special cases of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Copy vector components into second vector */ static void VCopy_Pthreads(N_Vector x, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; zd = my_data->v2; start = my_data->start; end = my_data->end; /* copy vector components */ for (i = 
start; i < end; i++) zd[i] = xd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute vector sum */ static void VSum_Pthreads(N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute vector sum */ for (i = start; i < end; i++) zd[i] = xd[i] + yd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute vector difference */ static void VDiff_Pthreads(N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute vector difference */ for (i = start; i < end; i++) zd[i] = xd[i] - yd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute the negative of a vector */ static void VNeg_Pthreads(N_Vector x, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* 
allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; iv1; zd = my_data->v2; start = my_data->start; end = my_data->end; /* compute negative of vector */ for (i = start; i < end; i++) zd[i] = -xd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute scaled vector sum */ static void VScaleSum_Pthreads(realtype c, N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute scaled vector sum */ for (i = start; i < end; i++) zd[i] = c*(xd[i] + yd[i]); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute scaled vector difference */ static void VScaleDiff_Pthreads(realtype c, N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); 
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute scaled vector difference */ for (i = start; i < end; i++) zd[i] = c*(xd[i] - yd[i]); /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute vector sum z[i] = a*x[i]+y[i] */ static void VLin1_Pthreads(realtype a, N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute vector sum */ for (i = start; i < end; i++) zd[i] = (a*xd[i]) + yd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute vector difference z[i] = a*x[i]-y[i] */ static void VLin2_Pthreads(realtype a, N_Vector x, N_Vector y, N_Vector z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; yd = my_data->v2; zd = my_data->v3; start = my_data->start; end = my_data->end; /* compute vector difference */ for (i = start; i < end; 
i++) zd[i] = (a*xd[i]) - yd[i]; /* exit */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute special cases of linear sum */ static void Vaxpy_Pthreads(realtype a, N_Vector x, N_Vector y) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; yd = my_data->v2; start = my_data->start; end = my_data->end; /* compute axpy */ if (a == ONE) { for (i = start; i < end; i++) yd[i] += xd[i]; /* exit */ pthread_exit(NULL); } if (a == -ONE) { for (i = start; i < end; i++) yd[i] -= xd[i]; /* exit */ pthread_exit(NULL); } for (i = start; i < end; i++) yd[i] += a*xd[i]; /* return */ pthread_exit(NULL); } /* ---------------------------------------------------------------------------- * Compute scaled vector */ static void VScaleBy_Pthreads(realtype a, N_Vector x) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(x); nthreads = NV_NUM_THREADS_PT(x); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i=0; ic1; xd = my_data->v1; start = my_data->start; end = my_data->end; /* compute scaled vector */ for (i = start; i < end; i++) xd[i] *= a; /* exit */ pthread_exit(NULL); } /* * ----------------------------------------------------------------------------- * private functions for special cases of 
vector array operations * ----------------------------------------------------------------------------- */ static int VSumVectorArray_Pthreads(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z) { sunindextype N; int i, nthreads; pthread_t *threads; Pthreads_Data *thread_data; pthread_attr_t attr; /* allocate threads and thread data structs */ N = NV_LENGTH_PT(X[0]); nthreads = NV_NUM_THREADS_PT(X[0]); threads = malloc(nthreads*sizeof(pthread_t)); thread_data = (Pthreads_Data *) malloc(nthreads*sizeof(struct _Pthreads_Data)); /* set thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); /* pack thread data, distribute loop indices, and create threads/call kernel */ for (i=0; istart; end = my_data->end; for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); zd = NV_DATA_PT(my_data->Y3[i]); for (j=start; jstart; end = my_data->end; for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); zd = NV_DATA_PT(my_data->Y3[i]); for (j=start; jstart; end = my_data->end; c = my_data->c1; for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); zd = NV_DATA_PT(my_data->Y3[i]); for (j=start; jstart; end = my_data->end; c = my_data->c1; for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); zd = NV_DATA_PT(my_data->Y3[i]); for (j=start; jstart; end = my_data->end; a = my_data->c1; for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); zd = NV_DATA_PT(my_data->Y3[i]); for (j=start; jstart; end = my_data->end; a = my_data->c1; for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); zd = NV_DATA_PT(my_data->Y3[i]); for (j=start; jstart; end = my_data->end; a = my_data->c1; if (a == ONE) { for (i=0; invec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); for (j=start; jnvec; i++) { xd = 
NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); for (j=start; jnvec; i++) { xd = NV_DATA_PT(my_data->Y1[i]); yd = NV_DATA_PT(my_data->Y2[i]); for (j=start; jstart = -1; thread_data->end = -1; #if __STDC_VERSION__ >= 199901L thread_data->c1 = NAN; thread_data->c2 = NAN; #else thread_data->c1 = ZERO; thread_data->c2 = ZERO; #endif thread_data->v1 = NULL; thread_data->v2 = NULL; thread_data->v3 = NULL; thread_data->global_val = NULL; thread_data->global_mutex = NULL; thread_data->nvec = ZERO; thread_data->nsum = ZERO; thread_data->cvals = NULL; thread_data->Y1 = NULL; thread_data->Y2 = NULL; thread_data->Y3 = NULL; } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Pthreads; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Pthreads; v->ops->nvdotprodmulti = N_VDotProdMulti_Pthreads; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Pthreads; v->ops->nvscalevectorarray = N_VScaleVectorArray_Pthreads; v->ops->nvconstvectorarray = N_VConstVectorArray_Pthreads; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Pthreads; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Pthreads; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Pthreads; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Pthreads; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Pthreads; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; 
v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_Pthreads; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_Pthreads; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) { v->ops->nvdotprodmulti = N_VDotProdMulti_Pthreads; v->ops->nvdotprodmultilocal = N_VDotProdMulti_Pthreads; } else { v->ops->nvdotprodmulti = NULL; v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearSumVectorArray_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = 
N_VLinearSumVectorArray_Pthreads; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_Pthreads; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_Pthreads; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Pthreads; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Pthreads; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Pthreads; 
else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_Pthreads(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Pthreads; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } StanHeaders/src/nvector/pthreads/fmod/0000755000176200001440000000000014511135055017461 5ustar liggesusersStanHeaders/src/nvector/pthreads/fmod/fnvector_pthreads_mod.f900000644000176200001440000012123414645137106024372 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fnvector_pthreads_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS public :: FN_VNew_Pthreads public :: FN_VNewEmpty_Pthreads public :: FN_VMake_Pthreads public :: FN_VGetLength_Pthreads public :: FN_VPrint_Pthreads public :: FN_VPrintFile_Pthreads public :: FN_VGetVectorID_Pthreads public :: FN_VCloneEmpty_Pthreads public :: FN_VClone_Pthreads public :: FN_VDestroy_Pthreads public :: FN_VSpace_Pthreads public :: FN_VGetArrayPointer_Pthreads public :: FN_VSetArrayPointer_Pthreads public :: FN_VLinearSum_Pthreads public :: FN_VConst_Pthreads public :: FN_VProd_Pthreads public :: FN_VDiv_Pthreads public :: FN_VScale_Pthreads public :: FN_VAbs_Pthreads public :: FN_VInv_Pthreads public :: FN_VAddConst_Pthreads public :: FN_VDotProd_Pthreads public :: FN_VMaxNorm_Pthreads public :: FN_VWrmsNorm_Pthreads public :: FN_VWrmsNormMask_Pthreads public :: FN_VMin_Pthreads public :: FN_VWL2Norm_Pthreads public :: FN_VL1Norm_Pthreads public :: FN_VCompare_Pthreads public :: FN_VInvTest_Pthreads public :: FN_VConstrMask_Pthreads public :: FN_VMinQuotient_Pthreads public :: FN_VLinearCombination_Pthreads public :: FN_VScaleAddMulti_Pthreads public :: FN_VDotProdMulti_Pthreads public :: FN_VLinearSumVectorArray_Pthreads public :: FN_VScaleVectorArray_Pthreads public :: FN_VConstVectorArray_Pthreads public :: FN_VWrmsNormVectorArray_Pthreads public :: FN_VWrmsNormMaskVectorArray_Pthreads public :: FN_VWSqrSumLocal_Pthreads public :: FN_VWSqrSumMaskLocal_Pthreads public :: FN_VBufSize_Pthreads public :: FN_VBufPack_Pthreads public :: FN_VBufUnpack_Pthreads public :: FN_VEnableFusedOps_Pthreads public :: FN_VEnableLinearCombination_Pthreads public :: FN_VEnableScaleAddMulti_Pthreads public :: FN_VEnableDotProdMulti_Pthreads public :: FN_VEnableLinearSumVectorArray_Pthreads public :: FN_VEnableScaleVectorArray_Pthreads public :: FN_VEnableConstVectorArray_Pthreads public :: FN_VEnableWrmsNormVectorArray_Pthreads public :: FN_VEnableWrmsNormMaskVectorArray_Pthreads public :: FN_VCloneVectorArray_Pthreads public :: 
FN_VCloneVectorArrayEmpty_Pthreads public :: FN_VDestroyVectorArray_Pthreads ! WRAPPER DECLARATIONS interface function swigc_FN_VNew_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VNew_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VNewEmpty_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VNewEmpty_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VMake_Pthreads(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VMake_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FN_VGetLength_Pthreads(farg1) & bind(C, name="_wrap_FN_VGetLength_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function subroutine swigc_FN_VPrint_Pthreads(farg1) & bind(C, name="_wrap_FN_VPrint_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VGetVectorID_Pthreads(farg1) & bind(C, name="_wrap_FN_VGetVectorID_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FN_VCloneEmpty_Pthreads(farg1) & bind(C, name="_wrap_FN_VCloneEmpty_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end 
function function swigc_FN_VClone_Pthreads(farg1) & bind(C, name="_wrap_FN_VClone_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroy_Pthreads(farg1) & bind(C, name="_wrap_FN_VDestroy_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VSpace_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSpace_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VGetArrayPointer_Pthreads(farg1) & bind(C, name="_wrap_FN_VGetArrayPointer_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VSetArrayPointer_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VSetArrayPointer_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VLinearSum_Pthreads(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VLinearSum_Pthreads") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 end subroutine subroutine swigc_FN_VConst_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VConst_Pthreads") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VProd_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VProd_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VDiv_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDiv_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value 
:: farg3 end subroutine subroutine swigc_FN_VScale_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VScale_Pthreads") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VAbs_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VAbs_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VInv_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VInv_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VAddConst_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VAddConst_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VDotProd_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VDotProd_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNorm_Pthreads(farg1) & bind(C, name="_wrap_FN_VMaxNorm_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNorm_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VWrmsNorm_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNormMask_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWrmsNormMask_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VMin_Pthreads(farg1) & bind(C, name="_wrap_FN_VMin_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING 
type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWL2Norm_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VWL2Norm_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1Norm_Pthreads(farg1) & bind(C, name="_wrap_FN_VL1Norm_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_FN_VCompare_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VCompare_Pthreads") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VInvTest_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VInvTest_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMask_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMask_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotient_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotient_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VLinearCombination_Pthreads(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VLinearCombination_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VScaleAddMulti_Pthreads(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VScaleAddMulti_Pthreads") & 
result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMulti_Pthreads(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMulti_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VLinearSumVectorArray_Pthreads(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FN_VLinearSumVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FN_VScaleVectorArray_Pthreads(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VScaleVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VConstVectorArray_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormVectorArray_Pthreads(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VWrmsNormVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function 
swigc_FN_VWrmsNormMaskVectorArray_Pthreads(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VWrmsNormMaskVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VWSqrSumLocal_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VWSqrSumLocal_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumMaskLocal_Pthreads(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWSqrSumMaskLocal_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VBufSize_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VBufSize_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufPack_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VBufPack_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufUnpack_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VBufUnpack_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableFusedOps_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VEnableFusedOps_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearCombination_Pthreads(farg1, farg2) & bind(C, 
name="_wrap_FN_VEnableLinearCombination_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleAddMulti_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleAddMulti_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMulti_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMulti_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearSumVectorArray_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearSumVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleVectorArray_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableConstVectorArray_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VEnableConstVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormVectorArray_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormMaskVectorArray_Pthreads(farg1, farg2) & bind(C, 
name="_wrap_FN_VEnableWrmsNormMaskVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VCloneVectorArray_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArray_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VCloneVectorArrayEmpty_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArrayEmpty_Pthreads") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroyVectorArray_Pthreads(farg1, farg2) & bind(C, name="_wrap_FN_VDestroyVectorArray_Pthreads") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 end subroutine end interface contains ! MODULE SUBPROGRAMS function FN_VNew_Pthreads(vec_length, n_threads, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length integer(C_INT), intent(in) :: n_threads type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = vec_length farg2 = n_threads farg3 = sunctx fresult = swigc_FN_VNew_Pthreads(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FN_VNewEmpty_Pthreads(vec_length, n_threads, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length integer(C_INT), intent(in) :: n_threads type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = vec_length farg2 = n_threads farg3 = sunctx fresult = swigc_FN_VNewEmpty_Pthreads(farg1, farg2, farg3) 
call c_f_pointer(fresult, swig_result) end function function FN_VMake_Pthreads(vec_length, n_threads, v_data, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length integer(C_INT), intent(in) :: n_threads real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = vec_length farg2 = n_threads farg3 = c_loc(v_data(1)) farg4 = sunctx fresult = swigc_FN_VMake_Pthreads(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FN_VGetLength_Pthreads(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLength_Pthreads(farg1) swig_result = fresult end function subroutine FN_VPrint_Pthreads(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VPrint_Pthreads(farg1) end subroutine subroutine FN_VPrintFile_Pthreads(v, outfile) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v) farg2 = outfile call swigc_FN_VPrintFile_Pthreads(farg1, farg2) end subroutine function FN_VGetVectorID_Pthreads(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(N_Vector_ID) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetVectorID_Pthreads(farg1) swig_result = fresult end function function FN_VCloneEmpty_Pthreads(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult 
type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VCloneEmpty_Pthreads(farg1) call c_f_pointer(fresult, swig_result) end function function FN_VClone_Pthreads(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VClone_Pthreads(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FN_VDestroy_Pthreads(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VDestroy_Pthreads(farg1) end subroutine subroutine FN_VSpace_Pthreads(v, lrw, liw) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), dimension(*), target, intent(inout) :: lrw integer(C_INT64_T), dimension(*), target, intent(inout) :: liw type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(v) farg2 = c_loc(lrw(1)) farg3 = c_loc(liw(1)) call swigc_FN_VSpace_Pthreads(farg1, farg2, farg3) end subroutine function FN_VGetArrayPointer_Pthreads(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetArrayPointer_Pthreads(farg1) call c_f_pointer(fresult, swig_result, [1]) end function subroutine FN_VSetArrayPointer_Pthreads(v_data, v) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v_data(1)) farg2 = c_loc(v) call swigc_FN_VSetArrayPointer_Pthreads(farg1, farg2) end subroutine subroutine FN_VLinearSum_Pthreads(a, x, b, y, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: a type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b 
type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = a farg2 = c_loc(x) farg3 = b farg4 = c_loc(y) farg5 = c_loc(z) call swigc_FN_VLinearSum_Pthreads(farg1, farg2, farg3, farg4, farg5) end subroutine subroutine FN_VConst_Pthreads(c, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(z) call swigc_FN_VConst_Pthreads(farg1, farg2) end subroutine subroutine FN_VProd_Pthreads(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VProd_Pthreads(farg1, farg2, farg3) end subroutine subroutine FN_VDiv_Pthreads(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VDiv_Pthreads(farg1, farg2, farg3) end subroutine subroutine FN_VScale_Pthreads(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VScale_Pthreads(farg1, farg2, farg3) end subroutine subroutine FN_VAbs_Pthreads(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call 
swigc_FN_VAbs_Pthreads(farg1, farg2) end subroutine subroutine FN_VInv_Pthreads(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VInv_Pthreads(farg1, farg2) end subroutine subroutine FN_VAddConst_Pthreads(x, b, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = b farg3 = c_loc(z) call swigc_FN_VAddConst_Pthreads(farg1, farg2, farg3) end subroutine function FN_VDotProd_Pthreads(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProd_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VMaxNorm_Pthreads(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNorm_Pthreads(farg1) swig_result = fresult end function function FN_VWrmsNorm_Pthreads(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWrmsNorm_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VWrmsNormMask_Pthreads(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, 
intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWrmsNormMask_Pthreads(farg1, farg2, farg3) swig_result = fresult end function function FN_VMin_Pthreads(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMin_Pthreads(farg1) swig_result = fresult end function function FN_VWL2Norm_Pthreads(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWL2Norm_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VL1Norm_Pthreads(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1Norm_Pthreads(farg1) swig_result = fresult end function subroutine FN_VCompare_Pthreads(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VCompare_Pthreads(farg1, farg2, farg3) end subroutine function FN_VInvTest_Pthreads(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = 
swigc_FN_VInvTest_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VConstrMask_Pthreads(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult = swigc_FN_VConstrMask_Pthreads(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotient_Pthreads(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotient_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VLinearCombination_Pthreads(nvec, c, x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = x farg4 = c_loc(z) fresult = swigc_FN_VLinearCombination_Pthreads(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VScaleAddMulti_Pthreads(nvec, a, x, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: 
farg5 farg1 = nvec farg2 = c_loc(a(1)) farg3 = c_loc(x) farg4 = y farg5 = z fresult = swigc_FN_VScaleAddMulti_Pthreads(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VDotProdMulti_Pthreads(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMulti_Pthreads(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VLinearSumVectorArray_Pthreads(nvec, a, x, b, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), intent(in) :: a type(C_PTR) :: x real(C_DOUBLE), intent(in) :: b type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = nvec farg2 = a farg3 = x farg4 = b farg5 = y farg6 = z fresult = swigc_FN_VLinearSumVectorArray_Pthreads(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FN_VScaleVectorArray_Pthreads(nvec, c, x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: x type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = x farg4 = z fresult = swigc_FN_VScaleVectorArray_Pthreads(farg1, farg2, farg3, farg4) swig_result = fresult end function function 
FN_VConstVectorArray_Pthreads(nvec, c, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), intent(in) :: c type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = nvec farg2 = c farg3 = z fresult = swigc_FN_VConstVectorArray_Pthreads(farg1, farg2, farg3) swig_result = fresult end function function FN_VWrmsNormVectorArray_Pthreads(nvec, x, w, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(C_PTR) :: x type(C_PTR) :: w real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = x farg3 = w farg4 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormVectorArray_Pthreads(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VWrmsNormMaskVectorArray_Pthreads(nvec, x, w, id, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(C_PTR) :: x type(C_PTR) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvec farg2 = x farg3 = w farg4 = c_loc(id) farg5 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormMaskVectorArray_Pthreads(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VWSqrSumLocal_Pthreads(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = 
swigc_FN_VWSqrSumLocal_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VWSqrSumMaskLocal_Pthreads(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWSqrSumMaskLocal_Pthreads(farg1, farg2, farg3) swig_result = fresult end function function FN_VBufSize_Pthreads(x, size) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x integer(C_INT64_T), dimension(*), target, intent(inout) :: size integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(size(1)) fresult = swigc_FN_VBufSize_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VBufPack_Pthreads(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufPack_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VBufUnpack_Pthreads(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufUnpack_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableFusedOps_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 
integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableFusedOps_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearCombination_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearCombination_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableScaleAddMulti_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleAddMulti_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableDotProdMulti_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableDotProdMulti_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearSumVectorArray_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearSumVectorArray_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableScaleVectorArray_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: 
tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleVectorArray_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableConstVectorArray_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableConstVectorArray_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormVectorArray_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormVectorArray_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormMaskVectorArray_Pthreads(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormMaskVectorArray_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VCloneVectorArray_Pthreads(count, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneVectorArray_Pthreads(farg1, farg2) swig_result = fresult end function function FN_VCloneVectorArrayEmpty_Pthreads(count, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result 
integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneVectorArrayEmpty_Pthreads(farg1, farg2) swig_result = fresult end function subroutine FN_VDestroyVectorArray_Pthreads(vs, count) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: vs integer(C_INT), intent(in) :: count type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = vs farg2 = count call swigc_FN_VDestroyVectorArray_Pthreads(farg1, farg2) end subroutine end module StanHeaders/src/nvector/pthreads/fmod/fnvector_pthreads_mod.c0000644000176200001440000006316314645137106024224 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" #include "nvector/nvector_pthreads.h" SWIGEXPORT N_Vector _wrap_FN_VNew_Pthreads(int64_t const *farg1, int const *farg2, void *farg3) { N_Vector fresult ; sunindextype arg1 ; int arg2 ; SUNContext arg3 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (int)(*farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VNew_Pthreads(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VNewEmpty_Pthreads(int64_t const *farg1, int const *farg2, void *farg3) { N_Vector fresult ; sunindextype arg1 ; int arg2 ; SUNContext arg3 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (int)(*farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VNewEmpty_Pthreads(arg1,arg2,arg3); fresult = 
result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VMake_Pthreads(int64_t const *farg1, int const *farg2, double *farg3, void *farg4) { N_Vector fresult ; sunindextype arg1 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; SUNContext arg4 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (SUNContext)(farg4); result = (N_Vector)N_VMake_Pthreads(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLength_Pthreads(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLength_Pthreads(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VPrint_Pthreads(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VPrint_Pthreads(arg1); } SWIGEXPORT void _wrap_FN_VPrintFile_Pthreads(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile_Pthreads(arg1,arg2); } SWIGEXPORT int _wrap_FN_VGetVectorID_Pthreads(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = (N_Vector_ID)N_VGetVectorID_Pthreads(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FN_VCloneEmpty_Pthreads(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VCloneEmpty_Pthreads(arg1); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VClone_Pthreads(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VClone_Pthreads(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroy_Pthreads(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VDestroy_Pthreads(arg1); } SWIGEXPORT void 
_wrap_FN_VSpace_Pthreads(N_Vector farg1, int64_t *farg2, int64_t *farg3) { N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; sunindextype *arg3 = (sunindextype *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); arg3 = (sunindextype *)(farg3); N_VSpace_Pthreads(arg1,arg2,arg3); } SWIGEXPORT double * _wrap_FN_VGetArrayPointer_Pthreads(N_Vector farg1) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); result = (realtype *)N_VGetArrayPointer_Pthreads(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VSetArrayPointer_Pthreads(double *farg1, N_Vector farg2) { realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); N_VSetArrayPointer_Pthreads(arg1,arg2); } SWIGEXPORT void _wrap_FN_VLinearSum_Pthreads(double const *farg1, N_Vector farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); N_VLinearSum_Pthreads(arg1,arg2,arg3,arg4,arg5); } SWIGEXPORT void _wrap_FN_VConst_Pthreads(double const *farg1, N_Vector farg2) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); N_VConst_Pthreads(arg1,arg2); } SWIGEXPORT void _wrap_FN_VProd_Pthreads(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VProd_Pthreads(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VDiv_Pthreads(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = 
(N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VDiv_Pthreads(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VScale_Pthreads(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VScale_Pthreads(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VAbs_Pthreads(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VAbs_Pthreads(arg1,arg2); } SWIGEXPORT void _wrap_FN_VInv_Pthreads(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VInv_Pthreads(arg1,arg2); } SWIGEXPORT void _wrap_FN_VAddConst_Pthreads(N_Vector farg1, double const *farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); N_VAddConst_Pthreads(arg1,arg2,arg3); } SWIGEXPORT double _wrap_FN_VDotProd_Pthreads(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProd_Pthreads(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNorm_Pthreads(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNorm_Pthreads(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNorm_Pthreads(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWrmsNorm_Pthreads(arg1,arg2); fresult = (realtype)(result); return fresult; 
} SWIGEXPORT double _wrap_FN_VWrmsNormMask_Pthreads(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWrmsNormMask_Pthreads(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMin_Pthreads(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMin_Pthreads(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWL2Norm_Pthreads(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWL2Norm_Pthreads(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1Norm_Pthreads(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1Norm_Pthreads(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VCompare_Pthreads(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VCompare_Pthreads(arg1,arg2,arg3); } SWIGEXPORT int _wrap_FN_VInvTest_Pthreads(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTest_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMask_Pthreads(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; 
N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMask_Pthreads(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotient_Pthreads(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotient_Pthreads(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearCombination_Pthreads(int const *farg1, double *farg2, void *farg3, N_Vector farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); result = (int)N_VLinearCombination_Pthreads(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleAddMulti_Pthreads(int const *farg1, double *farg2, N_Vector farg3, void *farg4, void *farg5) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); result = (int)N_VScaleAddMulti_Pthreads(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMulti_Pthreads(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMulti_Pthreads(arg1,arg2,arg3,arg4); fresult = (int)(result); return 
fresult; } SWIGEXPORT int _wrap_FN_VLinearSumVectorArray_Pthreads(int const *farg1, double const *farg2, void *farg3, double const *farg4, void *farg5, void *farg6) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype arg4 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)N_VLinearSumVectorArray_Pthreads(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleVectorArray_Pthreads(int const *farg1, double *farg2, void *farg3, void *farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = (int)N_VScaleVectorArray_Pthreads(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstVectorArray_Pthreads(int const *farg1, double const *farg2, void *farg3) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)N_VConstVectorArray_Pthreads(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormVectorArray_Pthreads(int const *farg1, void *farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VWrmsNormVectorArray_Pthreads(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormMaskVectorArray_Pthreads(int const 
*farg1, void *farg2, void *farg3, N_Vector farg4, double *farg5) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype *)(farg5); result = (int)N_VWrmsNormMaskVectorArray_Pthreads(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumLocal_Pthreads(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWSqrSumLocal_Pthreads(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumMaskLocal_Pthreads(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWSqrSumMaskLocal_Pthreads(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufSize_Pthreads(N_Vector farg1, int64_t *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); result = (int)N_VBufSize_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufPack_Pthreads(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufPack_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufUnpack_Pthreads(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = 
(void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufUnpack_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableFusedOps_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableFusedOps_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearCombination_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearCombination_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleAddMulti_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleAddMulti_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMulti_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMulti_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearSumVectorArray_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearSumVectorArray_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleVectorArray_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleVectorArray_Pthreads(arg1,arg2); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableConstVectorArray_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableConstVectorArray_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormVectorArray_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormVectorArray_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormMaskVectorArray_Pthreads(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormMaskVectorArray_Pthreads(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArray_Pthreads(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArray_Pthreads(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArrayEmpty_Pthreads(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArrayEmpty_Pthreads(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroyVectorArray_Pthreads(void *farg1, int const *farg2) { N_Vector *arg1 = (N_Vector *) 0 ; int arg2 ; arg1 = (N_Vector *)(farg1); arg2 = (int)(*farg2); N_VDestroyVectorArray_Pthreads(arg1,arg2); } StanHeaders/src/nvector/serial/0000755000176200001440000000000014645137104016206 5ustar 
liggesusersStanHeaders/src/nvector/serial/nvector_serial.c0000644000176200001440000013455114645137106021404 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a serial implementation * of the NVECTOR package. * -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_Serial(N_Vector x, N_Vector z); /* z=x */ static void VSum_Serial(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_Serial(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_Serial(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_Serial(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_Serial(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_Serial(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_Serial(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_Serial(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void VScaleBy_Serial(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_Serial(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ 
static int VDiffVectorArray_Serial(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_Serial(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_Serial(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_Serial(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_Serial(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_Serial(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. */ N_Vector_ID N_VGetVectorID_Serial(N_Vector v) { return SUNDIALS_NVEC_SERIAL; } /* ---------------------------------------------------------------------------- * Function to create a new empty serial vector */ N_Vector N_VNewEmpty_Serial(sunindextype length, SUNContext sunctx) { N_Vector v; N_VectorContent_Serial content; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Serial; v->ops->nvclone = N_VClone_Serial; v->ops->nvcloneempty = N_VCloneEmpty_Serial; v->ops->nvdestroy = N_VDestroy_Serial; v->ops->nvspace = N_VSpace_Serial; v->ops->nvgetarraypointer = N_VGetArrayPointer_Serial; v->ops->nvsetarraypointer = N_VSetArrayPointer_Serial; v->ops->nvgetlength = N_VGetLength_Serial; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Serial; v->ops->nvconst = N_VConst_Serial; v->ops->nvprod = N_VProd_Serial; v->ops->nvdiv 
= N_VDiv_Serial; v->ops->nvscale = N_VScale_Serial; v->ops->nvabs = N_VAbs_Serial; v->ops->nvinv = N_VInv_Serial; v->ops->nvaddconst = N_VAddConst_Serial; v->ops->nvdotprod = N_VDotProd_Serial; v->ops->nvmaxnorm = N_VMaxNorm_Serial; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Serial; v->ops->nvwrmsnorm = N_VWrmsNorm_Serial; v->ops->nvmin = N_VMin_Serial; v->ops->nvwl2norm = N_VWL2Norm_Serial; v->ops->nvl1norm = N_VL1Norm_Serial; v->ops->nvcompare = N_VCompare_Serial; v->ops->nvinvtest = N_VInvTest_Serial; v->ops->nvconstrmask = N_VConstrMask_Serial; v->ops->nvminquotient = N_VMinQuotient_Serial; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_Serial; v->ops->nvmaxnormlocal = N_VMaxNorm_Serial; v->ops->nvminlocal = N_VMin_Serial; v->ops->nvl1normlocal = N_VL1Norm_Serial; v->ops->nvinvtestlocal = N_VInvTest_Serial; v->ops->nvconstrmasklocal = N_VConstrMask_Serial; v->ops->nvminquotientlocal = N_VMinQuotient_Serial; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Serial; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Serial; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Serial; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_Serial; v->ops->nvbufpack = N_VBufPack_Serial; v->ops->nvbufunpack = N_VBufUnpack_Serial; /* debugging functions */ v->ops->nvprint = N_VPrint_Serial; v->ops->nvprintfile = N_VPrintFile_Serial; /* Create content */ content = NULL; content = (N_VectorContent_Serial) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = length; content->own_data = SUNFALSE; content->data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Function to create a new serial vector */ N_Vector N_VNew_Serial(sunindextype length, SUNContext sunctx) { N_Vector v; realtype 
*data; v = NULL; v = N_VNewEmpty_Serial(length, sunctx); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if(data == NULL) { N_VDestroy_Serial(v); return(NULL); } /* Attach data */ NV_OWN_DATA_S(v) = SUNTRUE; NV_DATA_S(v) = data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a serial N_Vector with user data component */ N_Vector N_VMake_Serial(sunindextype length, realtype *v_data, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Serial(length, sunctx); if (v == NULL) return(NULL); if (length > 0) { /* Attach data */ NV_OWN_DATA_S(v) = SUNFALSE; NV_DATA_S(v) = v_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create an array of new serial vectors. */ N_Vector* N_VCloneVectorArray_Serial(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to create an array of new serial vectors with NULL data array. 
*/ N_Vector* N_VCloneVectorArrayEmpty_Serial(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_Serial */ void N_VDestroyVectorArray_Serial(N_Vector* vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_Serial(N_Vector v) { return NV_LENGTH_S(v); } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to stdout */ void N_VPrint_Serial(N_Vector x) { N_VPrintFile_Serial(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to outfile */ void N_VPrintFile_Serial(N_Vector x, FILE* outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%35.32Le\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%19.16e\n", xd[i]); #else STAN_SUNDIALS_FPRINTF(outfile, "%11.8e\n", xd[i]); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Serial(N_Vector w) { N_Vector v; N_VectorContent_Serial content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_Serial) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach 
content */ v->content = content; /* Initialize content */ content->length = NV_LENGTH_S(w); content->own_data = SUNFALSE; content->data = NULL; return(v); } N_Vector N_VClone_Serial(N_Vector w) { N_Vector v; realtype *data; sunindextype length; v = NULL; v = N_VCloneEmpty_Serial(w); if (v == NULL) return(NULL); length = NV_LENGTH_S(w); /* Create data */ if (length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if(data == NULL) { N_VDestroy_Serial(v); return(NULL); } /* Attach data */ NV_OWN_DATA_S(v) = SUNTRUE; NV_DATA_S(v) = data; } return(v); } void N_VDestroy_Serial(N_Vector v) { if (v == NULL) return; /* free content */ if (v->content != NULL) { /* free data array if it's owned by the vector */ if (NV_OWN_DATA_S(v) && NV_DATA_S(v) != NULL) { free(NV_DATA_S(v)); NV_DATA_S(v) = NULL; } free(v->content); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace_Serial(N_Vector v, sunindextype *lrw, sunindextype *liw) { *lrw = NV_LENGTH_S(v); *liw = 1; return; } realtype *N_VGetArrayPointer_Serial(N_Vector v) { return((realtype *) NV_DATA_S(v)); } void N_VSetArrayPointer_Serial(realtype *v_data, N_Vector v) { if (NV_LENGTH_S(v) > 0) NV_DATA_S(v) = v_data; return; } void N_VLinearSum_Serial(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i, N; realtype c, *xd, *yd, *zd; N_Vector v1, v2; booleantype test; xd = yd = zd = NULL; if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */ Vaxpy_Serial(a,x,y); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ Vaxpy_Serial(b,y,x); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_Serial(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? 
x : y; VDiff_Serial(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin1_Serial(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin2_Serial(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_Serial(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_Serial(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ N = NV_LENGTH_S(x); xd = NV_DATA_S(x); yd = NV_DATA_S(y); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = (a*xd[i])+(b*yd[i]); return; } void N_VConst_Serial(realtype c, N_Vector z) { sunindextype i, N; realtype *zd; zd = NULL; N = NV_LENGTH_S(z); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = c; return; } void N_VProd_Serial(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); yd = NV_DATA_S(y); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = xd[i]*yd[i]; return; } void N_VDiv_Serial(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); yd = NV_DATA_S(y); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = xd[i]/yd[i]; return; } void N_VScale_Serial(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_Serial(c, x); return; } if (c == ONE) { VCopy_Serial(x, z); } else if (c == 
-ONE) { VNeg_Serial(x, z); } else { N = NV_LENGTH_S(x); xd = NV_DATA_S(x); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = c*xd[i]; } return; } void N_VAbs_Serial(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = SUNRabs(xd[i]); return; } void N_VInv_Serial(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = ONE/xd[i]; return; } void N_VAddConst_Serial(N_Vector x, realtype b, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); zd = NV_DATA_S(z); for (i = 0; i < N; i++) zd[i] = xd[i]+b; return; } realtype N_VDotProd_Serial(N_Vector x, N_Vector y) { sunindextype i, N; realtype sum, *xd, *yd; sum = ZERO; xd = yd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); yd = NV_DATA_S(y); for (i = 0; i < N; i++) sum += xd[i]*yd[i]; return(sum); } realtype N_VMaxNorm_Serial(N_Vector x) { sunindextype i, N; realtype max, *xd; max = ZERO; xd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); for (i = 0; i < N; i++) { if (SUNRabs(xd[i]) > max) max = SUNRabs(xd[i]); } return(max); } realtype N_VWrmsNorm_Serial(N_Vector x, N_Vector w) { return(SUNRsqrt(N_VWSqrSumLocal_Serial(x, w)/(NV_LENGTH_S(x)))); } realtype N_VWSqrSumLocal_Serial(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, prodi, *xd, *wd; sum = ZERO; xd = wd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); wd = NV_DATA_S(w); for (i = 0; i < N; i++) { prodi = xd[i]*wd[i]; sum += SUNSQR(prodi); } return(sum); } realtype N_VWrmsNormMask_Serial(N_Vector x, N_Vector w, N_Vector id) { return(SUNRsqrt(N_VWSqrSumMaskLocal_Serial(x, w, id) / (NV_LENGTH_S(x)))); } realtype N_VWSqrSumMaskLocal_Serial(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, prodi, *xd, *wd, *idd; sum = ZERO; xd = wd = idd = NULL; N = 
NV_LENGTH_S(x); xd = NV_DATA_S(x); wd = NV_DATA_S(w); idd = NV_DATA_S(id); for (i = 0; i < N; i++) { if (idd[i] > ZERO) { prodi = xd[i]*wd[i]; sum += SUNSQR(prodi); } } return(sum); } realtype N_VMin_Serial(N_Vector x) { sunindextype i, N; realtype min, *xd; xd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); min = xd[0]; for (i = 1; i < N; i++) { if (xd[i] < min) min = xd[i]; } return(min); } realtype N_VWL2Norm_Serial(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, prodi, *xd, *wd; sum = ZERO; xd = wd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); wd = NV_DATA_S(w); for (i = 0; i < N; i++) { prodi = xd[i]*wd[i]; sum += SUNSQR(prodi); } return(SUNRsqrt(sum)); } realtype N_VL1Norm_Serial(N_Vector x) { sunindextype i, N; realtype sum, *xd; sum = ZERO; xd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); for (i = 0; i= c) ? ONE : ZERO; } return; } booleantype N_VInvTest_Serial(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; booleantype no_zero_found; xd = zd = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); zd = NV_DATA_S(z); no_zero_found = SUNTRUE; for (i = 0; i < N; i++) { if (xd[i] == ZERO) no_zero_found = SUNFALSE; else zd[i] = ONE/xd[i]; } return no_zero_found; } booleantype N_VConstrMask_Serial(N_Vector c, N_Vector x, N_Vector m) { sunindextype i, N; realtype temp; realtype *cd, *xd, *md; booleantype test; cd = xd = md = NULL; N = NV_LENGTH_S(x); xd = NV_DATA_S(x); cd = NV_DATA_S(c); md = NV_DATA_S(m); temp = ZERO; for (i = 0; i < N; i++) { md[i] = ZERO; /* Continue if no constraints were set for the variable */ if (cd[i] == ZERO) continue; /* Check if a set constraint has been violated */ test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) || (SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO); if (test) { temp = md[i] = ONE; } } /* Return false if any constraint was violated */ return (temp == ONE) ? 
SUNFALSE : SUNTRUE; } realtype N_VMinQuotient_Serial(N_Vector num, N_Vector denom) { booleantype notEvenOnce; sunindextype i, N; realtype *nd, *dd, min; nd = dd = NULL; N = NV_LENGTH_S(num); nd = NV_DATA_S(num); dd = NV_DATA_S(denom); notEvenOnce = SUNTRUE; min = BIG_REAL; for (i = 0; i < N; i++) { if (dd[i] == ZERO) continue; else { if (!notEvenOnce) min = SUNMIN(min, nd[i]/dd[i]); else { min = nd[i]/dd[i]; notEvenOnce = SUNFALSE; } } } return(min); } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_Serial(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i; sunindextype j, N; realtype* zd=NULL; realtype* xd=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_Serial(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_Serial(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LENGTH_S(z); zd = NV_DATA_S(z); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((X[0] == z) && (c[0] == ONE)) { for (i=1; i ZERO) nrm[i] += SUNSQR(xd[j] * wd[j]); } nrm[i] = SUNRsqrt(nrm[i]/N); } return(0); } int N_VScaleAddMultiVectorArray_Serial(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j; sunindextype k, N; realtype* xd=NULL; realtype* yd=NULL; realtype* zd=NULL; int retval; N_Vector* YY; N_Vector* ZZ; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VLinearSum */ if (nsum == 1) { N_VLinearSum_Serial(a[0], X[0], ONE, Y[0][0], Z[0][0]); return(0); } /* should have called N_VScaleAddMulti */ YY = (N_Vector*) malloc(nsum * sizeof(N_Vector)); ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector)); 
for (j=0; j 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_Serial(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LENGTH_S(X[0]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { for (i=0; i 1 * -------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; jops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Serial; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Serial; v->ops->nvdotprodmulti = N_VDotProdMulti_Serial; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Serial; v->ops->nvscalevectorarray = N_VScaleVectorArray_Serial; v->ops->nvconstvectorarray = N_VConstVectorArray_Serial; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Serial; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Serial; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Serial; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Serial; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Serial; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } 
/* ---------------------------------------------------------------------------
 * Individual enable/disable switches for the optional fused and vector-array
 * operations.  Each function installs the serial implementation into the
 * vector's ops table when tf is true, or removes it (sets NULL) when tf is
 * false.  Every function returns 0 on success and -1 when the vector or its
 * ops table is NULL.
 * ------------------------------------------------------------------------- */

int N_VEnableLinearCombination_Serial(N_Vector v, booleantype tf)
{
  /* a missing vector or a missing ops table is an error */
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_Serial : NULL;
  return(0);
}

int N_VEnableScaleAddMulti_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_Serial : NULL;
  return(0);
}

int N_VEnableDotProdMulti_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  /* the multi dot product doubles as the single-buffer local variant */
  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_Serial : NULL;
  v->ops->nvdotprodmultilocal = tf ? N_VDotProdMulti_Serial : NULL;
  return(0);
}

int N_VEnableLinearSumVectorArray_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_Serial : NULL;
  return(0);
}

int N_VEnableScaleVectorArray_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_Serial : NULL;
  return(0);
}

int N_VEnableConstVectorArray_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_Serial : NULL;
  return(0);
}

int N_VEnableWrmsNormVectorArray_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_Serial : NULL;
  return(0);
}

int N_VEnableWrmsNormMaskVectorArray_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_Serial : NULL;
  return(0);
}

int N_VEnableScaleAddMultiVectorArray_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_Serial : NULL;
  return(0);
}

int N_VEnableLinearCombinationVectorArray_Serial(N_Vector v, booleantype tf)
{
  if ((v == NULL) || (v->ops == NULL)) return(-1);
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_Serial : NULL;
  return(0);
}
StanHeaders/src/nvector/serial/fmod/0000755000176200001440000000000014511135055017126 5ustar liggesusersStanHeaders/src/nvector/serial/fmod/fnvector_serial_mod.c0000644000176200001440000006233114645137106023332 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" #include "nvector/nvector_serial.h" SWIGEXPORT N_Vector _wrap_FN_VNew_Serial(int64_t const *farg1, void *farg2) { N_Vector fresult ; sunindextype arg1 ; SUNContext arg2 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (SUNContext)(farg2); result = (N_Vector)N_VNew_Serial(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VNewEmpty_Serial(int64_t const *farg1, void *farg2) { N_Vector fresult ; sunindextype arg1 ; SUNContext arg2 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (SUNContext)(farg2); result = (N_Vector)N_VNewEmpty_Serial(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VMake_Serial(int64_t const *farg1, double *farg2, void *farg3) { 
N_Vector fresult ; sunindextype arg1 ; realtype *arg2 = (realtype *) 0 ; SUNContext arg3 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (realtype *)(farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VMake_Serial(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLength_Serial(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLength_Serial(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VPrint_Serial(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VPrint_Serial(arg1); } SWIGEXPORT void _wrap_FN_VPrintFile_Serial(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile_Serial(arg1,arg2); } SWIGEXPORT int _wrap_FN_VGetVectorID_Serial(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = (N_Vector_ID)N_VGetVectorID_Serial(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FN_VCloneEmpty_Serial(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VCloneEmpty_Serial(arg1); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VClone_Serial(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VClone_Serial(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroy_Serial(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VDestroy_Serial(arg1); } SWIGEXPORT void _wrap_FN_VSpace_Serial(N_Vector farg1, int64_t *farg2, int64_t *farg3) { N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; sunindextype *arg3 = (sunindextype *) 0 ; arg1 = (N_Vector)(farg1); arg2 
= (sunindextype *)(farg2); arg3 = (sunindextype *)(farg3); N_VSpace_Serial(arg1,arg2,arg3); } SWIGEXPORT double * _wrap_FN_VGetArrayPointer_Serial(N_Vector farg1) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); result = (realtype *)N_VGetArrayPointer_Serial(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VSetArrayPointer_Serial(double *farg1, N_Vector farg2) { realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); N_VSetArrayPointer_Serial(arg1,arg2); } SWIGEXPORT void _wrap_FN_VLinearSum_Serial(double const *farg1, N_Vector farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); N_VLinearSum_Serial(arg1,arg2,arg3,arg4,arg5); } SWIGEXPORT void _wrap_FN_VConst_Serial(double const *farg1, N_Vector farg2) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); N_VConst_Serial(arg1,arg2); } SWIGEXPORT void _wrap_FN_VProd_Serial(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VProd_Serial(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VDiv_Serial(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VDiv_Serial(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VScale_Serial(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; 
arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VScale_Serial(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VAbs_Serial(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VAbs_Serial(arg1,arg2); } SWIGEXPORT void _wrap_FN_VInv_Serial(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VInv_Serial(arg1,arg2); } SWIGEXPORT void _wrap_FN_VAddConst_Serial(N_Vector farg1, double const *farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); N_VAddConst_Serial(arg1,arg2,arg3); } SWIGEXPORT double _wrap_FN_VDotProd_Serial(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProd_Serial(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNorm_Serial(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNorm_Serial(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNorm_Serial(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWrmsNorm_Serial(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNormMask_Serial(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); 
arg3 = (N_Vector)(farg3); result = (realtype)N_VWrmsNormMask_Serial(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMin_Serial(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMin_Serial(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWL2Norm_Serial(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWL2Norm_Serial(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1Norm_Serial(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1Norm_Serial(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VCompare_Serial(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VCompare_Serial(arg1,arg2,arg3); } SWIGEXPORT int _wrap_FN_VInvTest_Serial(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTest_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMask_Serial(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMask_Serial(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotient_Serial(N_Vector farg1, N_Vector farg2) { double 
fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotient_Serial(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearCombination_Serial(int const *farg1, double *farg2, void *farg3, N_Vector farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); result = (int)N_VLinearCombination_Serial(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleAddMulti_Serial(int const *farg1, double *farg2, N_Vector farg3, void *farg4, void *farg5) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); result = (int)N_VScaleAddMulti_Serial(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMulti_Serial(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMulti_Serial(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearSumVectorArray_Serial(int const *farg1, double const *farg2, void *farg3, double const *farg4, void *farg5, void *farg6) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype arg4 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; 
int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)N_VLinearSumVectorArray_Serial(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleVectorArray_Serial(int const *farg1, double *farg2, void *farg3, void *farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = (int)N_VScaleVectorArray_Serial(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstVectorArray_Serial(int const *farg1, double const *farg2, void *farg3) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)N_VConstVectorArray_Serial(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormVectorArray_Serial(int const *farg1, void *farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VWrmsNormVectorArray_Serial(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormMaskVectorArray_Serial(int const *farg1, void *farg2, void *farg3, N_Vector farg4, double *farg5) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); arg5 
= (realtype *)(farg5); result = (int)N_VWrmsNormMaskVectorArray_Serial(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumLocal_Serial(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWSqrSumLocal_Serial(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumMaskLocal_Serial(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWSqrSumMaskLocal_Serial(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufSize_Serial(N_Vector farg1, int64_t *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); result = (int)N_VBufSize_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufPack_Serial(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufPack_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufUnpack_Serial(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufUnpack_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableFusedOps_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); 
result = (int)N_VEnableFusedOps_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearCombination_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearCombination_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleAddMulti_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleAddMulti_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMulti_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMulti_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearSumVectorArray_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearSumVectorArray_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleVectorArray_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleVectorArray_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableConstVectorArray_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableConstVectorArray_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FN_VEnableWrmsNormVectorArray_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormVectorArray_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormMaskVectorArray_Serial(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormMaskVectorArray_Serial(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArray_Serial(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArray_Serial(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArrayEmpty_Serial(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArrayEmpty_Serial(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroyVectorArray_Serial(void *farg1, int const *farg2) { N_Vector *arg1 = (N_Vector *) 0 ; int arg2 ; arg1 = (N_Vector *)(farg1); arg2 = (int)(*farg2); N_VDestroyVectorArray_Serial(arg1,arg2); } StanHeaders/src/nvector/serial/fmod/fnvector_serial_mod.f900000644000176200001440000011727114645137106023512 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! 
SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fnvector_serial_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FN_VNew_Serial public :: FN_VNewEmpty_Serial public :: FN_VMake_Serial public :: FN_VGetLength_Serial public :: FN_VPrint_Serial public :: FN_VPrintFile_Serial public :: FN_VGetVectorID_Serial public :: FN_VCloneEmpty_Serial public :: FN_VClone_Serial public :: FN_VDestroy_Serial public :: FN_VSpace_Serial public :: FN_VGetArrayPointer_Serial public :: FN_VSetArrayPointer_Serial public :: FN_VLinearSum_Serial public :: FN_VConst_Serial public :: FN_VProd_Serial public :: FN_VDiv_Serial public :: FN_VScale_Serial public :: FN_VAbs_Serial public :: FN_VInv_Serial public :: FN_VAddConst_Serial public :: FN_VDotProd_Serial public :: FN_VMaxNorm_Serial public :: FN_VWrmsNorm_Serial public :: FN_VWrmsNormMask_Serial public :: FN_VMin_Serial public :: FN_VWL2Norm_Serial public :: FN_VL1Norm_Serial public :: FN_VCompare_Serial public :: FN_VInvTest_Serial public :: FN_VConstrMask_Serial public :: FN_VMinQuotient_Serial public :: FN_VLinearCombination_Serial public :: FN_VScaleAddMulti_Serial public :: FN_VDotProdMulti_Serial public :: FN_VLinearSumVectorArray_Serial public :: FN_VScaleVectorArray_Serial public :: FN_VConstVectorArray_Serial public :: FN_VWrmsNormVectorArray_Serial public :: FN_VWrmsNormMaskVectorArray_Serial public :: FN_VWSqrSumLocal_Serial public :: FN_VWSqrSumMaskLocal_Serial public :: FN_VBufSize_Serial public :: FN_VBufPack_Serial public :: FN_VBufUnpack_Serial public :: FN_VEnableFusedOps_Serial public :: 
FN_VEnableLinearCombination_Serial public :: FN_VEnableScaleAddMulti_Serial public :: FN_VEnableDotProdMulti_Serial public :: FN_VEnableLinearSumVectorArray_Serial public :: FN_VEnableScaleVectorArray_Serial public :: FN_VEnableConstVectorArray_Serial public :: FN_VEnableWrmsNormVectorArray_Serial public :: FN_VEnableWrmsNormMaskVectorArray_Serial public :: FN_VCloneVectorArray_Serial public :: FN_VCloneVectorArrayEmpty_Serial public :: FN_VDestroyVectorArray_Serial ! WRAPPER DECLARATIONS interface function swigc_FN_VNew_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VNew_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VNewEmpty_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VNewEmpty_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VMake_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VMake_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VGetLength_Serial(farg1) & bind(C, name="_wrap_FN_VGetLength_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function subroutine swigc_FN_VPrint_Serial(farg1) & bind(C, name="_wrap_FN_VPrint_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VGetVectorID_Serial(farg1) & bind(C, name="_wrap_FN_VGetVectorID_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 
integer(C_INT) :: fresult end function function swigc_FN_VCloneEmpty_Serial(farg1) & bind(C, name="_wrap_FN_VCloneEmpty_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VClone_Serial(farg1) & bind(C, name="_wrap_FN_VClone_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroy_Serial(farg1) & bind(C, name="_wrap_FN_VDestroy_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VSpace_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSpace_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VGetArrayPointer_Serial(farg1) & bind(C, name="_wrap_FN_VGetArrayPointer_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VSetArrayPointer_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VSetArrayPointer_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VLinearSum_Serial(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VLinearSum_Serial") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 end subroutine subroutine swigc_FN_VConst_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VConst_Serial") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VProd_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VProd_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine 
subroutine swigc_FN_VDiv_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDiv_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VScale_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VScale_Serial") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VAbs_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VAbs_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VInv_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VInv_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VAddConst_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VAddConst_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VDotProd_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VDotProd_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNorm_Serial(farg1) & bind(C, name="_wrap_FN_VMaxNorm_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNorm_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VWrmsNorm_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNormMask_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWrmsNormMask_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 
real(C_DOUBLE) :: fresult end function function swigc_FN_VMin_Serial(farg1) & bind(C, name="_wrap_FN_VMin_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWL2Norm_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VWL2Norm_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1Norm_Serial(farg1) & bind(C, name="_wrap_FN_VL1Norm_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_FN_VCompare_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VCompare_Serial") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VInvTest_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VInvTest_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMask_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMask_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotient_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotient_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VLinearCombination_Serial(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VLinearCombination_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function 
function swigc_FN_VScaleAddMulti_Serial(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VScaleAddMulti_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMulti_Serial(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMulti_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VLinearSumVectorArray_Serial(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FN_VLinearSumVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FN_VScaleVectorArray_Serial(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VScaleVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VConstVectorArray_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormVectorArray_Serial(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VWrmsNormVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 
type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormMaskVectorArray_Serial(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VWrmsNormMaskVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VWSqrSumLocal_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VWSqrSumLocal_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumMaskLocal_Serial(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWSqrSumMaskLocal_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VBufSize_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VBufSize_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufPack_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VBufPack_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufUnpack_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VBufUnpack_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableFusedOps_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableFusedOps_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function 
swigc_FN_VEnableLinearCombination_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearCombination_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleAddMulti_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleAddMulti_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMulti_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMulti_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearSumVectorArray_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearSumVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleVectorArray_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableConstVectorArray_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableConstVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormVectorArray_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormMaskVectorArray_Serial(farg1, farg2) & bind(C, 
name="_wrap_FN_VEnableWrmsNormMaskVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VCloneVectorArray_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArray_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VCloneVectorArrayEmpty_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArrayEmpty_Serial") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroyVectorArray_Serial(farg1, farg2) & bind(C, name="_wrap_FN_VDestroyVectorArray_Serial") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 end subroutine end interface contains ! MODULE SUBPROGRAMS function FN_VNew_Serial(vec_length, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 type(C_PTR) :: farg2 farg1 = vec_length farg2 = sunctx fresult = swigc_FN_VNew_Serial(farg1, farg2) call c_f_pointer(fresult, swig_result) end function function FN_VNewEmpty_Serial(vec_length, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: vec_length type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 type(C_PTR) :: farg2 farg1 = vec_length farg2 = sunctx fresult = swigc_FN_VNewEmpty_Serial(farg1, farg2) call c_f_pointer(fresult, swig_result) end function function FN_VMake_Serial(vec_length, v_data, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), 
intent(in) :: vec_length real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = vec_length farg2 = c_loc(v_data(1)) farg3 = sunctx fresult = swigc_FN_VMake_Serial(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FN_VGetLength_Serial(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLength_Serial(farg1) swig_result = fresult end function subroutine FN_VPrint_Serial(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VPrint_Serial(farg1) end subroutine subroutine FN_VPrintFile_Serial(v, outfile) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v) farg2 = outfile call swigc_FN_VPrintFile_Serial(farg1, farg2) end subroutine function FN_VGetVectorID_Serial(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(N_Vector_ID) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetVectorID_Serial(farg1) swig_result = fresult end function function FN_VCloneEmpty_Serial(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VCloneEmpty_Serial(farg1) call c_f_pointer(fresult, swig_result) end function function FN_VClone_Serial(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 
farg1 = c_loc(w) fresult = swigc_FN_VClone_Serial(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FN_VDestroy_Serial(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VDestroy_Serial(farg1) end subroutine subroutine FN_VSpace_Serial(v, lrw, liw) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), dimension(*), target, intent(inout) :: lrw integer(C_INT64_T), dimension(*), target, intent(inout) :: liw type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(v) farg2 = c_loc(lrw(1)) farg3 = c_loc(liw(1)) call swigc_FN_VSpace_Serial(farg1, farg2, farg3) end subroutine function FN_VGetArrayPointer_Serial(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetArrayPointer_Serial(farg1) call c_f_pointer(fresult, swig_result, [1]) end function subroutine FN_VSetArrayPointer_Serial(v_data, v) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v_data(1)) farg2 = c_loc(v) call swigc_FN_VSetArrayPointer_Serial(farg1, farg2) end subroutine subroutine FN_VLinearSum_Serial(a, x, b, y, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: a type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = a farg2 = c_loc(x) farg3 = b farg4 = c_loc(y) farg5 = c_loc(z) call swigc_FN_VLinearSum_Serial(farg1, farg2, farg3, farg4, farg5) end subroutine subroutine FN_VConst_Serial(c, 
z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(z) call swigc_FN_VConst_Serial(farg1, farg2) end subroutine subroutine FN_VProd_Serial(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VProd_Serial(farg1, farg2, farg3) end subroutine subroutine FN_VDiv_Serial(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VDiv_Serial(farg1, farg2, farg3) end subroutine subroutine FN_VScale_Serial(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VScale_Serial(farg1, farg2, farg3) end subroutine subroutine FN_VAbs_Serial(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VAbs_Serial(farg1, farg2) end subroutine subroutine FN_VInv_Serial(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VInv_Serial(farg1, farg2) end subroutine subroutine FN_VAddConst_Serial(x, b, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), 
target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = b farg3 = c_loc(z) call swigc_FN_VAddConst_Serial(farg1, farg2, farg3) end subroutine function FN_VDotProd_Serial(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProd_Serial(farg1, farg2) swig_result = fresult end function function FN_VMaxNorm_Serial(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNorm_Serial(farg1) swig_result = fresult end function function FN_VWrmsNorm_Serial(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWrmsNorm_Serial(farg1, farg2) swig_result = fresult end function function FN_VWrmsNormMask_Serial(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWrmsNormMask_Serial(farg1, farg2, farg3) swig_result = fresult end function function FN_VMin_Serial(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, 
intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMin_Serial(farg1) swig_result = fresult end function function FN_VWL2Norm_Serial(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWL2Norm_Serial(farg1, farg2) swig_result = fresult end function function FN_VL1Norm_Serial(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1Norm_Serial(farg1) swig_result = fresult end function subroutine FN_VCompare_Serial(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VCompare_Serial(farg1, farg2, farg3) end subroutine function FN_VInvTest_Serial(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = swigc_FN_VInvTest_Serial(farg1, farg2) swig_result = fresult end function function FN_VConstrMask_Serial(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult 
= swigc_FN_VConstrMask_Serial(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotient_Serial(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotient_Serial(farg1, farg2) swig_result = fresult end function function FN_VLinearCombination_Serial(nvec, c, v, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: v type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = v farg4 = c_loc(z) fresult = swigc_FN_VLinearCombination_Serial(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VScaleAddMulti_Serial(nvec, a, x, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvec farg2 = c_loc(a(1)) farg3 = c_loc(x) farg4 = y farg5 = z fresult = swigc_FN_VScaleAddMulti_Serial(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VDotProdMulti_Serial(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods 
integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMulti_Serial(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VLinearSumVectorArray_Serial(nvec, a, x, b, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), intent(in) :: a type(C_PTR) :: x real(C_DOUBLE), intent(in) :: b type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = nvec farg2 = a farg3 = x farg4 = b farg5 = y farg6 = z fresult = swigc_FN_VLinearSumVectorArray_Serial(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FN_VScaleVectorArray_Serial(nvec, c, x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: x type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = x farg4 = z fresult = swigc_FN_VScaleVectorArray_Serial(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VConstVectorArray_Serial(nvecs, c, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvecs real(C_DOUBLE), intent(in) :: c type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = nvecs farg2 = c farg3 = z fresult = swigc_FN_VConstVectorArray_Serial(farg1, farg2, farg3) swig_result = fresult end function function FN_VWrmsNormVectorArray_Serial(nvecs, x, w, nrm) & result(swig_result) use, 
intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvecs type(C_PTR) :: x type(C_PTR) :: w real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvecs farg2 = x farg3 = w farg4 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormVectorArray_Serial(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VWrmsNormMaskVectorArray_Serial(nvecs, x, w, id, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvecs type(C_PTR) :: x type(C_PTR) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvecs farg2 = x farg3 = w farg4 = c_loc(id) farg5 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormMaskVectorArray_Serial(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VWSqrSumLocal_Serial(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWSqrSumLocal_Serial(farg1, farg2) swig_result = fresult end function function FN_VWSqrSumMaskLocal_Serial(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWSqrSumMaskLocal_Serial(farg1, farg2, farg3) swig_result = 
fresult end function function FN_VBufSize_Serial(x, size) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x integer(C_INT64_T), dimension(*), target, intent(inout) :: size integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(size(1)) fresult = swigc_FN_VBufSize_Serial(farg1, farg2) swig_result = fresult end function function FN_VBufPack_Serial(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufPack_Serial(farg1, farg2) swig_result = fresult end function function FN_VBufUnpack_Serial(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufUnpack_Serial(farg1, farg2) swig_result = fresult end function function FN_VEnableFusedOps_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableFusedOps_Serial(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearCombination_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearCombination_Serial(farg1, farg2) swig_result = fresult end function function 
FN_VEnableScaleAddMulti_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleAddMulti_Serial(farg1, farg2) swig_result = fresult end function function FN_VEnableDotProdMulti_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableDotProdMulti_Serial(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearSumVectorArray_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearSumVectorArray_Serial(farg1, farg2) swig_result = fresult end function function FN_VEnableScaleVectorArray_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleVectorArray_Serial(farg1, farg2) swig_result = fresult end function function FN_VEnableConstVectorArray_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableConstVectorArray_Serial(farg1, farg2) 
swig_result = fresult end function function FN_VEnableWrmsNormVectorArray_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormVectorArray_Serial(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormMaskVectorArray_Serial(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormMaskVectorArray_Serial(farg1, farg2) swig_result = fresult end function function FN_VCloneVectorArray_Serial(count, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneVectorArray_Serial(farg1, farg2) swig_result = fresult end function function FN_VCloneVectorArrayEmpty_Serial(count, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneVectorArrayEmpty_Serial(farg1, farg2) swig_result = fresult end function subroutine FN_VDestroyVectorArray_Serial(vs, count) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: vs integer(C_INT), intent(in) :: count type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = vs farg2 = count call swigc_FN_VDestroyVectorArray_Serial(farg1, farg2) end subroutine end module 
StanHeaders/src/nvector/hip/0000755000176200001440000000000014511135055015502 5ustar liggesusersStanHeaders/src/nvector/hip/VectorKernels.hip.hpp0000644000176200001440000001654314645137106021600 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- */ #ifndef _NVECTOR_HIP_KERNELS_CUH_ #define _NVECTOR_HIP_KERNELS_CUH_ #include #include "sundials_hip_kernels.hip.hpp" namespace sundials { namespace hip { namespace impl { /* * Sets all elements of the vector X to constant value a. * */ template __global__ void setConstKernel(T a, T *X, I n) { GRID_STRIDE_XLOOP(I, i, n) { X[i] = a; } } /* * Computes linear sum (combination) of two vectors. * */ template __global__ void linearSumKernel(T a, const T *X, T b, const T *Y, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = a*X[i] + b*Y[i]; } } /* * Elementwise product of two vectors. * */ template __global__ void prodKernel(const T *X, const T *Y, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = X[i]*Y[i]; } } /* * Elementwise division of two vectors. * */ template __global__ void divKernel(const T *X, const T *Y, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = X[i]/Y[i]; } } /* * Scale vector with scalar value 'a'. * */ template __global__ void scaleKernel(T a, const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = a*X[i]; } } /* * Stores absolute values of vector X elements into vector Z. * */ template __global__ void absKernel(const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = abs(X[i]); } } /* * Elementwise inversion. 
* */ template __global__ void invKernel(const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = 1.0/(X[i]); } } /* * Add constant 'c' to each vector element. * */ template __global__ void addConstKernel(T a, const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = a + X[i]; } } /* * Compare absolute values of vector 'X' with constant 'c'. * */ template __global__ void compareKernel(T c, const T *X, T *Z, I n) { GRID_STRIDE_XLOOP(I, i, n) { Z[i] = (abs(X[i]) >= c) ? 1.0 : 0.0; } } /* * Dot product of two vectors. * */ template class GridReducer> __global__ void dotProdKernel(const T *x, const T *y, T *out, I n, unsigned int *device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { sum += x[i] * y[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Finds max norm the vector. * */ template class GridReducer> __global__ void maxNormKernel(const T *x, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::maximum; const T Id = op::identity(); T maximum = Id; GRID_STRIDE_XLOOP(I, i, n) { maximum = max(abs(x[i]), maximum); } GridReducer{}(maximum, Id, out, device_count); } /* * Weighted L2 norm squared. * */ template class GridReducer> __global__ void wL2NormSquareKernel(const T *x, const T *w, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { sum += x[i] * w[i] * x[i] * w[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Weighted L2 norm squared with mask. Vector id specifies the mask. 
* */ template class GridReducer> __global__ void wL2NormSquareMaskKernel(const T *x, const T *w, const T *id, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { if(id[i] > 0.0) sum += x[i] * w[i] * x[i] * w[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Finds min value in the vector. * */ template class GridReducer> __global__ void findMinKernel(T MAX_VAL, const T *x, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::minimum; const T Id = op::identity(); T minimum = Id; GRID_STRIDE_XLOOP(I, i, n) { minimum = min(x[i], minimum); } GridReducer{}(minimum, Id, out, device_count); } /* * Computes L1 norm of vector * */ template class GridReducer> __global__ void L1NormKernel(const T *x, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { sum += abs(x[i]); } GridReducer{}(sum, Id, out, device_count); } /* * Vector inverse z[i] = 1/x[i] with check for zeros. Reduction is performed * to flag the result if any x[i] = 0. * */ template class GridReducer> __global__ void invTestKernel(const T *x, T *z, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T flag = Id; GRID_STRIDE_XLOOP(I, i, n) { if (x[i] == static_cast(0.0)) flag += 1.0; else z[i] = 1.0/x[i]; } GridReducer{}(flag, Id, out, device_count); } /* * Checks if inequality constraints are satisfied. Constraint check * results are stored in vector 'm'. A sum reduction over all elements * of 'm' is performed to find if any of the constraints is violated. * If all constraints are satisfied sum == 0. 
* */ template class GridReducer> __global__ void constrMaskKernel(const T *c, const T *x, T *m, T *out, I n, unsigned int* device_count) { using op = sundials::reductions::impl::plus; const T Id = op::identity(); T sum = Id; GRID_STRIDE_XLOOP(I, i, n) { // test = true if constraints violated bool test = (std::abs(c[i]) > 1.5 && c[i]*x[i] <= 0.0) || (std::abs(c[i]) > 0.5 && c[i]*x[i] < 0.0); m[i] = test ? 1.0 : 0.0; sum = m[i]; } GridReducer{}(sum, Id, out, device_count); } /* * Finds minimum component-wise quotient. * */ template class GridReducer> __global__ void minQuotientKernel(const T MAX_VAL, const T *num, const T *den, T *min_quotient, I n, unsigned int* device_count) { using op = sundials::reductions::impl::minimum; const T Id = op::identity(); T minimum = Id; T quotient = 0.0; GRID_STRIDE_XLOOP(I, i, n) { quotient = (den[i] == static_cast(0.0)) ? Id : num[i]/den[i]; minimum = min(quotient, minimum); } GridReducer{}(minimum, Id, min_quotient, device_count); } } // namespace impl } // namespace hip } // namespace sundials #endif // _NVECTOR_HIP_KERNELS_CUH_ StanHeaders/src/nvector/hip/VectorArrayKernels.hip.hpp0000644000176200001440000001226714645137106022576 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): David Gardner, Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- */ #ifndef _NVECTOR_HIP_ARRAY_KERNELS_CUH_ #define _NVECTOR_HIP_ARRAY_KERNELS_CUH_ #include #include "sundials_hip_kernels.hip.hpp" namespace sundials { namespace hip { namespace impl { /* * ----------------------------------------------------------------------------- * fused vector operation kernels * ----------------------------------------------------------------------------- */ /* * Computes the linear combination of nv vectors */ template __global__ void linearCombinationKernel(int nv, T* c, T** xd, T* zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { zd[i] = c[0]*xd[0][i]; for (int j=1; j __global__ void scaleAddMultiKernel(int nv, T* c, T* xd, T** yd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j class GridReducer> __global__ void dotProdMultiKernel(int nv, const T* xd, T** yd, T* out, I n) { // REQUIRES nv blocks (i.e. gridDim.x == nv) using op = sundials::reductions::impl::plus; constexpr T Id = op::identity(); const I k = blockIdx.x; // Initialize to zero. 
T sum = Id; for (I i = threadIdx.x; i < n; i += blockDim.x) { // each thread computes n/blockDim.x elements sum += xd[i] * yd[k][i]; } GridReducer{}(sum, Id, &out[k], nullptr); } /* * ----------------------------------------------------------------------------- * vector array operation kernels * ----------------------------------------------------------------------------- */ /* * Computes the linear sum of multiple vectors */ template __global__ void linearSumVectorArrayKernel(int nv, T a, T** xd, T b, T** yd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j __global__ void scaleVectorArrayKernel(int nv, T* c, T** xd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j __global__ void constVectorArrayKernel(int nv, T c, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int j=0; j class GridReducer> __global__ void wL2NormSquareVectorArrayKernel(int nv, T** xd, T** wd, T* out, I n) { // REQUIRES nv blocks (i.e. gridDim.x == nv) using op = sundials::reductions::impl::plus; constexpr T Id = op::identity(); const I k = blockIdx.x; // Initialize to zero. T sum = 0.0; for (I i = threadIdx.x; i < n; i += blockDim.x) { // each thread computes n/blockDim.x elements sum += xd[k][i] * wd[k][i] * xd[k][i] * wd[k][i]; } GridReducer{}(sum, Id, &out[k], nullptr); } /* * Masked WRMS norm of nv vectors. * */ template class GridReducer> __global__ void wL2NormSquareMaskVectorArrayKernel(int nv, T** xd, T** wd, T* id, T* out, I n) { // REQUIRES nv blocks (i.e. gridDim.x == nv) using op = sundials::reductions::impl::plus; constexpr T Id = op::identity(); const I k = blockIdx.x; // Initialize to zero. 
T sum = 0.0; for (I i = threadIdx.x; i < n; i += blockDim.x) { // each thread computes n/blockDim.x elements if (id[i] > 0.0) sum += xd[k][i] * wd[k][i] * xd[k][i] * wd[k][i]; } GridReducer{}(sum, Id, &out[k], nullptr); } /* * Computes the scaled sum of a vector array with multiple other vector arrays */ template __global__ void scaleAddMultiVectorArrayKernel(int nv, int ns, T* c, T** xd, T** yd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int k=0; k __global__ void linearCombinationVectorArrayKernel(int nv, int ns, T* c, T** xd, T** zd, I n) { GRID_STRIDE_XLOOP(I, i, n) { for (int k=0; k #include #include #include #include #include #include "VectorArrayKernels.hip.hpp" #include "VectorKernels.hip.hpp" #include "sundials_hip.h" #include "sundials_debug.h" #define ZERO RCONST(0.0) #define HALF RCONST(0.5) using namespace sundials; using namespace sundials::hip; using namespace sundials::hip::impl; /* * Private function definitions */ // Allocate vector data static int AllocateData(N_Vector v); // Reduction buffer functions static int InitializeDeviceCounter(N_Vector v); static int FreeDeviceCounter(N_Vector v); static int InitializeReductionBuffer(N_Vector v, realtype value, size_t n = 1); static void FreeReductionBuffer(N_Vector v); static int CopyReductionBufferFromDevice(N_Vector v, size_t n = 1); // Kernel launch parameters static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, hipStream_t& stream, size_t n = 0); static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, hipStream_t& stream, bool& atomic, size_t n = 0); static void PostKernelLaunch(); /* * Macro definitions */ // Macros to access vector content #define NVEC_HIP_CONTENT(x) ((N_VectorContent_Hip)(x->content)) #define NVEC_HIP_MEMSIZE(x) (NVEC_HIP_CONTENT(x)->length * sizeof(realtype)) #define NVEC_HIP_MEMHELP(x) (NVEC_HIP_CONTENT(x)->mem_helper) #define NVEC_HIP_HDATAp(x) 
((realtype*) NVEC_HIP_CONTENT(x)->host_data->ptr) #define NVEC_HIP_DDATAp(x) ((realtype*) NVEC_HIP_CONTENT(x)->device_data->ptr) #define NVEC_HIP_STREAM(x) (NVEC_HIP_CONTENT(x)->stream_exec_policy->stream()) // Macros to access vector private content #define NVEC_HIP_PRIVATE(x) ((N_PrivateVectorContent_Hip)(NVEC_HIP_CONTENT(x)->priv)) #define NVEC_HIP_HBUFFERp(x) ((realtype*) NVEC_HIP_PRIVATE(x)->reduce_buffer_host->ptr) #define NVEC_HIP_DBUFFERp(x) ((realtype*) NVEC_HIP_PRIVATE(x)->reduce_buffer_dev->ptr) #define NVEC_HIP_DCOUNTERp(x) ((unsigned int*) NVEC_HIP_PRIVATE(x)->device_counter->ptr) /* * Private structure definition */ struct _N_PrivateVectorContent_Hip { booleantype use_managed_mem; /* indicates if the data pointers and buffer pointers are managed memory */ size_t reduce_buffer_allocated_bytes; /* current size of the reduction buffer */ SUNMemory reduce_buffer_dev; /* device buffer used for reductions */ SUNMemory reduce_buffer_host; /* host buffer used for reductions */ SUNMemory device_counter; /* device memory for a counter (used in LDS reductions) */ }; typedef struct _N_PrivateVectorContent_Hip *N_PrivateVectorContent_Hip; /* Default policies to clone */ ThreadDirectExecPolicy DEFAULT_STREAMING_EXECPOLICY(512); BlockReduceExecPolicy DEFAULT_REDUCTION_EXECPOLICY(512); extern "C" { N_Vector N_VNewEmpty_Hip(SUNContext sunctx) { N_Vector v; /* Create vector */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Hip; v->ops->nvclone = N_VClone_Hip; v->ops->nvcloneempty = N_VCloneEmpty_Hip; v->ops->nvdestroy = N_VDestroy_Hip; v->ops->nvspace = N_VSpace_Hip; v->ops->nvgetlength = N_VGetLength_Hip; v->ops->nvgetarraypointer = N_VGetHostArrayPointer_Hip; v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_Hip; v->ops->nvsetarraypointer = N_VSetHostArrayPointer_Hip; /* standard vector operations */ v->ops->nvlinearsum = 
N_VLinearSum_Hip; v->ops->nvconst = N_VConst_Hip; v->ops->nvprod = N_VProd_Hip; v->ops->nvdiv = N_VDiv_Hip; v->ops->nvscale = N_VScale_Hip; v->ops->nvabs = N_VAbs_Hip; v->ops->nvinv = N_VInv_Hip; v->ops->nvaddconst = N_VAddConst_Hip; v->ops->nvdotprod = N_VDotProd_Hip; v->ops->nvmaxnorm = N_VMaxNorm_Hip; v->ops->nvmin = N_VMin_Hip; v->ops->nvl1norm = N_VL1Norm_Hip; v->ops->nvinvtest = N_VInvTest_Hip; v->ops->nvconstrmask = N_VConstrMask_Hip; v->ops->nvminquotient = N_VMinQuotient_Hip; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Hip; v->ops->nvwrmsnorm = N_VWrmsNorm_Hip; v->ops->nvwl2norm = N_VWL2Norm_Hip; v->ops->nvcompare = N_VCompare_Hip; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_Hip; v->ops->nvmaxnormlocal = N_VMaxNorm_Hip; v->ops->nvminlocal = N_VMin_Hip; v->ops->nvl1normlocal = N_VL1Norm_Hip; v->ops->nvinvtestlocal = N_VInvTest_Hip; v->ops->nvconstrmasklocal = N_VConstrMask_Hip; v->ops->nvminquotientlocal = N_VMinQuotient_Hip; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Hip; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Hip; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Hip; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_Hip; v->ops->nvbufpack = N_VBufPack_Hip; v->ops->nvbufunpack = N_VBufUnpack_Hip; /* print operation for debugging */ v->ops->nvprint = N_VPrint_Hip; v->ops->nvprintfile = N_VPrintFile_Hip; /* Create content */ v->content = (N_VectorContent_Hip) malloc(sizeof(_N_VectorContent_Hip)); if (v->content == NULL) { N_VDestroy(v); return(NULL); } NVEC_HIP_CONTENT(v)->priv = malloc(sizeof(_N_PrivateVectorContent_Hip)); if (NVEC_HIP_CONTENT(v)->priv == NULL) { N_VDestroy(v); return(NULL); } // Initialize content NVEC_HIP_CONTENT(v)->length = 0; NVEC_HIP_CONTENT(v)->host_data = NULL; NVEC_HIP_CONTENT(v)->device_data = NULL; NVEC_HIP_CONTENT(v)->stream_exec_policy = NULL; 
NVEC_HIP_CONTENT(v)->reduce_exec_policy = NULL; NVEC_HIP_CONTENT(v)->mem_helper = NULL; NVEC_HIP_CONTENT(v)->own_helper = SUNFALSE; // Initialize private content NVEC_HIP_PRIVATE(v)->use_managed_mem = SUNFALSE; NVEC_HIP_PRIVATE(v)->reduce_buffer_dev = NULL; NVEC_HIP_PRIVATE(v)->reduce_buffer_host = NULL; NVEC_HIP_PRIVATE(v)->device_counter = NULL; NVEC_HIP_PRIVATE(v)->reduce_buffer_allocated_bytes = 0; return(v); } N_Vector N_VNew_Hip(sunindextype length, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Hip(sunctx); if (v == NULL) return(NULL); NVEC_HIP_CONTENT(v)->length = length; NVEC_HIP_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); NVEC_HIP_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->own_helper = SUNTRUE; NVEC_HIP_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_HIP_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Hip: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Hip: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VNewWithMemHelp_Hip(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, SUNContext sunctx) { N_Vector v; if (helper == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Hip: helper is NULL\n"); return(NULL); } if (!SUNMemoryHelper_ImplementsRequiredOps(helper)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Hip: helper doesn't implement all required ops\n"); return(NULL); } v = NULL; v = N_VNewEmpty_Hip(sunctx); if (v == NULL) return(NULL); NVEC_HIP_CONTENT(v)->length = length; NVEC_HIP_CONTENT(v)->mem_helper = helper; NVEC_HIP_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->own_helper = SUNFALSE; 
NVEC_HIP_PRIVATE(v)->use_managed_mem = use_managed_mem; if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Hip: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VNewManaged_Hip(sunindextype length, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Hip(sunctx); if (v == NULL) return(NULL); NVEC_HIP_CONTENT(v)->length = length; NVEC_HIP_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); NVEC_HIP_CONTENT(v)->own_helper = SUNTRUE; NVEC_HIP_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_HIP_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Hip: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Hip: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VMake_Hip(sunindextype length, realtype *h_vdata, realtype *d_vdata, SUNContext sunctx) { N_Vector v; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Hip(sunctx); if (v == NULL) return(NULL); NVEC_HIP_CONTENT(v)->length = length; NVEC_HIP_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(h_vdata, SUNMEMTYPE_HOST); NVEC_HIP_CONTENT(v)->device_data = SUNMemoryHelper_Wrap(d_vdata, SUNMEMTYPE_DEVICE); NVEC_HIP_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); NVEC_HIP_CONTENT(v)->own_helper = SUNTRUE; NVEC_HIP_PRIVATE(v)->use_managed_mem = SUNFALSE; if (NVEC_HIP_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Hip: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_HIP_CONTENT(v)->device_data == NULL || NVEC_HIP_CONTENT(v)->host_data == NULL) { 
SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Hip: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VMakeManaged_Hip(sunindextype length, realtype *vdata, SUNContext sunctx) { N_Vector v; if (vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_Hip(sunctx); if (v == NULL) return(NULL); NVEC_HIP_CONTENT(v)->length = length; NVEC_HIP_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(vdata, SUNMEMTYPE_UVM); NVEC_HIP_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_HIP_CONTENT(v)->host_data); NVEC_HIP_CONTENT(v)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); NVEC_HIP_CONTENT(v)->mem_helper = SUNMemoryHelper_Hip(sunctx); NVEC_HIP_CONTENT(v)->own_helper = SUNTRUE; NVEC_HIP_PRIVATE(v)->use_managed_mem = SUNTRUE; if (NVEC_HIP_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Hip: memory helper is NULL\n"); N_VDestroy(v); return(NULL); } if (NVEC_HIP_CONTENT(v)->device_data == NULL || NVEC_HIP_CONTENT(v)->host_data == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Hip: SUNMemoryHelper_Wrap returned NULL\n"); N_VDestroy(v); return(NULL); } return(v); } /* ---------------------------------------------------------------------------- * Set pointer to the raw host data. Does not free the existing pointer. 
*/ void N_VSetHostArrayPointer_Hip(realtype* h_vdata, N_Vector v) { if (N_VIsManagedMemory_Hip(v)) { if (NVEC_HIP_CONTENT(v)->host_data) { NVEC_HIP_CONTENT(v)->host_data->ptr = (void*) h_vdata; NVEC_HIP_CONTENT(v)->device_data->ptr = (void*) h_vdata; } else { NVEC_HIP_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_UVM); NVEC_HIP_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_HIP_CONTENT(v)->host_data); } } else { if (NVEC_HIP_CONTENT(v)->host_data) { NVEC_HIP_CONTENT(v)->host_data->ptr = (void*) h_vdata; } else { NVEC_HIP_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_HOST); } } } /* ---------------------------------------------------------------------------- * Set pointer to the raw device data */ void N_VSetDeviceArrayPointer_Hip(realtype* d_vdata, N_Vector v) { if (N_VIsManagedMemory_Hip(v)) { if (NVEC_HIP_CONTENT(v)->device_data) { NVEC_HIP_CONTENT(v)->device_data->ptr = (void*) d_vdata; NVEC_HIP_CONTENT(v)->host_data->ptr = (void*) d_vdata; } else { NVEC_HIP_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_UVM); NVEC_HIP_CONTENT(v)->host_data = SUNMemoryHelper_Alias(NVEC_HIP_CONTENT(v)->device_data); } } else { if (NVEC_HIP_CONTENT(v)->device_data) { NVEC_HIP_CONTENT(v)->device_data->ptr = (void*) d_vdata; } else { NVEC_HIP_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_DEVICE); } } } /* ---------------------------------------------------------------------------- * Return a flag indicating if the memory for the vector data is managed */ booleantype N_VIsManagedMemory_Hip(N_Vector x) { return NVEC_HIP_PRIVATE(x)->use_managed_mem; } int N_VSetKernelExecPolicy_Hip(N_Vector x, SUNHipExecPolicy* stream_exec_policy, SUNHipExecPolicy* reduce_exec_policy) { if (x == NULL) return(-1); /* Delete the old policies */ delete NVEC_HIP_CONTENT(x)->stream_exec_policy; delete NVEC_HIP_CONTENT(x)->reduce_exec_policy; /* Reset the policy if it is null */ if 
(stream_exec_policy == NULL) NVEC_HIP_CONTENT(x)->stream_exec_policy = DEFAULT_STREAMING_EXECPOLICY.clone(); else NVEC_HIP_CONTENT(x)->stream_exec_policy = stream_exec_policy->clone(); if (reduce_exec_policy == NULL) NVEC_HIP_CONTENT(x)->reduce_exec_policy = DEFAULT_REDUCTION_EXECPOLICY.clone(); else NVEC_HIP_CONTENT(x)->reduce_exec_policy = reduce_exec_policy->clone(); return(0); } /* ---------------------------------------------------------------------------- * Copy vector data to the device */ void N_VCopyToDevice_Hip(N_Vector x) { int copy_fail; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_HIP_MEMHELP(x), NVEC_HIP_CONTENT(x)->device_data, NVEC_HIP_CONTENT(x)->host_data, NVEC_HIP_MEMSIZE(x), (void*) NVEC_HIP_STREAM(x)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyToDevice_Hip: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ SUNDIALS_HIP_VERIFY(hipStreamSynchronize(*NVEC_HIP_STREAM(x))); } /* ---------------------------------------------------------------------------- * Copy vector data from the device to the host */ void N_VCopyFromDevice_Hip(N_Vector x) { int copy_fail; copy_fail = SUNMemoryHelper_CopyAsync(NVEC_HIP_MEMHELP(x), NVEC_HIP_CONTENT(x)->host_data, NVEC_HIP_CONTENT(x)->device_data, NVEC_HIP_MEMSIZE(x), (void*) NVEC_HIP_STREAM(x)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyFromDevice_Hip: SUNMemoryHelper_CopyAsync returned nonzero\n"); } /* we synchronize with respect to the host, but only in this stream */ SUNDIALS_HIP_VERIFY(hipStreamSynchronize(*NVEC_HIP_STREAM(x))); } /* ---------------------------------------------------------------------------- * Function to print the a CUDA-based vector to stdout */ void N_VPrint_Hip(N_Vector x) { N_VPrintFile_Hip(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print the a CUDA-based vector to outfile */ void N_VPrintFile_Hip(N_Vector x, FILE *outfile) { 
sunindextype i; #ifdef SUNDIALS_DEBUG_PRINTVEC N_VCopyFromDevice_Hip(x); #endif for (i = 0; i < NVEC_HIP_CONTENT(x)->length; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%35.32Le\n", NVEC_HIP_HDATAp(x)[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%19.16e\n", NVEC_HIP_HDATAp(x)[i]); #else fprintf(outfile, "%11.8e\n", NVEC_HIP_HDATAp(x)[i]); #endif } fprintf(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Hip(N_Vector w) { N_Vector v; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty_Hip(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Set content */ NVEC_HIP_CONTENT(v)->length = NVEC_HIP_CONTENT(w)->length; NVEC_HIP_PRIVATE(v)->use_managed_mem = NVEC_HIP_PRIVATE(w)->use_managed_mem; return(v); } N_Vector N_VClone_Hip(N_Vector w) { N_Vector v; v = NULL; v = N_VCloneEmpty_Hip(w); if (v == NULL) return(NULL); NVEC_HIP_MEMHELP(v) = SUNMemoryHelper_Clone(NVEC_HIP_MEMHELP(w)); NVEC_HIP_CONTENT(v)->own_helper = SUNTRUE; NVEC_HIP_CONTENT(v)->stream_exec_policy = NVEC_HIP_CONTENT(w)->stream_exec_policy->clone(); NVEC_HIP_CONTENT(v)->reduce_exec_policy = NVEC_HIP_CONTENT(w)->reduce_exec_policy->clone(); if (NVEC_HIP_MEMHELP(v) == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Hip: SUNMemoryHelper_Clone returned NULL\n"); N_VDestroy(v); return(NULL); } if (AllocateData(v)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Hip: AllocateData returned nonzero\n"); N_VDestroy(v); return(NULL); } return(v); } void N_VDestroy_Hip(N_Vector v) { N_VectorContent_Hip vc; N_PrivateVectorContent_Hip vcp; if (v == NULL) return; /* free ops structure */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } /* extract content */ vc = NVEC_HIP_CONTENT(v); if (vc == NULL) { free(v); v = 
NULL; return; } /* free private content */ vcp = (N_PrivateVectorContent_Hip) vc->priv; if (vcp != NULL) { /* free items in private content */ FreeDeviceCounter(v); FreeReductionBuffer(v); free(vcp); vc->priv = NULL; } /* free items in content */ if (NVEC_HIP_MEMHELP(v)) { SUNMemoryHelper_Dealloc(NVEC_HIP_MEMHELP(v), vc->host_data, (void*) NVEC_HIP_STREAM(v)); vc->host_data = NULL; SUNMemoryHelper_Dealloc(NVEC_HIP_MEMHELP(v), vc->device_data, (void*) NVEC_HIP_STREAM(v)); vc->device_data = NULL; if (vc->own_helper) SUNMemoryHelper_Destroy(vc->mem_helper); vc->mem_helper = NULL; } /* we can delete the exec policies now that we are done with the streams */ delete vc->stream_exec_policy; delete vc->reduce_exec_policy; /* free content struct */ free(vc); /* free vector */ free(v); return; } void N_VSpace_Hip(N_Vector X, sunindextype *lrw, sunindextype *liw) { *lrw = NVEC_HIP_CONTENT(X)->length; *liw = 2; } void N_VConst_Hip(realtype a, N_Vector X) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConst_Hip: GetKernelParameters returned nonzero\n"); } setConstKernel<<>> ( a, NVEC_HIP_DDATAp(X), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } void N_VLinearSum_Hip(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSum_Hip: GetKernelParameters returned nonzero\n"); } linearSumKernel<<>> ( a, NVEC_HIP_DDATAp(X), b, NVEC_HIP_DDATAp(Y), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } void N_VProd_Hip(N_Vector X, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VProd_Hip: GetKernelParameters returned nonzero\n"); } prodKernel<<>> ( 
NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Y), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } void N_VDiv_Hip(N_Vector X, N_Vector Y, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDiv_Hip: GetKernelParameters returned nonzero\n"); } divKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Y), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } void N_VScale_Hip(realtype a, N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScale_Hip: GetKernelParameters returned nonzero\n"); } scaleKernel<<>> ( a, NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } void N_VAbs_Hip(N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VAbs_Hip: GetKernelParameters returned nonzero\n"); } absKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } void N_VInv_Hip(N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInv_Hip: GetKernelParameters returned nonzero\n"); } invKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } void N_VAddConst_Hip(N_Vector X, realtype b, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VAddConst_Hip: GetKernelParameters returned nonzero\n"); } addConstKernel<<>> ( b, NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } realtype 
N_VDotProd_Hip(N_Vector X, N_Vector Y) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Hip: GetKernelParameters returned nonzero\n"); } // When using atomic reductions, we only need one output value const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { dotProdKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Y), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { dotProdKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Y), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return gpu_result; } realtype N_VMaxNorm_Hip(N_Vector X) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Hip: GetKernelParameters returned nonzero\n"); } // When using atomic reductions, we only need one output value const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { maxNormKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { maxNormKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Finish reduction on CPU if there are less than two blocks of data left. 
CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWSqrSumLocal_Hip(N_Vector X, N_Vector W) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Hip: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { wL2NormSquareKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(W), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { wL2NormSquareKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(W), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWrmsNorm_Hip(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Hip(X, W); return std::sqrt(sum/NVEC_HIP_CONTENT(X)->length); } realtype N_VWSqrSumMaskLocal_Hip(N_Vector X, N_Vector W, N_Vector Id) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Hip: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { wL2NormSquareMaskKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(W), NVEC_HIP_DDATAp(Id), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { wL2NormSquareMaskKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(W), NVEC_HIP_DDATAp(Id), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWrmsNormMask_Hip(N_Vector X, N_Vector W, N_Vector Id) { const realtype sum = N_VWSqrSumMaskLocal_Hip(X, W, Id); return std::sqrt(sum/NVEC_HIP_CONTENT(X)->length); } realtype N_VMin_Hip(N_Vector X) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = std::numeric_limits::max(); if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Hip: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { findMinKernel<<>> ( gpu_result, NVEC_HIP_DDATAp(X), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { findMinKernel<<>> ( gpu_result, NVEC_HIP_DDATAp(X), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return gpu_result; } realtype N_VWL2Norm_Hip(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Hip(X, W); return std::sqrt(sum); } realtype N_VL1Norm_Hip(N_Vector X) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Hip: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { L1NormKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { L1NormKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return gpu_result; } void N_VCompare_Hip(realtype c, N_Vector X, N_Vector Z) { size_t grid, block, shMemSize; hipStream_t stream; if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VCompare_Hip: GetKernelParameters returned nonzero\n"); } compareKernel<<>> ( c, NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); } booleantype N_VInvTest_Hip(N_Vector X, N_Vector Z) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Hip: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { invTestKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Z), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { invTestKernel<<>> ( NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(Z), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return (gpu_result < HALF); } booleantype N_VConstrMask_Hip(N_Vector C, N_Vector X, N_Vector M) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = ZERO; if (GetKernelParameters(X, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Hip: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 1 : grid; if (InitializeReductionBuffer(X, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { constrMaskKernel<<>> ( NVEC_HIP_DDATAp(C), NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(M), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, nullptr ); } else { constrMaskKernel<<>> ( NVEC_HIP_DDATAp(C), NVEC_HIP_DDATAp(X), NVEC_HIP_DDATAp(M), NVEC_HIP_DBUFFERp(X), NVEC_HIP_CONTENT(X)->length, NVEC_HIP_DCOUNTERp(X) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(X); gpu_result = NVEC_HIP_HBUFFERp(X)[0]; return (gpu_result < HALF); } realtype N_VMinQuotient_Hip(N_Vector num, N_Vector denom) { bool atomic; size_t grid, block, shMemSize; hipStream_t stream; realtype gpu_result = std::numeric_limits::max();; if (GetKernelParameters(num, true, grid, block, shMemSize, stream, atomic)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Hip: GetKernelParameters returned nonzero\n"); } const size_t buffer_size = atomic ? 
1 : grid; if (InitializeReductionBuffer(num, gpu_result, buffer_size)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Hip: InitializeReductionBuffer returned nonzero\n"); } if (atomic) { minQuotientKernel<<>> ( gpu_result, NVEC_HIP_DDATAp(num), NVEC_HIP_DDATAp(denom), NVEC_HIP_DBUFFERp(num), NVEC_HIP_CONTENT(num)->length, nullptr ); } else { minQuotientKernel<<>> ( gpu_result, NVEC_HIP_DDATAp(num), NVEC_HIP_DDATAp(denom), NVEC_HIP_DBUFFERp(num), NVEC_HIP_CONTENT(num)->length, NVEC_HIP_DCOUNTERp(num) ); } PostKernelLaunch(); // Get result from the GPU CopyReductionBufferFromDevice(num); gpu_result = NVEC_HIP_HBUFFERp(num)[0]; return gpu_result; } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_Hip(int nvec, realtype* c, N_Vector* X, N_Vector Z) { hipError_t err; // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nvec*sizeof(realtype)); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int i=0; i>>( nvec, d_c, d_Xd, NVEC_HIP_DDATAp(Z), NVEC_HIP_CONTENT(Z)->length ); PostKernelLaunch(); // Free host array delete[] h_Xd; // Free device arrays err = hipFree(d_c); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Xd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VScaleAddMulti_Hip(int nvec, realtype* c, N_Vector X, N_Vector* Y, N_Vector* Z) { hipError_t err; // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nvec*sizeof(realtype)); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Create array of device pointers on host realtype** h_Yd = new realtype*[nvec]; for 
(int i=0; i>>( nvec, d_c, NVEC_HIP_DDATAp(X), d_Yd, d_Zd, NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); // Free host array delete[] h_Yd; delete[] h_Zd; // Free device arrays err = hipFree(d_c); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Yd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Zd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VDotProdMulti_Hip(int nvec, N_Vector X, N_Vector* Y, realtype* dots) { hipError_t err; // Create array of device pointers on host realtype** h_Yd = new realtype*[nvec]; for (int i=0; i<<>>( nvec, NVEC_HIP_DDATAp(X), d_Yd, d_buff, NVEC_HIP_CONTENT(X)->length ); PostKernelLaunch(); // Copy GPU result to the cpu. err = hipMemcpy(dots, d_buff, grid*sizeof(realtype), hipMemcpyDeviceToHost); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Free host array delete[] h_Yd; // Free device arrays err = hipFree(d_Yd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_buff); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } /* * ----------------------------------------------------------------------------- * vector array operations * ----------------------------------------------------------------------------- */ int N_VLinearSumVectorArray_Hip(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { hipError_t err; // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int i=0; i>>( nvec, a, d_Xd, b, d_Yd, d_Zd, NVEC_HIP_CONTENT(Z[0])->length ); PostKernelLaunch(); // Free host array delete[] h_Xd; delete[] h_Yd; delete[] h_Zd; // Free device arrays err = hipFree(d_Xd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Yd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Zd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VScaleVectorArray_Hip(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { hipError_t err; // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, 
nvec*sizeof(realtype)); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int i=0; i>>( nvec, d_c, d_Xd, d_Zd, NVEC_HIP_CONTENT(Z[0])->length ); PostKernelLaunch(); // Free host array delete[] h_Xd; delete[] h_Zd; // Free device arrays err = hipFree(d_c); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Xd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Zd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VConstVectorArray_Hip(int nvec, realtype c, N_Vector* Z) { hipError_t err; // Create array of device pointers on host realtype** h_Zd = new realtype*[nvec]; for (int i=0; i>>( nvec, c, d_Zd, NVEC_HIP_CONTENT(Z[0])->length ); PostKernelLaunch(); // Free host array delete[] h_Zd; // Free device arrays err = hipFree(d_Zd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VWrmsNormVectorArray_Hip(int nvec, N_Vector* X, N_Vector* W, realtype* norms) { hipError_t err; // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int i=0; i<<>>( nvec, d_Xd, d_Wd, d_buff, NVEC_HIP_CONTENT(X[0])->length ); PostKernelLaunch(); // Copy GPU result to the cpu. 
err = hipMemcpy(norms, d_buff, grid*sizeof(realtype), hipMemcpyDeviceToHost); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Finish computation for (int k=0; klength); // Free host array delete[] h_Xd; delete[] h_Wd; // Free device arrays err = hipFree(d_Xd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Wd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_buff); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VWrmsNormMaskVectorArray_Hip(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* norms) { hipError_t err; // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int i=0; i<<>>( nvec, d_Xd, d_Wd, NVEC_HIP_DDATAp(id), d_buff, NVEC_HIP_CONTENT(X[0])->length ); PostKernelLaunch(); // Copy GPU result to the cpu. err = hipMemcpy(norms, d_buff, grid*sizeof(realtype), hipMemcpyDeviceToHost); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Finish computation for (int k=0; klength); // Free host array delete[] h_Xd; delete[] h_Wd; // Free device arrays err = hipFree(d_Xd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Wd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_buff); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VScaleAddMultiVectorArray_Hip(int nvec, int nsum, realtype* c, N_Vector* X, N_Vector** Y, N_Vector** Z) { hipError_t err; // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nsum*sizeof(realtype)); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipMemcpy(d_c, c, nsum*sizeof(realtype), hipMemcpyHostToDevice); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int i=0; i>>( nvec, nsum, d_c, d_Xd, d_Yd, d_Zd, NVEC_HIP_CONTENT(Z[0][0])->length ); PostKernelLaunch(); // Free host array delete[] h_Xd; delete[] h_Yd; delete[] h_Zd; // Free device arrays err = hipFree(d_c); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Xd); if 
(!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Yd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Zd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return(0); } int N_VLinearCombinationVectorArray_Hip(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { hipError_t err; // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nsum*sizeof(realtype)); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipMemcpy(d_c, c, nsum*sizeof(realtype), hipMemcpyHostToDevice); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); // Create array of device pointers on host realtype** h_Xd = new realtype*[nsum*nvec]; for (int j=0; j>>( nvec, nsum, d_c, d_Xd, d_Zd, NVEC_HIP_CONTENT(Z[0])->length ); PostKernelLaunch(); // Free host array delete[] h_Xd; delete[] h_Zd; // Free device arrays err = hipFree(d_c); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Xd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); err = hipFree(d_Zd); if (!SUNDIALS_HIP_VERIFY(err)) return(-1); return hipGetLastError(); } /* * ----------------------------------------------------------------- * OPTIONAL XBraid interface operations * ----------------------------------------------------------------- */ int N_VBufSize_Hip(N_Vector x, sunindextype *size) { if (x == NULL) return(-1); *size = (sunindextype)NVEC_HIP_MEMSIZE(x); return(0); } int N_VBufPack_Hip(N_Vector x, void *buf) { int copy_fail = 0; hipError_t cuerr; if (x == NULL || buf == NULL) return(-1); SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return(-1); copy_fail = SUNMemoryHelper_CopyAsync(NVEC_HIP_MEMHELP(x), buf_mem, NVEC_HIP_CONTENT(x)->device_data, NVEC_HIP_MEMSIZE(x), (void*) NVEC_HIP_STREAM(x)); /* we synchronize with respect to the host, but only in this stream */ cuerr = hipStreamSynchronize(*NVEC_HIP_STREAM(x)); SUNMemoryHelper_Dealloc(NVEC_HIP_MEMHELP(x), buf_mem, (void*) NVEC_HIP_STREAM(x)); return (!SUNDIALS_HIP_VERIFY(cuerr) || copy_fail ? 
-1 : 0); } int N_VBufUnpack_Hip(N_Vector x, void *buf) { int copy_fail = 0; hipError_t cuerr; if (x == NULL || buf == NULL) return(-1); SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return(-1); copy_fail = SUNMemoryHelper_CopyAsync(NVEC_HIP_MEMHELP(x), NVEC_HIP_CONTENT(x)->device_data, buf_mem, NVEC_HIP_MEMSIZE(x), (void*) NVEC_HIP_STREAM(x)); /* we synchronize with respect to the host, but only in this stream */ cuerr = hipStreamSynchronize(*NVEC_HIP_STREAM(x)); SUNMemoryHelper_Dealloc(NVEC_HIP_MEMHELP(x), buf_mem, (void*) NVEC_HIP_STREAM(x)); return (!SUNDIALS_HIP_VERIFY(cuerr) || copy_fail ? -1 : 0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Hip; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Hip; v->ops->nvdotprodmulti = N_VDotProdMulti_Hip; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Hip; v->ops->nvscalevectorarray = N_VScaleVectorArray_Hip; v->ops->nvconstvectorarray = N_VConstVectorArray_Hip; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Hip; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Hip; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Hip; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Hip; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_Hip; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector 
array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_Hip; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_Hip; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) { v->ops->nvdotprodmulti = N_VDotProdMulti_Hip; v->ops->nvdotprodmultilocal = N_VDotProdMulti_Hip; } else { v->ops->nvdotprodmulti = NULL; v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearSumVectorArray_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Hip; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int 
N_VEnableScaleVectorArray_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_Hip; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_Hip; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Hip; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Hip; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_Hip(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Hip; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_Hip(N_Vector v, booleantype tf) { /* check 
that vector is non-NULL */ if (v == NULL) return(-1);
  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);
  /* enable/disable operation */
  if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Hip;
  else v->ops->nvlinearcombinationvectorarray = NULL;
  /* return success */
  return(0);
}

} // extern "C"

/*
 * Private helper functions.
 */

/* Allocate the vector's data array(s) through the vector's SUNMemoryHelper.
 * In managed-memory mode a single SUNMEMTYPE_UVM allocation backs the
 * device data and the host handle is an alias of it; otherwise separate
 * SUNMEMTYPE_HOST and SUNMEMTYPE_DEVICE buffers of NVEC_HIP_MEMSIZE(v)
 * bytes are allocated. A zero-length vector allocates nothing. Returns 0
 * on success and -1 if any allocation failed (note: in unmanaged mode the
 * return reflects only the last allocation attempt). */
int AllocateData(N_Vector v)
{
  int alloc_fail = 0;
  N_VectorContent_Hip vc = NVEC_HIP_CONTENT(v);
  N_PrivateVectorContent_Hip vcp = NVEC_HIP_PRIVATE(v);

  /* nothing to allocate for an empty vector */
  if (N_VGetLength_Hip(v) == 0) return(0);

  if (vcp->use_managed_mem)
  {
    /* one UVM allocation; the host handle aliases the device allocation */
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_HIP_MEMHELP(v), &(vc->device_data),
                                       NVEC_HIP_MEMSIZE(v), SUNMEMTYPE_UVM,
                                       (void*) NVEC_HIP_STREAM(v));
    if (alloc_fail)
    {
      SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed for SUNMEMTYPE_UVM\n");
    }
    vc->host_data = SUNMemoryHelper_Alias(vc->device_data);
  }
  else
  {
    /* separate host buffer ... */
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_HIP_MEMHELP(v), &(vc->host_data),
                                       NVEC_HIP_MEMSIZE(v), SUNMEMTYPE_HOST,
                                       (void*) NVEC_HIP_STREAM(v));
    if (alloc_fail)
    {
      SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n");
    }

    /* ... and separate device buffer of the same size */
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_HIP_MEMHELP(v), &(vc->device_data),
                                       NVEC_HIP_MEMSIZE(v), SUNMEMTYPE_DEVICE,
                                       (void*) NVEC_HIP_STREAM(v));
    if (alloc_fail)
    {
      SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n");
    }
  }

  return(alloc_fail ? -1 : 0);
}

/*
 * Initializes the internal buffer used for reductions.
 * If the buffer is already allocated, it will only be reallocated
 * if it is no longer large enough. This may occur if the length
 * of the vector is increased. The buffer is initialized to the
 * value given.
*/ static int InitializeReductionBuffer(N_Vector v, realtype value, size_t n) { int alloc_fail = 0; int copy_fail = 0; booleantype alloc_mem = SUNFALSE; size_t bytes = n * sizeof(realtype); // Get the vector private memory structure N_PrivateVectorContent_Hip vcp = NVEC_HIP_PRIVATE(v); // Check if the existing reduction memory is not large enough if (vcp->reduce_buffer_allocated_bytes < bytes) { FreeReductionBuffer(v); alloc_mem = SUNTRUE; } if (alloc_mem) { // Allocate pinned memory on the host alloc_fail = SUNMemoryHelper_Alloc(NVEC_HIP_MEMHELP(v), &(vcp->reduce_buffer_host), bytes, SUNMEMTYPE_PINNED, (void*) NVEC_HIP_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("WARNING in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n"); // If pinned alloc failed, allocate plain host memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_HIP_MEMHELP(v), &(vcp->reduce_buffer_host), bytes, SUNMEMTYPE_HOST, (void*) NVEC_HIP_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); } } // Allocate device memory alloc_fail = SUNMemoryHelper_Alloc(NVEC_HIP_MEMHELP(v), &(vcp->reduce_buffer_dev), bytes, SUNMEMTYPE_DEVICE, (void*) NVEC_HIP_STREAM(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); } } if (!alloc_fail) { // Store the size of the reduction memory buffer vcp->reduce_buffer_allocated_bytes = bytes; // Initialize the host memory with the value for (int i = 0; i < n; ++i) ((realtype*)vcp->reduce_buffer_host->ptr)[i] = value; // Initialize the device memory with the value copy_fail = SUNMemoryHelper_CopyAsync(NVEC_HIP_MEMHELP(v), vcp->reduce_buffer_dev, vcp->reduce_buffer_host, bytes, (void*) NVEC_HIP_STREAM(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_CopyAsync failed\n"); } } 
return((alloc_fail || copy_fail) ? -1 : 0); }

/* Free the reduction buffer.
 * Releases the device and host halves of the reduction scratch buffer (if
 * allocated) through the vector's SUNMemoryHelper and resets the recorded
 * capacity to zero, so a later InitializeReductionBuffer will reallocate.
 * A NULL private content pointer is tolerated. */
static void FreeReductionBuffer(N_Vector v)
{
  N_PrivateVectorContent_Hip vcp = NVEC_HIP_PRIVATE(v);

  if (vcp == NULL) return;

  // Free device mem
  if (vcp->reduce_buffer_dev != NULL)
    SUNMemoryHelper_Dealloc(NVEC_HIP_MEMHELP(v), vcp->reduce_buffer_dev,
                            (void*) NVEC_HIP_STREAM(v));
  vcp->reduce_buffer_dev = NULL;

  // Free host mem
  if (vcp->reduce_buffer_host != NULL)
    SUNMemoryHelper_Dealloc(NVEC_HIP_MEMHELP(v), vcp->reduce_buffer_host,
                            (void*) NVEC_HIP_STREAM(v));
  vcp->reduce_buffer_host = NULL;

  // Reset allocated memory size
  vcp->reduce_buffer_allocated_bytes = 0;
}

/* Copy the reduction buffer from the device to the host.
 * Copies n * sizeof(realtype) bytes asynchronously on the vector's stream,
 * then synchronizes that stream with the host so the host-side buffer is
 * valid on return. Returns 0 on success, -1 if either the copy or the
 * stream synchronization failed. */
static int CopyReductionBufferFromDevice(N_Vector v, size_t n)
{
  int copy_fail;
  hipError_t cuerr;

  copy_fail = SUNMemoryHelper_CopyAsync(NVEC_HIP_MEMHELP(v),
                                        NVEC_HIP_PRIVATE(v)->reduce_buffer_host,
                                        NVEC_HIP_PRIVATE(v)->reduce_buffer_dev,
                                        n * sizeof(realtype),
                                        (void*) NVEC_HIP_STREAM(v));

  if (copy_fail)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in CopyReductionBufferFromDevice: SUNMemoryHelper_CopyAsync returned nonzero\n");
  }

  /* we synchronize with respect to the host, but only in this stream */
  cuerr = hipStreamSynchronize(*NVEC_HIP_STREAM(v));

  return (!SUNDIALS_HIP_VERIFY(cuerr) || copy_fail ? -1 : 0);
}

/* Allocate (on first use) and zero the device-side counter consumed by the
 * non-atomic reduction path (see GetKernelParameters). */
static int InitializeDeviceCounter(N_Vector v)
{
  int retval = 0;
  /* AMD hardware does not seem to like atomicInc on pinned memory, so use device memory.
*/ if (NVEC_HIP_PRIVATE(v)->device_counter == NULL) { retval = SUNMemoryHelper_Alloc(NVEC_HIP_MEMHELP(v), &(NVEC_HIP_PRIVATE(v)->device_counter), sizeof(unsigned int), SUNMEMTYPE_DEVICE, (void*) NVEC_HIP_STREAM(v)); } hipMemsetAsync(NVEC_HIP_DCOUNTERp(v), 0, sizeof(unsigned int), *NVEC_HIP_STREAM(v)); return retval; } static int FreeDeviceCounter(N_Vector v) { int retval = 0; if (NVEC_HIP_PRIVATE(v)->device_counter) retval = SUNMemoryHelper_Dealloc(NVEC_HIP_MEMHELP(v), NVEC_HIP_PRIVATE(v)->device_counter, (void*) NVEC_HIP_STREAM(v)); return retval; } /* Get the kernel launch parameters based on the kernel type (reduction or not), * using the appropriate kernel execution policy. */ static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, hipStream_t& stream, bool& atomic, size_t n) { n = (n == 0) ? NVEC_HIP_CONTENT(v)->length : n; if (reduction) { SUNHipExecPolicy* reduce_exec_policy = NVEC_HIP_CONTENT(v)->reduce_exec_policy; grid = reduce_exec_policy->gridSize(n); block = reduce_exec_policy->blockSize(); shMemSize = 0; stream = *(reduce_exec_policy->stream()); atomic = reduce_exec_policy->atomic(); if (!atomic) { if (InitializeDeviceCounter(v)) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("SUNMemoryHelper_Alloc returned nonzero\n"); #endif return(-1); } } if (block % sundials::hip::WARP_SIZE) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the block size must be a multiple must be of the HIP warp size"); #endif return(-1); } } else { SUNHipExecPolicy* stream_exec_policy = NVEC_HIP_CONTENT(v)->stream_exec_policy; grid = stream_exec_policy->gridSize(n); block = stream_exec_policy->blockSize(); shMemSize = 0; stream = *(stream_exec_policy->stream()); atomic = false; } if (grid == 0) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the grid size must be > 0"); #endif return(-1); } if (block == 0) { #ifdef SUNDIALS_DEBUG throw std::runtime_error("the block size must be > 0"); #endif return(-1); } return(0); 
} static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block, size_t& shMemSize, hipStream_t& stream, size_t n) { bool atomic; return GetKernelParameters(v, reduction, grid, block, shMemSize, stream, atomic, n); } /* Should be called after a kernel launch. * If SUNDIALS_DEBUG_HIP_LASTERROR is not defined, then the function does nothing. * If it is defined, the function will synchronize and check the last HIP error. */ static void PostKernelLaunch() { #ifdef SUNDIALS_DEBUG_HIP_LASTERROR hipDeviceSynchronize(); SUNDIALS_HIP_VERIFY(hipGetLastError()); #endif } StanHeaders/src/nvector/parallel/0000755000176200001440000000000014511135055016516 5ustar liggesusersStanHeaders/src/nvector/parallel/fmod/0000755000176200001440000000000014511135055017443 5ustar liggesusersStanHeaders/src/nvector/parallel/fmod/fnvector_parallel_mod.c0000644000176200001440000007416414645137106024173 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define 
SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" #include "nvector/nvector_parallel.h" SWIGEXPORT N_Vector _wrap_FN_VNew_Parallel(int const *farg1, int64_t const *farg2, int64_t const *farg3, void *farg4) { N_Vector fresult ; MPI_Comm arg1 ; sunindextype arg2 ; sunindextype arg3 ; SUNContext arg4 = (SUNContext) 0 ; N_Vector result; #ifdef SUNDIALS_MPI_ENABLED arg1 = MPI_Comm_f2c((MPI_Fint)(*farg1)); #else arg1 = *farg1; #endif arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (SUNContext)(farg4); result = (N_Vector)N_VNew_Parallel(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VNewEmpty_Parallel(int const *farg1, int64_t const *farg2, int64_t const *farg3, void *farg4) { N_Vector fresult ; MPI_Comm arg1 ; sunindextype arg2 ; sunindextype 
arg3 ; SUNContext arg4 = (SUNContext) 0 ; N_Vector result; #ifdef SUNDIALS_MPI_ENABLED arg1 = MPI_Comm_f2c((MPI_Fint)(*farg1)); #else arg1 = *farg1; #endif arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (SUNContext)(farg4); result = (N_Vector)N_VNewEmpty_Parallel(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VMake_Parallel(int const *farg1, int64_t const *farg2, int64_t const *farg3, double *farg4, void *farg5) { N_Vector fresult ; MPI_Comm arg1 ; sunindextype arg2 ; sunindextype arg3 ; realtype *arg4 = (realtype *) 0 ; SUNContext arg5 = (SUNContext) 0 ; N_Vector result; #ifdef SUNDIALS_MPI_ENABLED arg1 = MPI_Comm_f2c((MPI_Fint)(*farg1)); #else arg1 = *farg1; #endif arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (realtype *)(farg4); arg5 = (SUNContext)(farg5); result = (N_Vector)N_VMake_Parallel(arg1,arg2,arg3,arg4,arg5); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLength_Parallel(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLength_Parallel(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLocalLength_Parallel(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLocalLength_Parallel(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VPrint_Parallel(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VPrint_Parallel(arg1); } SWIGEXPORT void _wrap_FN_VPrintFile_Parallel(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile_Parallel(arg1,arg2); } SWIGEXPORT int _wrap_FN_VGetVectorID_Parallel(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = 
(N_Vector_ID)N_VGetVectorID_Parallel(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FN_VCloneEmpty_Parallel(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VCloneEmpty_Parallel(arg1); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VClone_Parallel(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VClone_Parallel(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroy_Parallel(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VDestroy_Parallel(arg1); } SWIGEXPORT void _wrap_FN_VSpace_Parallel(N_Vector farg1, int64_t *farg2, int64_t *farg3) { N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; sunindextype *arg3 = (sunindextype *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); arg3 = (sunindextype *)(farg3); N_VSpace_Parallel(arg1,arg2,arg3); } SWIGEXPORT double * _wrap_FN_VGetArrayPointer_Parallel(N_Vector farg1) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); result = (realtype *)N_VGetArrayPointer_Parallel(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VSetArrayPointer_Parallel(double *farg1, N_Vector farg2) { realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); N_VSetArrayPointer_Parallel(arg1,arg2); } SWIGEXPORT void * _wrap_FN_VGetCommunicator_Parallel(N_Vector farg1) { void * fresult ; N_Vector arg1 = (N_Vector) 0 ; void *result = 0 ; arg1 = (N_Vector)(farg1); result = (void *)N_VGetCommunicator_Parallel(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VLinearSum_Parallel(double const *farg1, N_Vector farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; 
realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); N_VLinearSum_Parallel(arg1,arg2,arg3,arg4,arg5); } SWIGEXPORT void _wrap_FN_VConst_Parallel(double const *farg1, N_Vector farg2) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); N_VConst_Parallel(arg1,arg2); } SWIGEXPORT void _wrap_FN_VProd_Parallel(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VProd_Parallel(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VDiv_Parallel(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VDiv_Parallel(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VScale_Parallel(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VScale_Parallel(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VAbs_Parallel(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VAbs_Parallel(arg1,arg2); } SWIGEXPORT void _wrap_FN_VInv_Parallel(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VInv_Parallel(arg1,arg2); } SWIGEXPORT void _wrap_FN_VAddConst_Parallel(N_Vector farg1, double const *farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = 
(realtype)(*farg2); arg3 = (N_Vector)(farg3); N_VAddConst_Parallel(arg1,arg2,arg3); } SWIGEXPORT double _wrap_FN_VDotProd_Parallel(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProd_Parallel(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNorm_Parallel(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNorm_Parallel(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNorm_Parallel(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWrmsNorm_Parallel(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNormMask_Parallel(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWrmsNormMask_Parallel(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMin_Parallel(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMin_Parallel(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWL2Norm_Parallel(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWL2Norm_Parallel(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double 
_wrap_FN_VL1Norm_Parallel(N_Vector farg1) {
  /* SWIG-generated Fortran shim: forwards to N_VL1Norm_Parallel and returns
   * its realtype result as a double. */
  double fresult ;
  N_Vector arg1 = (N_Vector) 0 ;
  realtype result;
  arg1 = (N_Vector)(farg1);
  result = (realtype)N_VL1Norm_Parallel(arg1);
  fresult = (realtype)(result);
  return fresult;
}

/* Fortran shim: dereferences the scalar argument (Fortran passes by
 * reference) and forwards to N_VCompare_Parallel; no return value. */
SWIGEXPORT void _wrap_FN_VCompare_Parallel(double const *farg1, N_Vector farg2, N_Vector farg3) {
  realtype arg1 ;
  N_Vector arg2 = (N_Vector) 0 ;
  N_Vector arg3 = (N_Vector) 0 ;
  arg1 = (realtype)(*farg1);
  arg2 = (N_Vector)(farg2);
  arg3 = (N_Vector)(farg3);
  N_VCompare_Parallel(arg1,arg2,arg3);
}

/* Fortran shim: forwards to N_VInvTest_Parallel and returns its
 * booleantype result as an int. */
SWIGEXPORT int _wrap_FN_VInvTest_Parallel(N_Vector farg1, N_Vector farg2) {
  int fresult ;
  N_Vector arg1 = (N_Vector) 0 ;
  N_Vector arg2 = (N_Vector) 0 ;
  int result;
  arg1 = (N_Vector)(farg1);
  arg2 = (N_Vector)(farg2);
  result = (int)N_VInvTest_Parallel(arg1,arg2);
  fresult = (int)(result);
  return fresult;
}

/* Fortran shim: forwards to N_VConstrMask_Parallel and returns its
 * booleantype result as an int. */
SWIGEXPORT int _wrap_FN_VConstrMask_Parallel(N_Vector farg1, N_Vector farg2, N_Vector farg3) {
  int fresult ;
  N_Vector arg1 = (N_Vector) 0 ;
  N_Vector arg2 = (N_Vector) 0 ;
  N_Vector arg3 = (N_Vector) 0 ;
  int result;
  arg1 = (N_Vector)(farg1);
  arg2 = (N_Vector)(farg2);
  arg3 = (N_Vector)(farg3);
  result = (int)N_VConstrMask_Parallel(arg1,arg2,arg3);
  fresult = (int)(result);
  return fresult;
}

/* Fortran shim: forwards to N_VMinQuotient_Parallel and returns its
 * realtype result as a double. */
SWIGEXPORT double _wrap_FN_VMinQuotient_Parallel(N_Vector farg1, N_Vector farg2) {
  double fresult ;
  N_Vector arg1 = (N_Vector) 0 ;
  N_Vector arg2 = (N_Vector) 0 ;
  realtype result;
  arg1 = (N_Vector)(farg1);
  arg2 = (N_Vector)(farg2);
  result = (realtype)N_VMinQuotient_Parallel(arg1,arg2);
  fresult = (realtype)(result);
  return fresult;
}

/* Fortran shim: dereferences the scalar count, casts the opaque void*
 * vector-array argument, and forwards to N_VLinearCombination_Parallel. */
SWIGEXPORT int _wrap_FN_VLinearCombination_Parallel(int const *farg1, double *farg2, void *farg3, N_Vector farg4) {
  int fresult ;
  int arg1 ;
  realtype *arg2 = (realtype *) 0 ;
  N_Vector *arg3 = (N_Vector *) 0 ;
  N_Vector arg4 = (N_Vector) 0 ;
  int result;
  arg1 = (int)(*farg1);
  arg2 = (realtype *)(farg2);
  arg3 = (N_Vector *)(farg3);
  arg4 = (N_Vector)(farg4);
  result = (int)N_VLinearCombination_Parallel(arg1,arg2,arg3,arg4);
  fresult = (int)(result);
return fresult; } SWIGEXPORT int _wrap_FN_VScaleAddMulti_Parallel(int const *farg1, double *farg2, N_Vector farg3, void *farg4, void *farg5) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); result = (int)N_VScaleAddMulti_Parallel(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMulti_Parallel(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMulti_Parallel(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearSumVectorArray_Parallel(int const *farg1, double const *farg2, void *farg3, double const *farg4, void *farg5, void *farg6) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype arg4 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)N_VLinearSumVectorArray_Parallel(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleVectorArray_Parallel(int const *farg1, double *farg2, void *farg3, void *farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = 
(int)N_VScaleVectorArray_Parallel(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstVectorArray_Parallel(int const *farg1, double const *farg2, void *farg3) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)N_VConstVectorArray_Parallel(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormVectorArray_Parallel(int const *farg1, void *farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VWrmsNormVectorArray_Parallel(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormMaskVectorArray_Parallel(int const *farg1, void *farg2, void *farg3, N_Vector farg4, double *farg5) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype *)(farg5); result = (int)N_VWrmsNormMaskVectorArray_Parallel(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VDotProdLocal_Parallel(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProdLocal_Parallel(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNormLocal_Parallel(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); 
result = (realtype)N_VMaxNormLocal_Parallel(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinLocal_Parallel(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMinLocal_Parallel(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1NormLocal_Parallel(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1NormLocal_Parallel(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumLocal_Parallel(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWSqrSumLocal_Parallel(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumMaskLocal_Parallel(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWSqrSumMaskLocal_Parallel(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VInvTestLocal_Parallel(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTestLocal_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMaskLocal_Parallel(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = 
(int)N_VConstrMaskLocal_Parallel(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotientLocal_Parallel(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotientLocal_Parallel(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMultiLocal_Parallel(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMultiLocal_Parallel(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMultiAllReduce_Parallel(int const *farg1, N_Vector farg2, double *farg3) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype *arg3 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype *)(farg3); result = (int)N_VDotProdMultiAllReduce_Parallel(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufSize_Parallel(N_Vector farg1, int64_t *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); result = (int)N_VBufSize_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufPack_Parallel(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufPack_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufUnpack_Parallel(N_Vector farg1, void *farg2) { int fresult 
; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufUnpack_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableFusedOps_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableFusedOps_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearCombination_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearCombination_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleAddMulti_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleAddMulti_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMulti_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMulti_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearSumVectorArray_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearSumVectorArray_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleVectorArray_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = 
(int)N_VEnableScaleVectorArray_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableConstVectorArray_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableConstVectorArray_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormVectorArray_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormVectorArray_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormMaskVectorArray_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormMaskVectorArray_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMultiLocal_Parallel(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMultiLocal_Parallel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArray_Parallel(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArray_Parallel(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArrayEmpty_Parallel(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArrayEmpty_Parallel(arg1,arg2); fresult = result; 
return fresult; } SWIGEXPORT void _wrap_FN_VDestroyVectorArray_Parallel(void *farg1, int const *farg2) { N_Vector *arg1 = (N_Vector *) 0 ; int arg2 ; arg1 = (N_Vector *)(farg1); arg2 = (int)(*farg2); N_VDestroyVectorArray_Parallel(arg1,arg2); } StanHeaders/src/nvector/parallel/fmod/fnvector_parallel_mod.f900000644000176200001440000014261114645137106024340 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fnvector_parallel_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS public :: FN_VNew_Parallel public :: FN_VNewEmpty_Parallel public :: FN_VMake_Parallel public :: FN_VGetLength_Parallel public :: FN_VGetLocalLength_Parallel public :: FN_VPrint_Parallel public :: FN_VPrintFile_Parallel public :: FN_VGetVectorID_Parallel public :: FN_VCloneEmpty_Parallel public :: FN_VClone_Parallel public :: FN_VDestroy_Parallel public :: FN_VSpace_Parallel public :: FN_VGetArrayPointer_Parallel public :: FN_VSetArrayPointer_Parallel public :: FN_VGetCommunicator_Parallel public :: FN_VLinearSum_Parallel public :: FN_VConst_Parallel public :: FN_VProd_Parallel public :: FN_VDiv_Parallel public :: FN_VScale_Parallel public :: FN_VAbs_Parallel public :: FN_VInv_Parallel public :: FN_VAddConst_Parallel public :: FN_VDotProd_Parallel public :: FN_VMaxNorm_Parallel public :: FN_VWrmsNorm_Parallel public :: FN_VWrmsNormMask_Parallel public :: FN_VMin_Parallel public :: FN_VWL2Norm_Parallel public :: FN_VL1Norm_Parallel public :: FN_VCompare_Parallel public :: FN_VInvTest_Parallel public :: FN_VConstrMask_Parallel public :: FN_VMinQuotient_Parallel public :: FN_VLinearCombination_Parallel public :: FN_VScaleAddMulti_Parallel public :: FN_VDotProdMulti_Parallel public :: FN_VLinearSumVectorArray_Parallel public :: FN_VScaleVectorArray_Parallel public :: FN_VConstVectorArray_Parallel public :: FN_VWrmsNormVectorArray_Parallel public :: FN_VWrmsNormMaskVectorArray_Parallel public :: FN_VDotProdLocal_Parallel public :: FN_VMaxNormLocal_Parallel public :: FN_VMinLocal_Parallel public :: FN_VL1NormLocal_Parallel public :: FN_VWSqrSumLocal_Parallel public :: FN_VWSqrSumMaskLocal_Parallel public :: FN_VInvTestLocal_Parallel public :: FN_VConstrMaskLocal_Parallel public :: FN_VMinQuotientLocal_Parallel public :: FN_VDotProdMultiLocal_Parallel public :: FN_VDotProdMultiAllReduce_Parallel public :: FN_VBufSize_Parallel public :: FN_VBufPack_Parallel public :: FN_VBufUnpack_Parallel public :: FN_VEnableFusedOps_Parallel public :: 
FN_VEnableLinearCombination_Parallel public :: FN_VEnableScaleAddMulti_Parallel public :: FN_VEnableDotProdMulti_Parallel public :: FN_VEnableLinearSumVectorArray_Parallel public :: FN_VEnableScaleVectorArray_Parallel public :: FN_VEnableConstVectorArray_Parallel public :: FN_VEnableWrmsNormVectorArray_Parallel public :: FN_VEnableWrmsNormMaskVectorArray_Parallel public :: FN_VEnableDotProdMultiLocal_Parallel public :: FN_VCloneVectorArray_Parallel public :: FN_VCloneVectorArrayEmpty_Parallel public :: FN_VDestroyVectorArray_Parallel ! WRAPPER DECLARATIONS interface function swigc_FN_VNew_Parallel(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VNew_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FN_VNewEmpty_Parallel(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VNewEmpty_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FN_VMake_Parallel(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VMake_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR) :: fresult end function function swigc_FN_VGetLength_Parallel(farg1) & bind(C, name="_wrap_FN_VGetLength_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FN_VGetLocalLength_Parallel(farg1) & bind(C, name="_wrap_FN_VGetLocalLength_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: 
farg1 integer(C_INT64_T) :: fresult end function subroutine swigc_FN_VPrint_Parallel(farg1) & bind(C, name="_wrap_FN_VPrint_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VGetVectorID_Parallel(farg1) & bind(C, name="_wrap_FN_VGetVectorID_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FN_VCloneEmpty_Parallel(farg1) & bind(C, name="_wrap_FN_VCloneEmpty_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VClone_Parallel(farg1) & bind(C, name="_wrap_FN_VClone_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroy_Parallel(farg1) & bind(C, name="_wrap_FN_VDestroy_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VSpace_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSpace_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VGetArrayPointer_Parallel(farg1) & bind(C, name="_wrap_FN_VGetArrayPointer_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VSetArrayPointer_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VSetArrayPointer_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VGetCommunicator_Parallel(farg1) & bind(C, name="_wrap_FN_VGetCommunicator_Parallel") & result(fresult) use, intrinsic 
:: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VLinearSum_Parallel(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VLinearSum_Parallel") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 end subroutine subroutine swigc_FN_VConst_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VConst_Parallel") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VProd_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VProd_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VDiv_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDiv_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VScale_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VScale_Parallel") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VAbs_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VAbs_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VInv_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VInv_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VAddConst_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VAddConst_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VDotProd_Parallel(farg1, farg2) & bind(C, 
name="_wrap_FN_VDotProd_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNorm_Parallel(farg1) & bind(C, name="_wrap_FN_VMaxNorm_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNorm_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VWrmsNorm_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNormMask_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWrmsNormMask_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VMin_Parallel(farg1) & bind(C, name="_wrap_FN_VMin_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWL2Norm_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VWL2Norm_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1Norm_Parallel(farg1) & bind(C, name="_wrap_FN_VL1Norm_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_FN_VCompare_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VCompare_Parallel") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VInvTest_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VInvTest_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 
integer(C_INT) :: fresult end function function swigc_FN_VConstrMask_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMask_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotient_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotient_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VLinearCombination_Parallel(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VLinearCombination_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VScaleAddMulti_Parallel(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VScaleAddMulti_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMulti_Parallel(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMulti_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VLinearSumVectorArray_Parallel(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FN_VLinearSumVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 
integer(C_INT) :: fresult end function function swigc_FN_VScaleVectorArray_Parallel(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VScaleVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VConstVectorArray_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormVectorArray_Parallel(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VWrmsNormVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormMaskVectorArray_Parallel(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VWrmsNormMaskVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdLocal_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VDotProdLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNormLocal_Parallel(farg1) & bind(C, name="_wrap_FN_VMaxNormLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VMinLocal_Parallel(farg1) & bind(C, name="_wrap_FN_VMinLocal_Parallel") & result(fresult) use, intrinsic 
:: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1NormLocal_Parallel(farg1) & bind(C, name="_wrap_FN_VL1NormLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumLocal_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VWSqrSumLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumMaskLocal_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWSqrSumMaskLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VInvTestLocal_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VInvTestLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMaskLocal_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMaskLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotientLocal_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotientLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VDotProdMultiLocal_Parallel(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMultiLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function 
swigc_FN_VDotProdMultiAllReduce_Parallel(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDotProdMultiAllReduce_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VBufSize_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VBufSize_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufPack_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VBufPack_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufUnpack_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VBufUnpack_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableFusedOps_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableFusedOps_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearCombination_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearCombination_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleAddMulti_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleAddMulti_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMulti_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMulti_Parallel") & result(fresult) use, intrinsic :: 
ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearSumVectorArray_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearSumVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleVectorArray_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableConstVectorArray_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableConstVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormVectorArray_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormMaskVectorArray_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormMaskVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMultiLocal_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMultiLocal_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VCloneVectorArray_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArray_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), 
intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VCloneVectorArrayEmpty_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArrayEmpty_Parallel") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroyVectorArray_Parallel(farg1, farg2) & bind(C, name="_wrap_FN_VDestroyVectorArray_Parallel") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 end subroutine end interface contains ! MODULE SUBPROGRAMS function FN_VNew_Parallel(comm, local_length, global_length, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer :: comm integer(C_INT64_T), intent(in) :: local_length integer(C_INT64_T), intent(in) :: global_length type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 type(C_PTR) :: farg4 farg1 = int(comm, C_INT) farg2 = local_length farg3 = global_length farg4 = sunctx fresult = swigc_FN_VNew_Parallel(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FN_VNewEmpty_Parallel(comm, local_length, global_length, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer :: comm integer(C_INT64_T), intent(in) :: local_length integer(C_INT64_T), intent(in) :: global_length type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 type(C_PTR) :: farg4 farg1 = int(comm, C_INT) farg2 = local_length farg3 = global_length farg4 = sunctx fresult = swigc_FN_VNewEmpty_Parallel(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FN_VMake_Parallel(comm, local_length, global_length, v_data, sunctx) & result(swig_result) use, intrinsic :: 
ISO_C_BINDING type(N_Vector), pointer :: swig_result integer :: comm integer(C_INT64_T), intent(in) :: local_length integer(C_INT64_T), intent(in) :: global_length real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = int(comm, C_INT) farg2 = local_length farg3 = global_length farg4 = c_loc(v_data(1)) farg5 = sunctx fresult = swigc_FN_VMake_Parallel(farg1, farg2, farg3, farg4, farg5) call c_f_pointer(fresult, swig_result) end function function FN_VGetLength_Parallel(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLength_Parallel(farg1) swig_result = fresult end function function FN_VGetLocalLength_Parallel(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLocalLength_Parallel(farg1) swig_result = fresult end function subroutine FN_VPrint_Parallel(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VPrint_Parallel(farg1) end subroutine subroutine FN_VPrintFile_Parallel(v, outfile) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v) farg2 = outfile call swigc_FN_VPrintFile_Parallel(farg1, farg2) end subroutine function FN_VGetVectorID_Parallel(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(N_Vector_ID) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = 
swigc_FN_VGetVectorID_Parallel(farg1) swig_result = fresult end function function FN_VCloneEmpty_Parallel(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VCloneEmpty_Parallel(farg1) call c_f_pointer(fresult, swig_result) end function function FN_VClone_Parallel(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VClone_Parallel(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FN_VDestroy_Parallel(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VDestroy_Parallel(farg1) end subroutine subroutine FN_VSpace_Parallel(v, lrw, liw) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), dimension(*), target, intent(inout) :: lrw integer(C_INT64_T), dimension(*), target, intent(inout) :: liw type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(v) farg2 = c_loc(lrw(1)) farg3 = c_loc(liw(1)) call swigc_FN_VSpace_Parallel(farg1, farg2, farg3) end subroutine function FN_VGetArrayPointer_Parallel(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetArrayPointer_Parallel(farg1) call c_f_pointer(fresult, swig_result, [1]) end function subroutine FN_VSetArrayPointer_Parallel(v_data, v) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v_data(1)) farg2 = c_loc(v) 
call swigc_FN_VSetArrayPointer_Parallel(farg1, farg2) end subroutine function FN_VGetCommunicator_Parallel(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetCommunicator_Parallel(farg1) swig_result = fresult end function subroutine FN_VLinearSum_Parallel(a, x, b, y, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: a type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = a farg2 = c_loc(x) farg3 = b farg4 = c_loc(y) farg5 = c_loc(z) call swigc_FN_VLinearSum_Parallel(farg1, farg2, farg3, farg4, farg5) end subroutine subroutine FN_VConst_Parallel(c, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(z) call swigc_FN_VConst_Parallel(farg1, farg2) end subroutine subroutine FN_VProd_Parallel(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VProd_Parallel(farg1, farg2, farg3) end subroutine subroutine FN_VDiv_Parallel(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VDiv_Parallel(farg1, farg2, farg3) end subroutine subroutine FN_VScale_Parallel(c, x, z) use, 
intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VScale_Parallel(farg1, farg2, farg3) end subroutine subroutine FN_VAbs_Parallel(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VAbs_Parallel(farg1, farg2) end subroutine subroutine FN_VInv_Parallel(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VInv_Parallel(farg1, farg2) end subroutine subroutine FN_VAddConst_Parallel(x, b, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = b farg3 = c_loc(z) call swigc_FN_VAddConst_Parallel(farg1, farg2, farg3) end subroutine function FN_VDotProd_Parallel(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProd_Parallel(farg1, farg2) swig_result = fresult end function function FN_VMaxNorm_Parallel(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNorm_Parallel(farg1) swig_result = fresult end function function FN_VWrmsNorm_Parallel(x, w) & 
result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWrmsNorm_Parallel(farg1, farg2) swig_result = fresult end function function FN_VWrmsNormMask_Parallel(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWrmsNormMask_Parallel(farg1, farg2, farg3) swig_result = fresult end function function FN_VMin_Parallel(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMin_Parallel(farg1) swig_result = fresult end function function FN_VWL2Norm_Parallel(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWL2Norm_Parallel(farg1, farg2) swig_result = fresult end function function FN_VL1Norm_Parallel(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1Norm_Parallel(farg1) swig_result = fresult end function subroutine FN_VCompare_Parallel(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x 
type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VCompare_Parallel(farg1, farg2, farg3) end subroutine function FN_VInvTest_Parallel(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = swigc_FN_VInvTest_Parallel(farg1, farg2) swig_result = fresult end function function FN_VConstrMask_Parallel(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult = swigc_FN_VConstrMask_Parallel(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotient_Parallel(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotient_Parallel(farg1, farg2) swig_result = fresult end function function FN_VLinearCombination_Parallel(nvec, c, v, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: v type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = v farg4 = c_loc(z) fresult = 
swigc_FN_VLinearCombination_Parallel(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VScaleAddMulti_Parallel(nvec, a, x, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvec farg2 = c_loc(a(1)) farg3 = c_loc(x) farg4 = y farg5 = z fresult = swigc_FN_VScaleAddMulti_Parallel(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VDotProdMulti_Parallel(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMulti_Parallel(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VLinearSumVectorArray_Parallel(nvec, a, x, b, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), intent(in) :: a type(C_PTR) :: x real(C_DOUBLE), intent(in) :: b type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = nvec farg2 = a farg3 = x farg4 = b farg5 = y farg6 = z fresult = swigc_FN_VLinearSumVectorArray_Parallel(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function 
FN_VScaleVectorArray_Parallel(nvec, c, x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: x type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = x farg4 = z fresult = swigc_FN_VScaleVectorArray_Parallel(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VConstVectorArray_Parallel(nvecs, c, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvecs real(C_DOUBLE), intent(in) :: c type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = nvecs farg2 = c farg3 = z fresult = swigc_FN_VConstVectorArray_Parallel(farg1, farg2, farg3) swig_result = fresult end function function FN_VWrmsNormVectorArray_Parallel(nvecs, x, w, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvecs type(C_PTR) :: x type(C_PTR) :: w real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvecs farg2 = x farg3 = w farg4 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormVectorArray_Parallel(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VWrmsNormMaskVectorArray_Parallel(nvec, x, w, id, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(C_PTR) :: x type(C_PTR) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 
= nvec farg2 = x farg3 = w farg4 = c_loc(id) farg5 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormMaskVectorArray_Parallel(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VDotProdLocal_Parallel(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProdLocal_Parallel(farg1, farg2) swig_result = fresult end function function FN_VMaxNormLocal_Parallel(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNormLocal_Parallel(farg1) swig_result = fresult end function function FN_VMinLocal_Parallel(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMinLocal_Parallel(farg1) swig_result = fresult end function function FN_VL1NormLocal_Parallel(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1NormLocal_Parallel(farg1) swig_result = fresult end function function FN_VWSqrSumLocal_Parallel(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWSqrSumLocal_Parallel(farg1, farg2) swig_result = fresult end function function FN_VWSqrSumMaskLocal_Parallel(x, w, id) & result(swig_result) 
use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWSqrSumMaskLocal_Parallel(farg1, farg2, farg3) swig_result = fresult end function function FN_VInvTestLocal_Parallel(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = swigc_FN_VInvTestLocal_Parallel(farg1, farg2) swig_result = fresult end function function FN_VConstrMaskLocal_Parallel(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult = swigc_FN_VConstrMaskLocal_Parallel(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotientLocal_Parallel(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotientLocal_Parallel(farg1, farg2) swig_result = fresult end function function FN_VDotProdMultiLocal_Parallel(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x 
type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMultiLocal_Parallel(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VDotProdMultiAllReduce_Parallel(nvec_total, x, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec_total type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = nvec_total farg2 = c_loc(x) farg3 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMultiAllReduce_Parallel(farg1, farg2, farg3) swig_result = fresult end function function FN_VBufSize_Parallel(x, size) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x integer(C_INT64_T), dimension(*), target, intent(inout) :: size integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(size(1)) fresult = swigc_FN_VBufSize_Parallel(farg1, farg2) swig_result = fresult end function function FN_VBufPack_Parallel(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufPack_Parallel(farg1, farg2) swig_result = fresult end function function FN_VBufUnpack_Parallel(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) 
farg2 = buf fresult = swigc_FN_VBufUnpack_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableFusedOps_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableFusedOps_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearCombination_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearCombination_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableScaleAddMulti_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleAddMulti_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableDotProdMulti_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableDotProdMulti_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearSumVectorArray_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) 
:: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearSumVectorArray_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableScaleVectorArray_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleVectorArray_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableConstVectorArray_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableConstVectorArray_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormVectorArray_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormVectorArray_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormMaskVectorArray_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormMaskVectorArray_Parallel(farg1, farg2) swig_result = fresult end function function FN_VEnableDotProdMultiLocal_Parallel(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v 
integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableDotProdMultiLocal_Parallel(farg1, farg2) swig_result = fresult end function function FN_VCloneVectorArray_Parallel(count, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneVectorArray_Parallel(farg1, farg2) swig_result = fresult end function function FN_VCloneVectorArrayEmpty_Parallel(count, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneVectorArrayEmpty_Parallel(farg1, farg2) swig_result = fresult end function subroutine FN_VDestroyVectorArray_Parallel(vs, count) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: vs integer(C_INT), intent(in) :: count type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = vs farg2 = count call swigc_FN_VDestroyVectorArray_Parallel(farg1, farg2) end subroutine end module StanHeaders/src/nvector/parallel/nvector_parallel.c0000644000176200001440000015046714645137106022242 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a parallel MPI implementation * of the NVECTOR package. * -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_Parallel(N_Vector x, N_Vector z); /* z=x */ static void VSum_Parallel(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_Parallel(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_Parallel(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_Parallel(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_Parallel(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_Parallel(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_Parallel(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_Parallel(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void VScaleBy_Parallel(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_Parallel(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_Parallel(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_Parallel(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_Parallel(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_Parallel(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_Parallel(int nvec, realtype a, N_Vector* X, N_Vector* Y, 
N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_Parallel(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* Error Message */ #define BAD_N1 "N_VNew_Parallel -- Sum of local vector lengths differs from " #define BAD_N2 "input global length. \n\n" #define BAD_N BAD_N1 BAD_N2 /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. */ N_Vector_ID N_VGetVectorID_Parallel(N_Vector v) { return SUNDIALS_NVEC_PARALLEL; } /* ---------------------------------------------------------------- * Function to create a new parallel vector with empty data array */ N_Vector N_VNewEmpty_Parallel(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx) { N_Vector v; N_VectorContent_Parallel content; sunindextype n, Nsum; /* Compute global length as sum of local lengths */ n = local_length; MPI_Allreduce(&n, &Nsum, 1, MPI_SUNINDEXTYPE, MPI_SUM, comm); if (Nsum != global_length) { STAN_SUNDIALS_FPRINTF(stderr, BAD_N); return(NULL); } /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Parallel; v->ops->nvclone = N_VClone_Parallel; v->ops->nvcloneempty = N_VCloneEmpty_Parallel; v->ops->nvdestroy = N_VDestroy_Parallel; v->ops->nvspace = N_VSpace_Parallel; v->ops->nvgetarraypointer = N_VGetArrayPointer_Parallel; v->ops->nvsetarraypointer = N_VSetArrayPointer_Parallel; v->ops->nvgetcommunicator = N_VGetCommunicator_Parallel; v->ops->nvgetlength = N_VGetLength_Parallel; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Parallel; v->ops->nvconst = N_VConst_Parallel; v->ops->nvprod = 
N_VProd_Parallel; v->ops->nvdiv = N_VDiv_Parallel; v->ops->nvscale = N_VScale_Parallel; v->ops->nvabs = N_VAbs_Parallel; v->ops->nvinv = N_VInv_Parallel; v->ops->nvaddconst = N_VAddConst_Parallel; v->ops->nvdotprod = N_VDotProd_Parallel; v->ops->nvmaxnorm = N_VMaxNorm_Parallel; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Parallel; v->ops->nvwrmsnorm = N_VWrmsNorm_Parallel; v->ops->nvmin = N_VMin_Parallel; v->ops->nvwl2norm = N_VWL2Norm_Parallel; v->ops->nvl1norm = N_VL1Norm_Parallel; v->ops->nvcompare = N_VCompare_Parallel; v->ops->nvinvtest = N_VInvTest_Parallel; v->ops->nvconstrmask = N_VConstrMask_Parallel; v->ops->nvminquotient = N_VMinQuotient_Parallel; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProdLocal_Parallel; v->ops->nvmaxnormlocal = N_VMaxNormLocal_Parallel; v->ops->nvminlocal = N_VMinLocal_Parallel; v->ops->nvl1normlocal = N_VL1NormLocal_Parallel; v->ops->nvinvtestlocal = N_VInvTestLocal_Parallel; v->ops->nvconstrmasklocal = N_VConstrMaskLocal_Parallel; v->ops->nvminquotientlocal = N_VMinQuotientLocal_Parallel; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Parallel; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Parallel; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_Parallel; v->ops->nvdotprodmultiallreduce = N_VDotProdMultiAllReduce_Parallel; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_Parallel; v->ops->nvbufpack = N_VBufPack_Parallel; v->ops->nvbufunpack = N_VBufUnpack_Parallel; /* debugging functions */ v->ops->nvprint = N_VPrint_Parallel; v->ops->nvprintfile = N_VPrintFile_Parallel; /* Create content */ content = NULL; content = (N_VectorContent_Parallel) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->local_length = local_length; content->global_length = global_length; content->comm = comm; 
content->own_data = SUNFALSE; content->data = NULL; return(v); } /* ---------------------------------------------------------------- * Function to create a new parallel vector */ N_Vector N_VNew_Parallel(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx) { N_Vector v; realtype *data; v = NULL; v = N_VNewEmpty_Parallel(comm, local_length, global_length, sunctx); if (v == NULL) return(NULL); /* Create data */ if(local_length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(local_length * sizeof(realtype)); if(data == NULL) { N_VDestroy_Parallel(v); return(NULL); } /* Attach data */ NV_OWN_DATA_P(v) = SUNTRUE; NV_DATA_P(v) = data; } return(v); } /* ---------------------------------------------------------------- * Function to create a parallel N_Vector with user data component */ N_Vector N_VMake_Parallel(MPI_Comm comm, sunindextype local_length, sunindextype global_length, realtype *v_data, SUNContext sunctx) { N_Vector v; v = NULL; v = N_VNewEmpty_Parallel(comm, local_length, global_length, sunctx); if (v == NULL) return(NULL); if (local_length > 0) { /* Attach data */ NV_OWN_DATA_P(v) = SUNFALSE; NV_DATA_P(v) = v_data; } return(v); } /* ---------------------------------------------------------------- * Function to create an array of new parallel vectors. */ N_Vector* N_VCloneVectorArray_Parallel(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------- * Function to create an array of new parallel vectors with empty * (NULL) data array. 
*/ N_Vector* N_VCloneVectorArrayEmpty_Parallel(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_Parallel */ void N_VDestroyVectorArray_Parallel(N_Vector* vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------- * Function to return global vector length */ sunindextype N_VGetLength_Parallel(N_Vector v) { return NV_GLOBLENGTH_P(v); } /* ---------------------------------------------------------------- * Function to return local vector length */ sunindextype N_VGetLocalLength_Parallel(N_Vector v) { return NV_LOCLENGTH_P(v); } /* ---------------------------------------------------------------- * Function to print the local data in a parallel vector to stdout */ void N_VPrint_Parallel(N_Vector x) { N_VPrintFile_Parallel(x, stdout); } /* ---------------------------------------------------------------- * Function to print the local data in a parallel vector to outfile */ void N_VPrintFile_Parallel(N_Vector x, FILE* outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%35.32Le\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%19.16e\n", xd[i]); #else STAN_SUNDIALS_FPRINTF(outfile, "%11.8e\n", xd[i]); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Parallel(N_Vector w) { N_Vector v; N_VectorContent_Parallel content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, 
v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_Parallel) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->local_length = NV_LOCLENGTH_P(w); content->global_length = NV_GLOBLENGTH_P(w); content->comm = NV_COMM_P(w); content->own_data = SUNFALSE; content->data = NULL; return(v); } N_Vector N_VClone_Parallel(N_Vector w) { N_Vector v; realtype *data; sunindextype local_length; v = NULL; v = N_VCloneEmpty_Parallel(w); if (v == NULL) return(NULL); local_length = NV_LOCLENGTH_P(w); /* Create data */ if(local_length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(local_length * sizeof(realtype)); if(data == NULL) { N_VDestroy_Parallel(v); return(NULL); } /* Attach data */ NV_OWN_DATA_P(v) = SUNTRUE; NV_DATA_P(v) = data; } return(v); } void N_VDestroy_Parallel(N_Vector v) { if (v == NULL) return; /* free content */ if (v->content != NULL) { if (NV_OWN_DATA_P(v) && NV_DATA_P(v) != NULL) { free(NV_DATA_P(v)); NV_DATA_P(v) = NULL; } free(v->content); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace_Parallel(N_Vector v, sunindextype *lrw, sunindextype *liw) { MPI_Comm comm; int npes; comm = NV_COMM_P(v); MPI_Comm_size(comm, &npes); *lrw = NV_GLOBLENGTH_P(v); *liw = 2*npes; return; } realtype *N_VGetArrayPointer_Parallel(N_Vector v) { return((realtype *) NV_DATA_P(v)); } void N_VSetArrayPointer_Parallel(realtype *v_data, N_Vector v) { if (NV_LOCLENGTH_P(v) > 0) NV_DATA_P(v) = v_data; return; } void *N_VGetCommunicator_Parallel(N_Vector v) { return((void *) &(NV_COMM_P(v))); } void N_VLinearSum_Parallel(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i, N; realtype c, *xd, *yd, *zd; N_Vector v1, v2; booleantype test; xd = yd = zd = NULL; if ((b == ONE) && (z == y)) { /* BLAS usage: 
axpy y <- ax+y */ Vaxpy_Parallel(a, x, y); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ Vaxpy_Parallel(b, y, x); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_Parallel(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? x : y; VDiff_Parallel(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin1_Parallel(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin2_Parallel(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_Parallel(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_Parallel(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); yd = NV_DATA_P(y); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = (a*xd[i])+(b*yd[i]); return; } void N_VConst_Parallel(realtype c, N_Vector z) { sunindextype i, N; realtype *zd; zd = NULL; N = NV_LOCLENGTH_P(z); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = c; return; } void N_VProd_Parallel(N_Vector x, N_Vector y, N_Vector z) { sunindextype i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); yd = NV_DATA_P(y); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = xd[i]*yd[i]; return; } void N_VDiv_Parallel(N_Vector x, N_Vector y, N_Vector z) { 
sunindextype i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); yd = NV_DATA_P(y); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = xd[i]/yd[i]; return; } void N_VScale_Parallel(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_Parallel(c, x); return; } if (c == ONE) { VCopy_Parallel(x, z); } else if (c == -ONE) { VNeg_Parallel(x, z); } else { N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = c*xd[i]; } return; } void N_VAbs_Parallel(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = SUNRabs(xd[i]); return; } void N_VInv_Parallel(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = ONE/xd[i]; return; } void N_VAddConst_Parallel(N_Vector x, realtype b, N_Vector z) { sunindextype i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); zd = NV_DATA_P(z); for (i = 0; i < N; i++) zd[i] = xd[i]+b; return; } realtype N_VDotProdLocal_Parallel(N_Vector x, N_Vector y) { sunindextype i, N; realtype sum, *xd, *yd; sum = ZERO; xd = yd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); yd = NV_DATA_P(y); for (i = 0; i < N; i++) sum += xd[i]*yd[i]; return(sum); } realtype N_VDotProd_Parallel(N_Vector x, N_Vector y) { realtype lsum, gsum; lsum = N_VDotProdLocal_Parallel(x,y); MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_P(x)); return(gsum); } realtype N_VMaxNormLocal_Parallel(N_Vector x) { sunindextype i, N; realtype max, *xd; xd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); max = ZERO; for (i = 0; i < N; i++) if (SUNRabs(xd[i]) > max) max = SUNRabs(xd[i]); return(max); } realtype N_VMaxNorm_Parallel(N_Vector x) { 
realtype lmax, gmax; lmax = N_VMaxNormLocal_Parallel(x); MPI_Allreduce(&lmax, &gmax, 1, MPI_SUNREALTYPE, MPI_MAX, NV_COMM_P(x)); return(gmax); } realtype N_VWSqrSumLocal_Parallel(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, prodi, *xd, *wd; sum = ZERO; xd = wd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); wd = NV_DATA_P(w); for (i = 0; i < N; i++) { prodi = xd[i]*wd[i]; sum += SUNSQR(prodi); } return(sum); } realtype N_VWrmsNorm_Parallel(N_Vector x, N_Vector w) { realtype lsum, gsum; lsum = N_VWSqrSumLocal_Parallel(x, w); MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_P(x)); return(SUNRsqrt(gsum/(NV_GLOBLENGTH_P(x)))); } realtype N_VWSqrSumMaskLocal_Parallel(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, prodi, *xd, *wd, *idd; sum = ZERO; xd = wd = idd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); wd = NV_DATA_P(w); idd = NV_DATA_P(id); for (i = 0; i < N; i++) { if (idd[i] > ZERO) { prodi = xd[i]*wd[i]; sum += SUNSQR(prodi); } } return(sum); } realtype N_VWrmsNormMask_Parallel(N_Vector x, N_Vector w, N_Vector id) { realtype lsum, gsum; lsum = N_VWSqrSumMaskLocal_Parallel(x, w, id); MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_P(x)); return(SUNRsqrt(gsum/(NV_GLOBLENGTH_P(x)))); } realtype N_VMinLocal_Parallel(N_Vector x) { sunindextype i, N; realtype min, *xd; xd = NULL; N = NV_LOCLENGTH_P(x); min = BIG_REAL; if (N > 0) { xd = NV_DATA_P(x); min = xd[0]; for (i = 1; i < N; i++) if (xd[i] < min) min = xd[i]; } return(min); } realtype N_VMin_Parallel(N_Vector x) { realtype lmin, gmin; lmin = N_VMinLocal_Parallel(x); MPI_Allreduce(&lmin, &gmin, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_P(x)); return(gmin); } realtype N_VWL2Norm_Parallel(N_Vector x, N_Vector w) { realtype lsum, gsum; lsum = N_VWSqrSumLocal_Parallel(x, w); MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_P(x)); return(SUNRsqrt(gsum)); } realtype N_VL1NormLocal_Parallel(N_Vector x) { sunindextype i, N; realtype 
sum, *xd; sum = ZERO; xd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); for (i = 0; i= c) ? ONE : ZERO; } return; } booleantype N_VInvTestLocal_Parallel(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd, *zd, val; xd = zd = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); zd = NV_DATA_P(z); val = ONE; for (i = 0; i < N; i++) { if (xd[i] == ZERO) val = ZERO; else zd[i] = ONE/xd[i]; } if (val == ZERO) return(SUNFALSE); else return(SUNTRUE); } booleantype N_VInvTest_Parallel(N_Vector x, N_Vector z) { realtype val, gval; val = (N_VInvTestLocal_Parallel(x, z)) ? ONE : ZERO; MPI_Allreduce(&val, &gval, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_P(x)); if (gval == ZERO) return(SUNFALSE); else return(SUNTRUE); } booleantype N_VConstrMaskLocal_Parallel(N_Vector c, N_Vector x, N_Vector m) { sunindextype i, N; realtype temp; realtype *cd, *xd, *md; booleantype test; cd = xd = md = NULL; N = NV_LOCLENGTH_P(x); xd = NV_DATA_P(x); cd = NV_DATA_P(c); md = NV_DATA_P(m); temp = ZERO; for (i = 0; i < N; i++) { md[i] = ZERO; /* Continue if no constraints were set for the variable */ if (cd[i] == ZERO) continue; /* Check if a set constraint has been violated */ test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) || (SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO); if (test) { temp = md[i] = ONE; } } /* Return false if any constraint was violated */ return (temp == ONE) ? SUNFALSE : SUNTRUE; } booleantype N_VConstrMask_Parallel(N_Vector c, N_Vector x, N_Vector m) { realtype temp, temp2; temp = (N_VConstrMaskLocal_Parallel(c, x, m)) ? ZERO : ONE; MPI_Allreduce(&temp, &temp2, 1, MPI_SUNREALTYPE, MPI_MAX, NV_COMM_P(x)); return (temp2 == ONE) ? 
SUNFALSE : SUNTRUE; } realtype N_VMinQuotientLocal_Parallel(N_Vector num, N_Vector denom) { booleantype notEvenOnce; sunindextype i, N; realtype *nd, *dd, min; nd = dd = NULL; N = NV_LOCLENGTH_P(num); nd = NV_DATA_P(num); dd = NV_DATA_P(denom); notEvenOnce = SUNTRUE; min = BIG_REAL; for (i = 0; i < N; i++) { if (dd[i] == ZERO) continue; else { if (!notEvenOnce) min = SUNMIN(min, nd[i]/dd[i]); else { min = nd[i]/dd[i]; notEvenOnce = SUNFALSE; } } } return(min); } realtype N_VMinQuotient_Parallel(N_Vector num, N_Vector denom) { realtype lmin, gmin; lmin = N_VMinQuotientLocal_Parallel(num, denom); MPI_Allreduce(&lmin, &gmin, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_P(num)); return(gmin); } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_Parallel(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i; sunindextype j, N; realtype* zd=NULL; realtype* xd=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_Parallel(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_Parallel(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LOCLENGTH_P(z); zd = NV_DATA_P(z); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((X[0] == z) && (c[0] == ONE)) { for (i=1; i ZERO) nrm[i] += SUNSQR(xd[j] * wd[j]); } } retval = MPI_Allreduce(MPI_IN_PLACE, nrm, nvec, MPI_SUNREALTYPE, MPI_SUM, comm); for (i=0; i 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_Parallel(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LOCLENGTH_P(X[0]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { for (i=0; i 1 * 
-------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; jops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Parallel; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Parallel; v->ops->nvdotprodmulti = N_VDotProdMulti_Parallel; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Parallel; v->ops->nvscalevectorarray = N_VScaleVectorArray_Parallel; v->ops->nvconstvectorarray = N_VConstVectorArray_Parallel; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Parallel; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Parallel; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Parallel; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Parallel; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_Parallel; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_Parallel; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); 
} int N_VEnableScaleAddMulti_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_Parallel; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmulti = N_VDotProdMulti_Parallel; else v->ops->nvdotprodmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Parallel; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_Parallel; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_Parallel; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v 
== NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Parallel; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Parallel; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Parallel; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Parallel; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } int N_VEnableDotProdMultiLocal_Parallel(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_Parallel; else v->ops->nvdotprodmultilocal = NULL; /* return success */ return(0); } StanHeaders/src/nvector/trilinos/0000755000176200001440000000000014511135055016565 5ustar 
liggesusersStanHeaders/src/nvector/trilinos/nvector_trilinos.cpp0000644000176200001440000003615214645137106022712 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL * * Based on N_Vector_Parallel by Scott D. Cohen, Alan C. Hindmarsh, * Radu Serban, and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a Trilinos implementation * of the NVECTOR package. * -----------------------------------------------------------------*/ #include #include #include #include #include #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* * ----------------------------------------------------------------- * using statements * ----------------------------------------------------------------- */ using Teuchos::Comm; using Teuchos::RCP; using Teuchos::rcp; using Teuchos::outArg; using Teuchos::REDUCE_SUM; using Teuchos::reduceAll; using namespace sundials::trilinos::nvector_tpetra; /* * ----------------------------------------------------------------- * type definitions * ----------------------------------------------------------------- */ typedef TpetraVectorInterface::vector_type vector_type; /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/ N_Vector_ID N_VGetVectorID_Trilinos(N_Vector v) { return SUNDIALS_NVEC_TRILINOS; } /* ---------------------------------------------------------------- * Function to create a new Trilinos vector with empty data array */ N_Vector N_VNewEmpty_Trilinos(SUNContext sunctx) { N_Vector v; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Trilinos; v->ops->nvclone = N_VClone_Trilinos; v->ops->nvcloneempty = N_VCloneEmpty_Trilinos; v->ops->nvdestroy = N_VDestroy_Trilinos; v->ops->nvspace = N_VSpace_Trilinos; v->ops->nvgetcommunicator = N_VGetCommunicator_Trilinos; v->ops->nvgetlength = N_VGetLength_Trilinos; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Trilinos; v->ops->nvconst = N_VConst_Trilinos; v->ops->nvprod = N_VProd_Trilinos; v->ops->nvdiv = N_VDiv_Trilinos; v->ops->nvscale = N_VScale_Trilinos; v->ops->nvabs = N_VAbs_Trilinos; v->ops->nvinv = N_VInv_Trilinos; v->ops->nvaddconst = N_VAddConst_Trilinos; v->ops->nvdotprod = N_VDotProd_Trilinos; v->ops->nvmaxnorm = N_VMaxNorm_Trilinos; v->ops->nvwrmsnorm = N_VWrmsNorm_Trilinos; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Trilinos; v->ops->nvmin = N_VMin_Trilinos; v->ops->nvwl2norm = N_VWL2Norm_Trilinos; v->ops->nvl1norm = N_VL1Norm_Trilinos; v->ops->nvcompare = N_VCompare_Trilinos; v->ops->nvinvtest = N_VInvTest_Trilinos; v->ops->nvconstrmask = N_VConstrMask_Trilinos; v->ops->nvminquotient = N_VMinQuotient_Trilinos; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProdLocal_Trilinos; v->ops->nvmaxnormlocal = N_VMaxNormLocal_Trilinos; v->ops->nvminlocal = N_VMinLocal_Trilinos; v->ops->nvl1normlocal = N_VL1NormLocal_Trilinos; v->ops->nvinvtestlocal = N_VInvTestLocal_Trilinos; v->ops->nvconstrmasklocal = N_VConstrMaskLocal_Trilinos; 
v->ops->nvminquotientlocal = N_VMinQuotientLocal_Trilinos; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Trilinos; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Trilinos; return(v); } /* ---------------------------------------------------------------- * Function to create an N_Vector attachment to Tpetra vector. * void* argument is to allow for calling this method from C code. * */ N_Vector N_VMake_Trilinos(Teuchos::RCP vec, SUNContext sunctx) { N_Vector v = NULL; // Create an N_Vector with operators attached and empty content v = N_VNewEmpty_Trilinos(sunctx); if (v == NULL) return(NULL); // Create vector content using a pointer to Tpetra vector v->content = new TpetraVectorInterface(vec); if (v->content == NULL) { N_VDestroy(v); return NULL; } return(v); } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Trilinos(N_Vector w) { N_Vector v; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VClone_Trilinos(N_Vector w) { N_Vector v = N_VCloneEmpty_Trilinos(w); if (v == NULL) return(NULL); // Get raw pointer to Tpetra vector Teuchos::RCP wvec = N_VGetVector_Trilinos(w); // Clone wvec and get raw pointer to the clone Teuchos::RCP tvec = Teuchos::rcp(new vector_type(*wvec, Teuchos::Copy)); // Create vector content using the raw pointer to the cloned Tpetra vector v->content = new TpetraVectorInterface(tvec); if (v->content == NULL) { N_VDestroy(v); return NULL; } return(v); } void N_VDestroy_Trilinos(N_Vector v) { if (v == NULL) return; if(v->content != NULL) { TpetraVectorInterface* iface = reinterpret_cast(v->content); // iface was created with 'new', so use 'delete' to destroy it. 
delete iface; v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace_Trilinos(N_Vector x, sunindextype *lrw, sunindextype *liw) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); const Teuchos::RCP >& comm = xv->getMap()->getComm(); int npes = comm->getSize(); *lrw = (sunindextype)(xv->getGlobalLength()); *liw = 2*npes; } /* * MPI communicator accessor */ void *N_VGetCommunicator_Trilinos(N_Vector x) { #ifdef SUNDIALS_TRILINOS_HAVE_MPI Teuchos::RCP xv = N_VGetVector_Trilinos(x); /* Access Teuchos::Comm* (which is actually a Teuchos::MpiComm*) */ auto comm = Teuchos::rcp_dynamic_cast>(xv->getMap()->getComm()); return((void*) comm->getRawMpiComm().get()); /* extract raw pointer to MPI_Comm */ #else return(NULL); #endif } /* * Global vector length accessor */ sunindextype N_VGetLength_Trilinos(N_Vector x) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); return ((sunindextype) xv->getGlobalLength()); } /* * Linear combination of two vectors: z = a*x + b*y */ void N_VLinearSum_Trilinos(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP yv = N_VGetVector_Trilinos(y); Teuchos::RCP zv = N_VGetVector_Trilinos(z); if (x == z) { zv->update(b, *yv, a); } else if (y == z) { zv->update(a, *xv, b); } else { zv->update(a, *xv, b, *yv, ZERO); } } /* * Set all vector elements to a constant: z[i] = c */ void N_VConst_Trilinos(realtype c, N_Vector z) { Teuchos::RCP zv = N_VGetVector_Trilinos(z); zv->putScalar(c); } /* * Elementwise multiply vectors: z[i] = x[i]*y[i] */ void N_VProd_Trilinos(N_Vector x, N_Vector y, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP yv = N_VGetVector_Trilinos(y); Teuchos::RCP zv = N_VGetVector_Trilinos(z); zv->elementWiseMultiply(ONE, *xv, *yv, ZERO); } /* * Elementwise divide vectors: z[i] = x[i]/y[i] */ void N_VDiv_Trilinos(N_Vector x, N_Vector y, N_Vector z) { Teuchos::RCP xv = 
N_VGetVector_Trilinos(x); Teuchos::RCP yv = N_VGetVector_Trilinos(y); Teuchos::RCP zv = N_VGetVector_Trilinos(z); elementWiseDivide(*xv, *yv, *zv); } /* * Scale vector: z = c*x */ void N_VScale_Trilinos(realtype c, N_Vector x, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP zv = N_VGetVector_Trilinos(z); zv->scale(c, *xv); } /* * Elementwise absolute value: z[i] = |x[i]| */ void N_VAbs_Trilinos(N_Vector x, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP zv = N_VGetVector_Trilinos(z); zv->abs(*xv); } /* * Elementwise inverse: z[i] = 1/x[i] */ void N_VInv_Trilinos(N_Vector x, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP zv = N_VGetVector_Trilinos(z); zv->reciprocal(*xv); } /* * Add constant: z = x + b */ void N_VAddConst_Trilinos(N_Vector x, realtype b, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP zv = N_VGetVector_Trilinos(z); addConst(*xv, b, *zv); } /* * Scalar product of vectors x and y */ realtype N_VDotProd_Trilinos(N_Vector x, N_Vector y) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP yv = N_VGetVector_Trilinos(y); return xv->dot(*yv); } /* * Max norm (L infinity) of vector x */ realtype N_VMaxNorm_Trilinos(N_Vector x) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); return xv->normInf(); } /* * Weighted RMS norm */ realtype N_VWrmsNorm_Trilinos(N_Vector x, N_Vector w) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP wv = N_VGetVector_Trilinos(w); return normWrms(*xv, *wv); } /* * Masked weighted RMS norm */ realtype N_VWrmsNormMask_Trilinos(N_Vector x, N_Vector w, N_Vector id) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP wv = N_VGetVector_Trilinos(w); Teuchos::RCP idv = N_VGetVector_Trilinos(id); return normWrmsMask(*xv, *wv, *idv); } /* * Returns minimum vector element */ realtype N_VMin_Trilinos(N_Vector x) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); return minElement(*xv); } /* * Weighted L2 norm */ realtype 
N_VWL2Norm_Trilinos(N_Vector x, N_Vector w) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP wv = N_VGetVector_Trilinos(w); return normWL2(*xv, *wv); } /* * L1 norm */ realtype N_VL1Norm_Trilinos(N_Vector x) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); return xv->norm1(); } /* * Elementwise z[i] = |x[i]| >= c ? 1 : 0 */ void N_VCompare_Trilinos(realtype c, N_Vector x, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP zv = N_VGetVector_Trilinos(z); compare(c, *xv, *zv); } /* * Elementwise inverse with zero checking: z[i] = 1/x[i], x[i] != 0 */ booleantype N_VInvTest_Trilinos(N_Vector x, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP zv = N_VGetVector_Trilinos(z); return invTest(*xv, *zv) ? SUNTRUE : SUNFALSE; } /* * Checks constraint violations for vector x. Constraints are defined in * vector c, and constraint violation flags are stored in vector m. */ booleantype N_VConstrMask_Trilinos(N_Vector c, N_Vector x, N_Vector m) { Teuchos::RCP cv = N_VGetVector_Trilinos(c); Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP mv = N_VGetVector_Trilinos(m); return constraintMask(*cv, *xv, *mv) ? SUNTRUE : SUNFALSE; } /* * Find minimum quotient: minq = min ( num[i]/denom[i]), denom[i] != 0. 
*/ realtype N_VMinQuotient_Trilinos(N_Vector num, N_Vector denom) { Teuchos::RCP numv = N_VGetVector_Trilinos(num); Teuchos::RCP denv = N_VGetVector_Trilinos(denom); return minQuotient(*numv, *denv); } /* * MPI task-local dot product */ realtype N_VDotProdLocal_Trilinos(N_Vector x, N_Vector y) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP yv = N_VGetVector_Trilinos(y); return dotProdLocal(*xv, *yv); } /* * MPI task-local maximum norm */ realtype N_VMaxNormLocal_Trilinos(N_Vector x) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); return maxNormLocal(*xv); } /* * MPI task-local minimum element */ realtype N_VMinLocal_Trilinos(N_Vector x) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); return minLocal(*xv); } /* * MPI task-local L1 norm */ realtype N_VL1NormLocal_Trilinos(N_Vector x) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); return L1NormLocal(*xv); } /* * MPI task-local weighted squared sum */ realtype N_VWSqrSumLocal_Trilinos(N_Vector x, N_Vector w) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP wv = N_VGetVector_Trilinos(w); return WSqrSumLocal(*xv, *wv); } /* * MPI task-local weighted masked squared sum */ realtype N_VWSqrSumMaskLocal_Trilinos(N_Vector x, N_Vector w, N_Vector id) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP wv = N_VGetVector_Trilinos(w); Teuchos::RCP idv = N_VGetVector_Trilinos(id); return WSqrSumMaskLocal(*xv, *wv, *idv); } /* * MPI task-local elementwise inverse with zero checking: z[i] = 1/x[i], x[i] != 0 */ booleantype N_VInvTestLocal_Trilinos(N_Vector x, N_Vector z) { Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP zv = N_VGetVector_Trilinos(z); return invTestLocal(*xv, *zv) ? SUNTRUE : SUNFALSE; } /* * MPI task-local constraint checking for vector x. Constraints are defined in * vector c, and constraint violation flags are stored in vector m. 
*/ booleantype N_VConstrMaskLocal_Trilinos(N_Vector c, N_Vector x, N_Vector m) { Teuchos::RCP cv = N_VGetVector_Trilinos(c); Teuchos::RCP xv = N_VGetVector_Trilinos(x); Teuchos::RCP mv = N_VGetVector_Trilinos(m); return constraintMaskLocal(*cv, *xv, *mv) ? SUNTRUE : SUNFALSE; } /* * MPI task-local minimum quotient: minq = min ( num[i]/denom[i]), denom[i] != 0. */ realtype N_VMinQuotientLocal_Trilinos(N_Vector num, N_Vector denom) { Teuchos::RCP numv = N_VGetVector_Trilinos(num); Teuchos::RCP denv = N_VGetVector_Trilinos(denom); return minQuotientLocal(*numv, *denv); } StanHeaders/src/nvector/openmpdev/0000755000176200001440000000000014511135055016717 5ustar liggesusersStanHeaders/src/nvector/openmpdev/nvector_openmpdev.c0000644000176200001440000023567214645137106022646 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP DEV implementation * of the NVECTOR module. 
* -----------------------------------------------------------------*/ #include #include #include #include #include #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */ static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported 
functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. */ N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v) { return SUNDIALS_NVEC_OPENMPDEV; } /* ---------------------------------------------------------------------------- * Function to create a new empty vector */ N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length, SUNContext sunctx) { N_Vector v; N_VectorContent_OpenMPDEV content; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV; v->ops->nvclone = N_VClone_OpenMPDEV; v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV; v->ops->nvdestroy = N_VDestroy_OpenMPDEV; v->ops->nvspace = N_VSpace_OpenMPDEV; v->ops->nvgetlength = N_VGetLength_OpenMPDEV; v->ops->nvgetarraypointer = N_VGetHostArrayPointer_OpenMPDEV; v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_OpenMPDEV; v->ops->nvprint = N_VPrint_OpenMPDEV; v->ops->nvprintfile = N_VPrintFile_OpenMPDEV; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV; v->ops->nvconst = N_VConst_OpenMPDEV; v->ops->nvprod = N_VProd_OpenMPDEV; v->ops->nvdiv = N_VDiv_OpenMPDEV; v->ops->nvscale = N_VScale_OpenMPDEV; v->ops->nvabs = N_VAbs_OpenMPDEV; v->ops->nvinv = N_VInv_OpenMPDEV; v->ops->nvaddconst = N_VAddConst_OpenMPDEV; v->ops->nvdotprod = N_VDotProd_OpenMPDEV; v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV; v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV; v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV; v->ops->nvmin = N_VMin_OpenMPDEV; v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV; v->ops->nvl1norm = N_VL1Norm_OpenMPDEV; v->ops->nvcompare = N_VCompare_OpenMPDEV; v->ops->nvinvtest = N_VInvTest_OpenMPDEV; v->ops->nvconstrmask = 
N_VConstrMask_OpenMPDEV; v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV; v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMPDEV; v->ops->nvminlocal = N_VMin_OpenMPDEV; v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV; v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV; v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV; v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMPDEV; /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = length; content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Function to create a new vector */ N_Vector N_VNew_OpenMPDEV(sunindextype length) { N_Vector v; realtype *data; realtype *dev_data; int dev; v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Update ownership */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if (data == NULL) { N_VDestroy(v); return(NULL); } /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if (dev_data == NULL) { N_VDestroy(v); return(NULL); } /* Attach data */ NV_DATA_HOST_OMPDEV(v) = data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a vector with 
user data component */ N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata, realtype *d_vdata) { N_Vector v; int dev, host; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); if (length > 0) { /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Attach data */ NV_OWN_DATA_OMPDEV(v) = SUNFALSE; NV_DATA_HOST_OMPDEV(v) = h_vdata; NV_DATA_DEV_OMPDEV(v) = d_vdata; } return(v); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors. */ N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMPDEV */ void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_OpenMPDEV(N_Vector v) { return NV_LENGTH_OMPDEV(v); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the host. */ realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_HOST_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the device. 
*/ realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_DEV_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to print a vector to stdout */ void N_VPrint_OpenMPDEV(N_Vector x) { N_VPrintFile_OpenMPDEV(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print a vector to outfile */ void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMPDEV(x); xd = NV_DATA_HOST_OMPDEV(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]); #else STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); return; } /* ---------------------------------------------------------------------------- * Function to copy host array into device array */ void N_VCopyToDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Copy array from host to device */ omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * length, 0, 0, dev, host); return; } /* ---------------------------------------------------------------------------- * Function to copy device array into host array */ void N_VCopyFromDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); 
/* Copy array from device to host */ omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * length, 0, 0, host, dev); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Create new vector from existing vector without attaching data */ N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w) { N_Vector v; N_VectorContent_OpenMPDEV content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = NV_LENGTH_OMPDEV(w); content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Create new vector from existing vector and attach data */ N_Vector N_VClone_OpenMPDEV(N_Vector w) { N_Vector v; realtype *data; realtype *dev_data; sunindextype length; int dev; v = NULL; v = N_VCloneEmpty_OpenMPDEV(w); if (v == NULL) return(NULL); length = NV_LENGTH_OMPDEV(w); /* Create data */ if (length > 0) { /* Update ownership flag */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if (data == NULL) { N_VDestroy(v); return(NULL); } /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if (dev_data == NULL) { N_VDestroy(v); return(NULL); } /* Attach data */ NV_DATA_HOST_OMPDEV(v)= data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* 
----------------------------------------------------------------------------
 * Destroy vector and free vector memory.
 * Host storage is released with free(); device storage with omp_target_free()
 * on the default device -- but only when the vector owns its data.
 */
void N_VDestroy_OpenMPDEV(N_Vector v)
{
  int dev;

  if (v == NULL) return;

  /* free content */
  if (v->content != NULL) {
    /* free data arrays if they are owned by the vector */
    if (NV_OWN_DATA_OMPDEV(v)) {
      if (NV_DATA_HOST_OMPDEV(v) != NULL) {
        free(NV_DATA_HOST_OMPDEV(v));
        NV_DATA_HOST_OMPDEV(v) = NULL;
      }
      if (NV_DATA_DEV_OMPDEV(v) != NULL) {
        dev = omp_get_default_device();
        omp_target_free(NV_DATA_DEV_OMPDEV(v), dev);
        NV_DATA_DEV_OMPDEV(v) = NULL;
      }
    }
    free(v->content);
    v->content = NULL;
  }

  /* free ops and vector */
  if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
  free(v); v = NULL;

  return;
}

/* ----------------------------------------------------------------------------
 * Get storage requirement for N_Vector: lrw = real words, liw = integer words
 */
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  *lrw = NV_LENGTH_OMPDEV(v);
  *liw = 1;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute linear combination z[i] = a*x[i]+b*y[i]
 *
 * Special coefficient/alias combinations are dispatched to the private
 * V* helpers declared at the top of this file; only the fully general
 * case runs the offload loop at the bottom.  All arithmetic operates on
 * the DEVICE copies of the data.
 */
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype c, *xd_dev, *yd_dev, *zd_dev;
  N_Vector v1, v2;
  booleantype test;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMPDEV(a,x,y);
    return;
  }

  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMPDEV(b,y,x);
    return;
  }

  /* Case: a == b == 1.0 */

  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMPDEV(x, y, z);
    return;
  }

  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */

  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMPDEV(v2, v1, z);
    return;
  }

  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */

  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */

  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */

  if (a == b) {
    VScaleSum_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Case: a == -b */

  if (a == -b) {
    VScaleDiff_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Assigns constant value to all vector elements, z[i] = c  (device data)
 */
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
  sunindextype i, N;
  realtype *zd_dev;
  int dev;

  zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++) zd_dev[i] = c;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise product z[i] = x[i]*y[i]  (device data)
 */
void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]*yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise division z[i] = x[i]/y[i]
 * (no zero check on y -- caller's responsibility)
 */
void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]/yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaler multiplication z[i] = c*x[i]
 * (c == 1 / c == -1 and the aliased z == x case are dispatched to helpers)
 */
void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  if (z == x) {  /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMPDEV(c, x);
    return;
  }

  if (c == ONE) {
    VCopy_OpenMPDEV(x, z);
  } else if (c == -ONE) {
    VNeg_OpenMPDEV(x, z);
  } else {
    N      = NV_LENGTH_OMPDEV(x);
    xd_dev = NV_DATA_DEV_OMPDEV(x);
    zd_dev = NV_DATA_DEV_OMPDEV(z);

    /* get default device identifier */
    dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      zd_dev[i] = c*xd_dev[i];
  }

  return;
}

/* ----------------------------------------------------------------------------
 * Compute absolute value of vector components z[i] = SUNRabs(x[i])
 */
void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = SUNRabs(xd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = 1 / x[i]
 * (no zero check -- see N_VInvTest for the checked variant)
 */
void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = ONE/xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
 */
void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]+b;

  return;
}

/* ----------------------------------------------------------------------------
 * Computes the dot product of two
vectors, a = sum(x[i]*y[i]) */ realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y) { sunindextype i, N; realtype sum, *xd_dev, *yd_dev; int dev; xd_dev = yd_dev = NULL; sum = ZERO; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { sum += xd_dev[i]*yd_dev[i]; } return(sum); } /* ---------------------------------------------------------------------------- * Computes max norm of a vector */ realtype N_VMaxNorm_OpenMPDEV(N_Vector x) { sunindextype i, N; realtype max, *xd_dev; int dev; max = ZERO; xd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:max) is_device_ptr(xd_dev) device(dev) #pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1) for (i = 0; i < N; i++) { max = SUNMAX(SUNRabs(xd_dev[i]), max); } return(max); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a vector */ realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w) { return(SUNRsqrt(N_VWSqrSumLocal_OpenMPDEV(x, w)/(NV_LENGTH_OMPDEV(x)))); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a masked vector */ realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id) { return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id) / (NV_LENGTH_OMPDEV(x)))); } /* ---------------------------------------------------------------------------- * Computes weighted square sum of a vector */ realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, *xd_dev, *wd_dev; int dev; sum = ZERO; xd_dev 
= wd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { sum += SUNSQR(xd_dev[i]*wd_dev[i]); } return(sum); } /* ---------------------------------------------------------------------------- * Computes weighted square sum of a masked vector */ realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, *xd_dev, *wd_dev, *idd_dev; int dev; sum = ZERO; xd_dev = wd_dev = idd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); idd_dev = NV_DATA_DEV_OMPDEV(id); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { if (idd_dev[i] > ZERO) { sum += SUNSQR(xd_dev[i]*wd_dev[i]); } } return(sum); } /* ---------------------------------------------------------------------------- * Finds the minimun component of a vector */ realtype N_VMin_OpenMPDEV(N_Vector x) { sunindextype i, N; realtype min, *xd_dev; int dev; xd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(from:min) is_device_ptr(xd_dev) device(dev) #pragma omp teams num_teams(1) { min = xd_dev[0]; #pragma omp distribute parallel for reduction(min:min) schedule(static, 1) for (i = 1; i < N; i++) { min = SUNMIN(xd_dev[i], min); } } return(min); } /* ---------------------------------------------------------------------------- * Computes weighted L2 norm of a vector */ realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w) { 
sunindextype i, N; realtype sum, *xd_dev, *wd_dev; int dev; sum = ZERO; xd_dev = wd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { sum += SUNSQR(xd_dev[i]*wd_dev[i]); } return(SUNRsqrt(sum)); } /* ---------------------------------------------------------------------------- * Computes L1 norm of a vector */ realtype N_VL1Norm_OpenMPDEV(N_Vector x) { sunindextype i, N; realtype sum, *xd_dev; int dev; sum = ZERO; xd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i= c) ? 
ONE : ZERO; return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO */ booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev, val; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); val = ZERO; #pragma omp target map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1) for (i = 0; i < N; i++) { if (xd_dev[i] == ZERO) val = ONE; else zd_dev[i] = ONE/xd_dev[i]; } if (val > ZERO) return (SUNFALSE); else return (SUNTRUE); } /* ---------------------------------------------------------------------------- * Compute constraint mask of a vector */ booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m) { sunindextype i, N; realtype temp; realtype *cd_dev, *xd_dev, *md_dev; int dev; cd_dev = xd_dev = md_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); cd_dev = NV_DATA_DEV_OMPDEV(c); md_dev = NV_DATA_DEV_OMPDEV(m); /* get default device identifier */ dev = omp_get_default_device(); temp = ONE; #pragma omp target map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev) #pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1) for (i = 0; i < N; i++) { md_dev[i] = ZERO; if (cd_dev[i] == ZERO) continue; if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) { if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; } continue; } if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) { if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; } } } if (temp == ONE) return (SUNTRUE); else return(SUNFALSE); } /* ---------------------------------------------------------------------------- * Compute minimum componentwise quotient */ realtype 
N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom) { sunindextype i, N; realtype *nd_dev, *dd_dev, min; int dev; nd_dev = dd_dev = NULL; N = NV_LENGTH_OMPDEV(num); nd_dev = NV_DATA_DEV_OMPDEV(num); dd_dev = NV_DATA_DEV_OMPDEV(denom); /* get default device identifier */ dev = omp_get_default_device(); min = BIG_REAL; #pragma omp target map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev) #pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1) for (i = 0; i < N; i++) if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min); return(min); } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i, dev; realtype to_add; /* temporary variable to hold sum being added in atomic operation */ sunindextype j, N; realtype* zd_dev=NULL; realtype* xd_dev=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_OpenMPDEV(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMPDEV(z); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store X dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i ZERO) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } } nrm[i] = SUNRsqrt(sum/N); } } free(xd_dev_ptrs); free(wd_dev_ptrs); return(0); } int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j, dev; sunindextype k, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** 
yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; int retval; N_Vector* YY; N_Vector* ZZ; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VLinearSum */ if (nsum == 1) { N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]); return(0); } /* should have called N_VScaleAddMulti */ YY = (N_Vector *) malloc(nsum * sizeof(N_Vector)); ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector)); for (j=0; j 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*)); for (i=0; i 1 * -------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; jops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV; v->ops->nvscaleaddmultivectorarray = 
N_VScaleAddMultiVectorArray_OpenMPDEV; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_OpenMPDEV; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) { v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMPDEV; } else { v->ops->nvdotprodmulti = NULL; 
v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV; else v->ops->nvwrmsnormmaskvectorarray = NULL; 
/* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } StanHeaders/src/nvector/petsc/0000755000176200001440000000000014511135055016040 5ustar liggesusersStanHeaders/src/nvector/petsc/nvector_petsc.c0000644000176200001440000012721414645137106021100 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL * ----------------------------------------------------------------- * Based on N_Vector_Parallel by Scott D. Cohen, Alan C. Hindmarsh, * Radu Serban, and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a PETSc implementation * of the NVECTOR package. 
* -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Error Message */ #define BAD_N1 "N_VNewEmpty_Petsc -- Sum of local vector lengths differs from " #define BAD_N2 "input global length. \n\n" #define BAD_N BAD_N1 BAD_N2 /* * ----------------------------------------------------------------- * Simplifying macros NV_CONTENT_PTC, NV_OWN_DATA_PTC, * NV_LOCLENGTH_PTC, NV_GLOBLENGTH_PTC, * NV_COMM_PTC * ----------------------------------------------------------------- * In the descriptions below, the following user declarations * are assumed: * * N_Vector v; * sunindextype v_len, s_len, i; * * (1) NV_CONTENT_PTC * * This routines gives access to the contents of the PETSc * vector wrapper N_Vector. * * The assignment v_cont = NV_CONTENT_PTC(v) sets v_cont to be * a pointer to the N_Vector (PETSc wrapper) content structure. * * (2) NV_PVEC_PTC, NV_OWN_DATA_PTC, NV_LOCLENGTH_PTC, NV_GLOBLENGTH_PTC, * and NV_COMM_PTC * * These routines give access to the individual parts of * the content structure of a PETSc N_Vector wrapper. * * NV_PVEC_PTC(v) returns the PETSc vector (Vec) object. * * The assignment v_llen = NV_LOCLENGTH_PTC(v) sets v_llen to * be the length of the local part of the vector v. The call * NV_LOCLENGTH_PTC(v) = llen_v should NOT be used! It will * change the value stored in the N_Vector content structure, * but it will NOT change the length of the actual PETSc vector. * * The assignment v_glen = NV_GLOBLENGTH_PTC(v) sets v_glen to * be the global length of the vector v. The call * NV_GLOBLENGTH_PTC(v) = glen_v should NOT be used! It will * change the value stored in the N_Vector content structure, * but it will NOT change the length of the actual PETSc vector. * * The assignment v_comm = NV_COMM_PTC(v) sets v_comm to be the * MPI communicator of the vector v. 
The assignment * NV_COMM_PTC(v) = comm_v should NOT be used! It will change * the value stored in the N_Vector content structure, but it * will NOT change the MPI communicator of the actual PETSc * vector. * * ----------------------------------------------------------------- */ #define NV_CONTENT_PTC(v) ( (N_VectorContent_Petsc)(v->content) ) #define NV_LOCLENGTH_PTC(v) ( NV_CONTENT_PTC(v)->local_length ) #define NV_GLOBLENGTH_PTC(v) ( NV_CONTENT_PTC(v)->global_length ) #define NV_OWN_DATA_PTC(v) ( NV_CONTENT_PTC(v)->own_data ) #define NV_PVEC_PTC(v) ( NV_CONTENT_PTC(v)->pvec ) #define NV_COMM_PTC(v) ( NV_CONTENT_PTC(v)->comm ) /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. */ N_Vector_ID N_VGetVectorID_Petsc(N_Vector v) { return SUNDIALS_NVEC_PETSC; } /* ---------------------------------------------------------------- * Function to create a new N_Vector wrapper with an empty (NULL) * PETSc vector. 
*/ N_Vector N_VNewEmpty_Petsc(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx) { N_Vector v; N_VectorContent_Petsc content; sunindextype n, Nsum; PetscErrorCode ierr; /* Compute global length as sum of local lengths */ n = local_length; ierr = MPI_Allreduce(&n, &Nsum, 1, MPI_SUNINDEXTYPE, MPI_SUM, comm); CHKERRABORT(comm,ierr); if (Nsum != global_length) { STAN_SUNDIALS_FPRINTF(stderr, BAD_N); return(NULL); } /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Petsc; v->ops->nvclone = N_VClone_Petsc; v->ops->nvcloneempty = N_VCloneEmpty_Petsc; v->ops->nvdestroy = N_VDestroy_Petsc; v->ops->nvspace = N_VSpace_Petsc; v->ops->nvgetarraypointer = N_VGetArrayPointer_Petsc; v->ops->nvsetarraypointer = N_VSetArrayPointer_Petsc; v->ops->nvgetcommunicator = N_VGetCommunicator_Petsc; v->ops->nvgetlength = N_VGetLength_Petsc; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Petsc; v->ops->nvconst = N_VConst_Petsc; v->ops->nvprod = N_VProd_Petsc; v->ops->nvdiv = N_VDiv_Petsc; v->ops->nvscale = N_VScale_Petsc; v->ops->nvabs = N_VAbs_Petsc; v->ops->nvinv = N_VInv_Petsc; v->ops->nvaddconst = N_VAddConst_Petsc; v->ops->nvdotprod = N_VDotProd_Petsc; v->ops->nvmaxnorm = N_VMaxNorm_Petsc; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Petsc; v->ops->nvwrmsnorm = N_VWrmsNorm_Petsc; v->ops->nvmin = N_VMin_Petsc; v->ops->nvwl2norm = N_VWL2Norm_Petsc; v->ops->nvl1norm = N_VL1Norm_Petsc; v->ops->nvcompare = N_VCompare_Petsc; v->ops->nvinvtest = N_VInvTest_Petsc; v->ops->nvconstrmask = N_VConstrMask_Petsc; v->ops->nvminquotient = N_VMinQuotient_Petsc; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProdLocal_Petsc; v->ops->nvmaxnormlocal = N_VMaxNormLocal_Petsc; v->ops->nvminlocal = 
N_VMinLocal_Petsc; v->ops->nvl1normlocal = N_VL1NormLocal_Petsc; v->ops->nvinvtestlocal = N_VInvTestLocal_Petsc; v->ops->nvconstrmasklocal = N_VConstrMaskLocal_Petsc; v->ops->nvminquotientlocal = N_VMinQuotientLocal_Petsc; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Petsc; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Petsc; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_Petsc; v->ops->nvdotprodmultiallreduce = N_VDotProdMultiAllReduce_Petsc; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_Petsc; v->ops->nvbufpack = N_VBufPack_Petsc; v->ops->nvbufunpack = N_VBufUnpack_Petsc; /* Create content */ content = NULL; content = (N_VectorContent_Petsc) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->local_length = local_length; content->global_length = global_length; content->comm = comm; content->own_data = SUNFALSE; content->pvec = NULL; return(v); } /* ---------------------------------------------------------------- * Function to create an N_Vector wrapper for a PETSc vector. */ N_Vector N_VMake_Petsc(Vec pvec, SUNContext sunctx) { N_Vector v = NULL; MPI_Comm comm; PetscInt local_length; PetscInt global_length; VecGetLocalSize(pvec, &local_length); VecGetSize(pvec, &global_length); PetscObjectGetComm((PetscObject) pvec, &comm); v = N_VNewEmpty_Petsc(comm, local_length, global_length, sunctx); if (v == NULL) return(NULL); /* Attach data */ NV_OWN_DATA_PTC(v) = SUNFALSE; NV_PVEC_PTC(v) = pvec; return(v); } /* ---------------------------------------------------------------- * Function to create an array of new PETSc vector wrappers. */ N_Vector *N_VCloneVectorArray_Petsc(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------- * Function to create an array of new PETSc vector wrappers with * empty (NULL) PETSc vectors. 
*/ N_Vector *N_VCloneVectorArrayEmpty_Petsc(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_Petsc */ void N_VDestroyVectorArray_Petsc(N_Vector *vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------- * Function to extract PETSc vector */ Vec N_VGetVector_Petsc(N_Vector v) { return NV_PVEC_PTC(v); } /* ---------------------------------------------------------------- * Function to set the PETSc vector */ void N_VSetVector_Petsc(N_Vector v, Vec p) { NV_PVEC_PTC(v) = p; } /* ---------------------------------------------------------------- * Function to print the global data in a PETSc vector to stdout */ void N_VPrint_Petsc(N_Vector x) { Vec xv = NV_PVEC_PTC(x); MPI_Comm comm = NV_COMM_PTC(x); VecView(xv, PETSC_VIEWER_STDOUT_(comm)); return; } /* ---------------------------------------------------------------- * Function to print the global data in a PETSc vector to fname */ void N_VPrintFile_Petsc(N_Vector x, const char fname[]) { Vec xv = NV_PVEC_PTC(x); MPI_Comm comm = NV_COMM_PTC(x); PetscViewer viewer; PetscViewerASCIIOpen(comm, fname, &viewer); VecView(xv, viewer); PetscViewerDestroy(&viewer); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Petsc(N_Vector w) { N_Vector v; N_VectorContent_Petsc content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_Petsc) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = 
content; /* Initialize content */ content->local_length = NV_LOCLENGTH_PTC(w); content->global_length = NV_GLOBLENGTH_PTC(w); content->comm = NV_COMM_PTC(w); content->own_data = SUNFALSE; content->pvec = NULL; return(v); } N_Vector N_VClone_Petsc(N_Vector w) { N_Vector v = NULL; Vec pvec = NULL; Vec wvec = NV_PVEC_PTC(w); /* PetscErrorCode ierr; */ v = N_VCloneEmpty_Petsc(w); if (v == NULL) return(NULL); /* Duplicate vector */ VecDuplicate(wvec, &pvec); if (pvec == NULL) { N_VDestroy_Petsc(v); return(NULL); } /* Attach data */ NV_OWN_DATA_PTC(v) = SUNTRUE; NV_PVEC_PTC(v) = pvec; return(v); } void N_VDestroy_Petsc(N_Vector v) { if (v == NULL) return; /* free content */ if (v->content != NULL) { if (NV_OWN_DATA_PTC(v) && NV_PVEC_PTC(v) != NULL) { VecDestroy(&(NV_PVEC_PTC(v))); NV_PVEC_PTC(v) = NULL; } free(v->content); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace_Petsc(N_Vector v, sunindextype *lrw, sunindextype *liw) { MPI_Comm comm; int npes; comm = NV_COMM_PTC(v); MPI_Comm_size(comm, &npes); *lrw = NV_GLOBLENGTH_PTC(v); *liw = 2*npes; return; } /* * Not implemented for PETSc wrapper. */ realtype *N_VGetArrayPointer_Petsc(N_Vector v) { return NULL; } /* * Not implemented for PETSc wrapper. 
*/ void N_VSetArrayPointer_Petsc(realtype *v_data, N_Vector v) { return; } void *N_VGetCommunicator_Petsc(N_Vector v) { return((void *) &(NV_COMM_PTC(v))); } sunindextype N_VGetLength_Petsc(N_Vector v) { return(NV_GLOBLENGTH_PTC(v)); } void N_VLinearSum_Petsc(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { Vec xv = NV_PVEC_PTC(x); Vec yv = NV_PVEC_PTC(y); Vec zv = NV_PVEC_PTC(z); if (x == y) { N_VScale_Petsc(a + b, x, z); /* z <~ ax+bx */ return; } if (z == y) { if (b == ONE) { VecAXPY(yv, a, xv); /* BLAS usage: axpy y <- ax+y */ return; } VecAXPBY(yv, a, b, xv); /* BLAS usage: axpby y <- ax+by */ return; } if (z == x) { if (a == ONE) { VecAXPY(xv, b, yv); /* BLAS usage: axpy x <- by+x */ return; } VecAXPBY(xv, b, a, yv); /* BLAS usage: axpby x <- by+ax */ return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ VecAXPBYPCZ(zv, a, b, 0.0, xv, yv); /* PETSc, probably not optimal */ return; } void N_VConst_Petsc(realtype c, N_Vector z) { Vec zv = NV_PVEC_PTC(z); VecSet(zv, c); return; } void N_VProd_Petsc(N_Vector x, N_Vector y, N_Vector z) { Vec xv = NV_PVEC_PTC(x); Vec yv = NV_PVEC_PTC(y); Vec zv = NV_PVEC_PTC(z); VecPointwiseMult(zv, xv, yv); return; } void N_VDiv_Petsc(N_Vector x, N_Vector y, N_Vector z) { Vec xv = NV_PVEC_PTC(x); Vec yv = NV_PVEC_PTC(y); Vec zv = NV_PVEC_PTC(z); VecPointwiseDivide(zv, xv, yv); /* z = x/y */ return; } void N_VScale_Petsc(realtype c, N_Vector x, N_Vector z) { Vec xv = NV_PVEC_PTC(x); Vec zv = NV_PVEC_PTC(z); if (z == x) { /* BLAS usage: scale x <- cx */ VecScale(xv, c); return; } VecAXPBY(zv, c, 0.0, xv); return; } void N_VAbs_Petsc(N_Vector x, N_Vector z) { Vec xv = NV_PVEC_PTC(x); Vec zv = NV_PVEC_PTC(z); if(z != x) VecCopy(xv, zv); /* copy x~>z */ VecAbs(zv); return; } void N_VInv_Petsc(N_Vector x, N_Vector z) { Vec xv = NV_PVEC_PTC(x); Vec zv = NV_PVEC_PTC(z); if(z != x) 
VecCopy(xv, zv); /* copy x~>z */ VecReciprocal(zv); return; } void N_VAddConst_Petsc(N_Vector x, realtype b, N_Vector z) { Vec xv = NV_PVEC_PTC(x); Vec zv = NV_PVEC_PTC(z); if(z != x) VecCopy(xv, zv); /* copy x~>z */ VecShift(zv, b); return; } realtype N_VDotProdLocal_Petsc(N_Vector x, N_Vector y) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); Vec yv = NV_PVEC_PTC(y); PetscScalar *xd; PetscScalar *yd; PetscReal sum = ZERO; VecGetArray(xv, &xd); VecGetArray(yv, &yd); for (i = 0; i < N; i++) sum += xd[i] * yd[i]; VecRestoreArray(xv, &xd); VecRestoreArray(yv, &yd); return ((realtype) sum); } realtype N_VDotProd_Petsc(N_Vector x, N_Vector y) { Vec xv = NV_PVEC_PTC(x); Vec yv = NV_PVEC_PTC(y); PetscScalar dotprod; VecDot(xv, yv, &dotprod); return dotprod; } realtype N_VMaxNormLocal_Petsc(N_Vector x) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); PetscScalar *xd; PetscReal max = ZERO; VecGetArray(xv, &xd); for (i = 0; i < N; i++) { if (PetscAbsScalar(xd[i]) > max) max = PetscAbsScalar(xd[i]); } VecRestoreArray(xv, &xd); return ((realtype) max); } realtype N_VMaxNorm_Petsc(N_Vector x) { Vec xv = NV_PVEC_PTC(x); PetscReal norm; VecNorm(xv, NORM_INFINITY, &norm); return norm; } realtype N_VWSqrSumLocal_Petsc(N_Vector x, N_Vector w) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); Vec wv = NV_PVEC_PTC(w); PetscScalar *xd; PetscScalar *wd; PetscReal sum = ZERO; VecGetArray(xv, &xd); VecGetArray(wv, &wd); for (i = 0; i < N; i++) { sum += PetscSqr(PetscAbsScalar(xd[i] * wd[i])); } VecRestoreArray(xv, &xd); VecRestoreArray(wv, &wd); return ((realtype) sum); } realtype N_VWrmsNorm_Petsc(N_Vector x, N_Vector w) { realtype global_sum; sunindextype N_global = NV_GLOBLENGTH_PTC(x); realtype sum = N_VWSqrSumLocal_Petsc(x, w); (void) MPI_Allreduce(&sum, &global_sum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_PTC(x)); return (SUNRsqrt(global_sum/N_global)); } realtype 
N_VWSqrSumMaskLocal_Petsc(N_Vector x, N_Vector w, N_Vector id) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); Vec wv = NV_PVEC_PTC(w); Vec idv = NV_PVEC_PTC(id); PetscScalar *xd; PetscScalar *wd; PetscScalar *idd; PetscReal sum = ZERO; VecGetArray(xv, &xd); VecGetArray(wv, &wd); VecGetArray(idv, &idd); for (i = 0; i < N; i++) { PetscReal tag = (PetscReal) idd[i]; if (tag > ZERO) { sum += PetscSqr(PetscAbsScalar(xd[i] * wd[i])); } } VecRestoreArray(xv, &xd); VecRestoreArray(wv, &wd); VecRestoreArray(idv, &idd); return sum; } realtype N_VWrmsNormMask_Petsc(N_Vector x, N_Vector w, N_Vector id) { realtype global_sum; sunindextype N_global = NV_GLOBLENGTH_PTC(x); realtype sum = N_VWSqrSumMaskLocal_Petsc(x, w, id); (void) MPI_Allreduce(&sum, &global_sum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_PTC(x)); return (SUNRsqrt(global_sum/N_global)); } realtype N_VMinLocal_Petsc(N_Vector x) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); PetscScalar *xd; PetscReal min = BIG_REAL; VecGetArray(xv, &xd); for (i = 0; i < N; i++) { if (xd[i] < min) min = xd[i]; } VecRestoreArray(xv, &xd); return ((realtype) min); } realtype N_VMin_Petsc(N_Vector x) { Vec xv = NV_PVEC_PTC(x); PetscReal minval; PetscInt i; VecMin(xv, &i, &minval); return minval; } realtype N_VWL2Norm_Petsc(N_Vector x, N_Vector w) { realtype global_sum; realtype sum = N_VWSqrSumLocal_Petsc(x, w); (void) MPI_Allreduce(&sum, &global_sum, 1, MPI_SUNREALTYPE, MPI_SUM, NV_COMM_PTC(x)); return (SUNRsqrt(global_sum)); } realtype N_VL1NormLocal_Petsc(N_Vector x) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); PetscScalar *xd; PetscReal sum = ZERO; VecGetArray(xv, &xd); for (i = 0; i < N; i++) { sum += PetscAbsScalar(xd[i]); } VecRestoreArray(xv, &xd); return ((realtype) sum); } realtype N_VL1Norm_Petsc(N_Vector x) { Vec xv = NV_PVEC_PTC(x); PetscReal norm; VecNorm(xv, NORM_1, &norm); return norm; } void N_VCompare_Petsc(realtype c, 
N_Vector x, N_Vector z) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); Vec zv = NV_PVEC_PTC(z); PetscReal cpet = c; /* <~ realtype should typedef to PETScReal */ PetscScalar *xdata; PetscScalar *zdata; VecGetArray(xv, &xdata); VecGetArray(zv, &zdata); for (i = 0; i < N; i++) { zdata[i] = PetscAbsScalar(xdata[i]) >= cpet ? ONE : ZERO; } VecRestoreArray(xv, &xdata); VecRestoreArray(zv, &zdata); return; } booleantype N_VInvTestLocal_Petsc(N_Vector x, N_Vector z) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); Vec xv = NV_PVEC_PTC(x); Vec zv = NV_PVEC_PTC(z); PetscScalar *xd; PetscScalar *zd; PetscReal val = ONE; VecGetArray(xv, &xd); VecGetArray(zv, &zd); for (i = 0; i < N; i++) { if (xd[i] == ZERO) val = ZERO; else zd[i] = ONE/xd[i]; } VecRestoreArray(xv, &xd); VecRestoreArray(zv, &zd); if (val == ZERO) return(SUNFALSE); else return(SUNTRUE); } booleantype N_VInvTest_Petsc(N_Vector x, N_Vector z) { realtype val2; realtype val = (N_VInvTestLocal_Petsc(x, z)) ? ONE : ZERO; (void) MPI_Allreduce(&val, &val2, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_PTC(x)); if (val2 == ZERO) return(SUNFALSE); else return(SUNTRUE); } booleantype N_VConstrMaskLocal_Petsc(N_Vector c, N_Vector x, N_Vector m) { sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(x); realtype temp; booleantype test; Vec xv = NV_PVEC_PTC(x); Vec cv = NV_PVEC_PTC(c); Vec mv = NV_PVEC_PTC(m); PetscScalar *xd; PetscScalar *cd; PetscScalar *md; temp = ZERO; VecGetArray(xv, &xd); VecGetArray(cv, &cd); VecGetArray(mv, &md); for (i = 0; i < N; i++) { PetscReal cc = (PetscReal) cd[i]; /* <~ Drop imaginary parts if any. 
*/ PetscReal xx = (PetscReal) xd[i]; /* <~ Constraints defined on Re{x} */ md[i] = ZERO; /* Continue if no constraints were set for the variable */ if (cc == ZERO) continue; /* Check if a set constraint has been violated */ test = (SUNRabs(cc) > ONEPT5 && xx*cc <= ZERO) || (SUNRabs(cc) > HALF && xx*cc < ZERO); if (test) { temp = md[i] = ONE; } } VecRestoreArray(xv, &xd); VecRestoreArray(cv, &cd); VecRestoreArray(mv, &md); /* Return false if any constraint was violated */ return (temp == ONE) ? SUNFALSE : SUNTRUE; } booleantype N_VConstrMask_Petsc(N_Vector c, N_Vector x, N_Vector m) { realtype temp2; realtype temp = (N_VConstrMaskLocal_Petsc(c, x, m)) ? ZERO : ONE; (void) MPI_Allreduce(&temp, &temp2, 1, MPI_SUNREALTYPE, MPI_MAX, NV_COMM_PTC(x)); return (temp2 == ONE) ? SUNFALSE : SUNTRUE; } realtype N_VMinQuotientLocal_Petsc(N_Vector num, N_Vector denom) { booleantype notEvenOnce = SUNTRUE; sunindextype i; sunindextype N = NV_LOCLENGTH_PTC(num); Vec nv = NV_PVEC_PTC(num); Vec dv = NV_PVEC_PTC(denom); PetscScalar *nd; PetscScalar *dd; PetscReal minval = BIG_REAL; VecGetArray(nv, &nd); VecGetArray(dv, &dd); for (i = 0; i < N; i++) { PetscReal nr = (PetscReal) nd[i]; PetscReal dr = (PetscReal) dd[i]; if (dr == ZERO) continue; else { if (!notEvenOnce) minval = SUNMIN(minval, nr/dr); else { minval = nr/dr; notEvenOnce = SUNFALSE; } } } VecRestoreArray(nv, &nd); VecRestoreArray(dv, &dd); return((realtype) minval); } realtype N_VMinQuotient_Petsc(N_Vector num, N_Vector denom) { PetscReal gmin; realtype minval = N_VMinQuotientLocal_Petsc(num, denom); (void) MPI_Allreduce(&minval, &gmin, 1, MPI_SUNREALTYPE, MPI_MIN, NV_COMM_PTC(num)); return(gmin); } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_Petsc(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i; Vec* xv; Vec zv; /* invalid number of vectors */ if (nvec < 1) return(-1); 
/* should have called N_VScale */ if (nvec == 1) { N_VScale_Petsc(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_Petsc(c[0], X[0], c[1], X[1], z); return(0); } /* get petsc vectors */ xv = (Vec*) malloc(nvec * sizeof(Vec)); for (i=0; i ZERO) nrm[i] += SUNSQR(xd[j] * wd[j]); } VecRestoreArray(NV_PVEC_PTC(X[i]), &xd); VecRestoreArray(NV_PVEC_PTC(W[i]), &wd); } VecRestoreArray(NV_PVEC_PTC(id), &idd); retval = MPI_Allreduce(MPI_IN_PLACE, nrm, nvec, MPI_SUNREALTYPE, MPI_SUM, comm); for (i=0; i 1 * -------------------------- */ /* should have called N_VLinearSumVectorArray */ if (nsum == 1) { retval = N_VLinearSumVectorArray_Petsc(nvec, a[0], X, ONE, Y[0], Z[0]); return(retval); } /* ---------------------------- * Compute multiple linear sums * ---------------------------- */ /* get vector length */ N = NV_LOCLENGTH_PTC(X[0]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { for (i=0; i 1 * -------------------------- */ /* should have called N_VScaleVectorArray */ if (nsum == 1) { ctmp = (realtype*) malloc(nvec * sizeof(realtype)); for (j=0; jops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Petsc; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Petsc; v->ops->nvdotprodmulti = N_VDotProdMulti_Petsc; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Petsc; v->ops->nvscalevectorarray = N_VScaleVectorArray_Petsc; v->ops->nvconstvectorarray = N_VConstVectorArray_Petsc; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Petsc; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Petsc; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Petsc; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Petsc; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_Petsc; } else { /* disable all fused vector operations */ 
v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_Petsc; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_Petsc; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmulti = N_VDotProdMulti_Petsc; else v->ops->nvdotprodmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Petsc; else v->ops->nvlinearsumvectorarray = NULL; /* 
return success */ return(0); } int N_VEnableScaleVectorArray_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_Petsc; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_Petsc; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Petsc; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Petsc; else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Petsc; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int 
N_VEnableLinearCombinationVectorArray_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Petsc; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } int N_VEnableDotProdMultiLocal_Petsc(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_Petsc; else v->ops->nvdotprodmultilocal = NULL; /* return success */ return(0); } StanHeaders/src/nvector/sycl/0000755000176200001440000000000014511135055015674 5ustar liggesusersStanHeaders/src/nvector/sycl/nvector_sycl.cpp0000644000176200001440000022330214645137106021123 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a SYCL implementation * of the NVECTOR package. 
 * -----------------------------------------------------------------*/

/* NOTE(review): the targets of the #include directives below were lost
 * (angle-bracket contents stripped) when this file was extracted from the
 * package archive; the first group is C++ standard headers and the second
 * group the SUNDIALS public nvector/math headers -- restore from upstream. */
#include
#include
#include

/* SUNDIALS public headers */
#include
#include

/* SUNDIALS private headers */
#include "sundials_debug.h"
#include "sundials_sycl.h"

/* Frequently used realtype constants */
#define ZERO   RCONST(0.0)
#define HALF   RCONST(0.5)
#define ONE    RCONST(1.0)
#define ONEPT5 RCONST(1.5)

extern "C" {

using namespace sundials;
using namespace sundials::sycl;

/* --------------------------------------------------------------------------
 * Helpful macros
 * -------------------------------------------------------------------------- */

/* Macros to access vector content: length, memory helper, byte size, and the
 * raw host/device data pointers and SYCL queue stored in the content struct */
#define NVEC_SYCL_CONTENT(x) ((N_VectorContent_Sycl)(x->content))
#define NVEC_SYCL_LENGTH(x)  (NVEC_SYCL_CONTENT(x)->length)
#define NVEC_SYCL_MEMHELP(x) (NVEC_SYCL_CONTENT(x)->mem_helper)
#define NVEC_SYCL_MEMSIZE(x) (NVEC_SYCL_CONTENT(x)->length * sizeof(realtype))
#define NVEC_SYCL_HDATAp(x)  ((realtype*) NVEC_SYCL_CONTENT(x)->host_data->ptr)
#define NVEC_SYCL_DDATAp(x)  ((realtype*) NVEC_SYCL_CONTENT(x)->device_data->ptr)
#define NVEC_SYCL_QUEUE(x)   (NVEC_SYCL_CONTENT(x)->queue)

/* Macros to access vector private content (reduction/fused-op workspaces) */
#define NVEC_SYCL_PRIVATE(x)  ((N_PrivateVectorContent_Sycl)(NVEC_SYCL_CONTENT(x)->priv))
#define NVEC_SYCL_HBUFFERp(x) ((realtype*) NVEC_SYCL_PRIVATE(x)->reduce_buffer_host->ptr)
#define NVEC_SYCL_DBUFFERp(x) ((realtype*) NVEC_SYCL_PRIVATE(x)->reduce_buffer_dev->ptr)

/* --------------------------------------------------------------------------
 * Private structure definition
 * -------------------------------------------------------------------------- */

/* Extra per-vector state hidden behind content->priv: scratch buffers reused
 * by the reduction kernels and by the fused/vector-array operations. */
struct _N_PrivateVectorContent_Sycl
{
  booleantype use_managed_mem; /* do data pointers use managed memory */

  /* reduction workspace */
  SUNMemory reduce_buffer_dev;   /* device memory for reductions       */
  SUNMemory reduce_buffer_host;  /* host memory for reductions         */
  size_t    reduce_buffer_bytes; /* current size of reduction buffers  */

  /* fused op workspace */
  SUNMemory fused_buffer_dev;    /* device memory for fused ops        */
  SUNMemory fused_buffer_host;   /* host memory for fused ops          */
  size_t    fused_buffer_bytes;  /* current size of the buffers        */
  size_t    fused_buffer_offset; /* current offset into the buffer     */
};

typedef struct _N_PrivateVectorContent_Sycl *N_PrivateVectorContent_Sycl;

/* --------------------------------------------------------------------------
 * Utility functions
 * -------------------------------------------------------------------------- */

/* Allocate vector data */
static int AllocateData(N_Vector v);

/* Reduction buffer functions: (re)initialize the device buffer to a starting
 * value and copy n results back to the host after a reduction kernel */
static int InitializeReductionBuffer(N_Vector v, const realtype value, size_t n = 1);
static void FreeReductionBuffer(N_Vector v);
static int CopyReductionBufferFromDevice(N_Vector v, size_t n = 1);

/* Fused operation buffer functions: pack scalar/pointer arguments of fused
 * kernels into a single pinned buffer and ship it to the device */
static int FusedBuffer_Init(N_Vector v, int nreal, int nptr);
static int FusedBuffer_CopyRealArray(N_Vector v, realtype *r_data, int nval, realtype **shortcut);
static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut);
static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut);
static int FusedBuffer_CopyToDevice(N_Vector v);
static int FusedBuffer_Free(N_Vector v);

/* Kernel launch parameters */
static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& nthreads_total, size_t& nthreads_per_block);

/* --------------------------------------------------------------------------
 * Constructors
 * -------------------------------------------------------------------------- */

/* Create a SYCL vector with no data allocation: attaches all operations and
 * allocates the (empty) content and private content structures. */
N_Vector N_VNewEmpty_Sycl(SUNContext sunctx)
{
  /* Create an empty vector object */
  N_Vector v = N_VNewEmpty(sunctx);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewEmpty_Sycl: N_VNewEmpty returned NULL\n");
    return NULL;
  }

  /* Attach operations */

  /* constructors, destructors, and utility operations */
  v->ops->nvgetvectorid = N_VGetVectorID_Sycl;
  v->ops->nvclone = N_VClone_Sycl;
  v->ops->nvcloneempty =
N_VCloneEmpty_Sycl;
  v->ops->nvdestroy = N_VDestroy_Sycl;
  v->ops->nvspace = N_VSpace_Sycl;
  v->ops->nvgetlength = N_VGetLength_Sycl;
  v->ops->nvgetarraypointer = N_VGetHostArrayPointer_Sycl;
  v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_Sycl;
  v->ops->nvsetarraypointer = N_VSetHostArrayPointer_Sycl;

  /* standard vector operations */
  v->ops->nvlinearsum = N_VLinearSum_Sycl;
  v->ops->nvconst = N_VConst_Sycl;
  v->ops->nvprod = N_VProd_Sycl;
  v->ops->nvdiv = N_VDiv_Sycl;
  v->ops->nvscale = N_VScale_Sycl;
  v->ops->nvabs = N_VAbs_Sycl;
  v->ops->nvinv = N_VInv_Sycl;
  v->ops->nvaddconst = N_VAddConst_Sycl;
  v->ops->nvdotprod = N_VDotProd_Sycl;
  v->ops->nvmaxnorm = N_VMaxNorm_Sycl;
  v->ops->nvmin = N_VMin_Sycl;
  v->ops->nvl1norm = N_VL1Norm_Sycl;
  v->ops->nvinvtest = N_VInvTest_Sycl;
  v->ops->nvconstrmask = N_VConstrMask_Sycl;
  v->ops->nvminquotient = N_VMinQuotient_Sycl;
  v->ops->nvwrmsnormmask = N_VWrmsNormMask_Sycl;
  v->ops->nvwrmsnorm = N_VWrmsNorm_Sycl;
  v->ops->nvwl2norm = N_VWL2Norm_Sycl;
  v->ops->nvcompare = N_VCompare_Sycl;

  /* fused and vector array operations are disabled (NULL) by default */

  /* local reduction operations: no MPI here, so the "local" entries reuse the
   * full reduction implementations */
  v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Sycl;
  v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Sycl;
  v->ops->nvdotprodlocal = N_VDotProd_Sycl;
  v->ops->nvmaxnormlocal = N_VMaxNorm_Sycl;
  v->ops->nvminlocal = N_VMin_Sycl;
  v->ops->nvl1normlocal = N_VL1Norm_Sycl;
  v->ops->nvinvtestlocal = N_VInvTest_Sycl;
  v->ops->nvconstrmasklocal = N_VConstrMask_Sycl;
  v->ops->nvminquotientlocal = N_VMinQuotient_Sycl;

  /* XBraid interface operations */
  v->ops->nvbufsize = N_VBufSize_Sycl;
  v->ops->nvbufpack = N_VBufPack_Sycl;
  v->ops->nvbufunpack = N_VBufUnpack_Sycl;

  /* print operation for debugging */
  v->ops->nvprint = N_VPrint_Sycl;
  v->ops->nvprintfile = N_VPrintFile_Sycl;

  /* Allocate content structure */
  v->content = (N_VectorContent_Sycl) malloc(sizeof(_N_VectorContent_Sycl));
  if (v->content == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewEmpty_Sycl: content allocation failed\n");
    N_VDestroy(v);
    return NULL;
  }

  /* Allocate private content structure */
  NVEC_SYCL_CONTENT(v)->priv = NULL;
  NVEC_SYCL_CONTENT(v)->priv = malloc(sizeof(_N_PrivateVectorContent_Sycl));
  if (NVEC_SYCL_CONTENT(v)->priv == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewEmpty_Sycl: private content allocation failed\n");
    N_VDestroy(v);
    return NULL;
  }

  /* Initialize content: nothing is owned or allocated yet */
  NVEC_SYCL_CONTENT(v)->length             = 0;
  NVEC_SYCL_CONTENT(v)->own_exec           = SUNFALSE;
  NVEC_SYCL_CONTENT(v)->own_helper         = SUNFALSE;
  NVEC_SYCL_CONTENT(v)->host_data          = NULL;
  NVEC_SYCL_CONTENT(v)->device_data        = NULL;
  NVEC_SYCL_CONTENT(v)->stream_exec_policy = NULL;
  NVEC_SYCL_CONTENT(v)->reduce_exec_policy = NULL;
  NVEC_SYCL_CONTENT(v)->mem_helper         = NULL;
  NVEC_SYCL_CONTENT(v)->queue              = NULL;

  /* Initialize private content */
  NVEC_SYCL_PRIVATE(v)->use_managed_mem     = SUNFALSE;
  NVEC_SYCL_PRIVATE(v)->reduce_buffer_dev   = NULL;
  NVEC_SYCL_PRIVATE(v)->reduce_buffer_host  = NULL;
  NVEC_SYCL_PRIVATE(v)->reduce_buffer_bytes = 0;
  NVEC_SYCL_PRIVATE(v)->fused_buffer_dev    = NULL;
  NVEC_SYCL_PRIVATE(v)->fused_buffer_host   = NULL;
  NVEC_SYCL_PRIVATE(v)->fused_buffer_bytes  = 0;
  NVEC_SYCL_PRIVATE(v)->fused_buffer_offset = 0;

  return v;
}

/* Create a SYCL vector of the given length using unmanaged (separate
 * host/device) memory allocated through a default SYCL memory helper.
 * The queue Q must be non-NULL and in-order. */
N_Vector N_VNew_Sycl(sunindextype length, ::sycl::queue *Q, SUNContext sunctx)
{
  /* Check inputs */
  if (Q == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Sycl: queue is NULL\n");
    return NULL;
  }

  if (!(Q->is_in_order()))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Sycl: queue is not in-order\n");
    return NULL;
  }

  /* Create vector with empty content */
  N_Vector v = N_VNewEmpty_Sycl(sunctx);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Sycl: N_VNewEmpty_Sycl returned NULL\n");
    return NULL;
  }

  /* Fill content: this vector owns its execution policies and memory helper */
  NVEC_SYCL_CONTENT(v)->length             = length;
  NVEC_SYCL_CONTENT(v)->own_exec           = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->own_helper         = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->mem_helper         = SUNMemoryHelper_Sycl(sunctx);
  NVEC_SYCL_CONTENT(v)->queue              = Q;
  NVEC_SYCL_PRIVATE(v)->use_managed_mem    = SUNFALSE;

  if (NVEC_SYCL_MEMHELP(v) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Sycl: memory helper is NULL\n");
    N_VDestroy(v);
    return NULL;
  }

  if (AllocateData(v))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Sycl: AllocateData returned nonzero\n");
    N_VDestroy(v);
    return NULL;
  }

  return v;
}

/* Create a SYCL vector using a caller-supplied memory helper; the helper is
 * borrowed (own_helper = SUNFALSE) and must outlive the vector. */
N_Vector N_VNewWithMemHelp_Sycl(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, ::sycl::queue *Q, SUNContext sunctx)
{
  /* Check inputs */
  if (Q == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Sycl: queue is NULL\n");
    return NULL;
  }

  if (!(Q->is_in_order()))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Sycl: queue is not in-order\n");
    return NULL;
  }

  if (helper == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Sycl: helper is NULL\n");
    return NULL;
  }

  if (!SUNMemoryHelper_ImplementsRequiredOps(helper))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Sycl: helper doesn't implement all required ops\n");
    return NULL;
  }

  /* Create vector with empty content */
  N_Vector v = N_VNewEmpty_Sycl(sunctx);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Sycl: N_VNewEmpty_Sycl returned NULL\n");
    return NULL;
  }

  /* Fill content */
  NVEC_SYCL_CONTENT(v)->length             = length;
  NVEC_SYCL_CONTENT(v)->own_exec           = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->own_helper         = SUNFALSE;
  NVEC_SYCL_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->mem_helper         = helper;
  NVEC_SYCL_CONTENT(v)->queue              = Q;
  NVEC_SYCL_PRIVATE(v)->use_managed_mem    = use_managed_mem;

  if (AllocateData(v))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Sycl: AllocateData returned nonzero\n");
    N_VDestroy(v);
    return NULL;
  }

  return v;
}

/* Create a SYCL vector backed by managed (unified shared) memory. */
N_Vector N_VNewManaged_Sycl(sunindextype
length, ::sycl::queue *Q, SUNContext sunctx)
{
  /* Check inputs */
  if (Q == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Sycl: queue is NULL\n");
    return NULL;
  }

  if (!(Q->is_in_order()))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Sycl: queue is not in-order\n");
    return NULL;
  }

  /* Create vector with empty content */
  N_Vector v = N_VNewEmpty_Sycl(sunctx);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Sycl: N_VNewEmpty_Sycl returned NULL\n");
    return NULL;
  }

  /* Fill content */
  NVEC_SYCL_CONTENT(v)->length             = length;
  NVEC_SYCL_CONTENT(v)->own_exec           = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->own_helper         = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->mem_helper         = SUNMemoryHelper_Sycl(sunctx);
  NVEC_SYCL_CONTENT(v)->queue              = Q;
  NVEC_SYCL_PRIVATE(v)->use_managed_mem    = SUNTRUE;

  if (NVEC_SYCL_MEMHELP(v) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Sycl: memory helper is NULL\n");
    N_VDestroy(v);
    return NULL;
  }

  if (AllocateData(v))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Sycl: AllocateData returned nonzero\n");
    N_VDestroy(v);
    return NULL;
  }

  return v;
}

/* Wrap existing, caller-owned host and device arrays in a SYCL vector.
 * The arrays are NOT copied and are not freed by N_VDestroy. */
N_Vector N_VMake_Sycl(sunindextype length, realtype *h_vdata, realtype *d_vdata, ::sycl::queue *Q, SUNContext sunctx)
{
  /* Check inputs */
  if (Q == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Sycl: queue is NULL\n");
    return NULL;
  }

  if (!(Q->is_in_order()))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Sycl: queue is not in-order\n");
    return NULL;
  }

  if (h_vdata == NULL || d_vdata == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Sycl: host or device data is NULL\n");
    return NULL;
  }

  /* Create vector with empty content */
  N_Vector v = N_VNewEmpty_Sycl(sunctx);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Sycl: N_VNewEmpty_Sycl returned NULL\n");
    return NULL;
  }

  /* Fill content: wrap (not copy) the user-supplied arrays */
  NVEC_SYCL_CONTENT(v)->length             = length;
  NVEC_SYCL_CONTENT(v)->own_exec           = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->own_helper         = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->host_data          = SUNMemoryHelper_Wrap(h_vdata, SUNMEMTYPE_HOST);
  NVEC_SYCL_CONTENT(v)->device_data        = SUNMemoryHelper_Wrap(d_vdata, SUNMEMTYPE_DEVICE);
  NVEC_SYCL_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->mem_helper         = SUNMemoryHelper_Sycl(sunctx);
  NVEC_SYCL_CONTENT(v)->queue              = Q;
  NVEC_SYCL_PRIVATE(v)->use_managed_mem    = SUNFALSE;

  if (NVEC_SYCL_MEMHELP(v) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Sycl: memory helper is NULL\n");
    N_VDestroy(v);
    return NULL;
  }

  if (NVEC_SYCL_CONTENT(v)->device_data == NULL ||
      NVEC_SYCL_CONTENT(v)->host_data == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Sycl: SUNMemoryHelper_Wrap returned NULL\n");
    N_VDestroy(v);
    return NULL;
  }

  return v;
}

/* Wrap an existing managed (USM) array in a SYCL vector; host and device
 * views alias the same memory. The array is not freed by N_VDestroy. */
N_Vector N_VMakeManaged_Sycl(sunindextype length, realtype *vdata, ::sycl::queue *Q, SUNContext sunctx)
{
  /* Check inputs */
  if (Q == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Sycl: queue is NULL\n");
    return NULL;
  }

  if (!(Q->is_in_order()))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Sycl: queue is not in-order\n");
    return NULL;
  }

  if (vdata == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Sycl: host or device data is NULL\n");
    return NULL;
  }

  /* Create vector with empty content */
  N_Vector v = N_VNewEmpty_Sycl(sunctx);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Sycl: N_VNewEmpty_Sycl returned NULL\n");
    return NULL;
  }

  /* Fill content: device view is an alias of the wrapped UVM host view */
  NVEC_SYCL_CONTENT(v)->length             = length;
  NVEC_SYCL_CONTENT(v)->own_exec           = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->own_helper         = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->host_data          = SUNMemoryHelper_Wrap(vdata, SUNMEMTYPE_UVM);
  NVEC_SYCL_CONTENT(v)->device_data        = SUNMemoryHelper_Alias(NVEC_SYCL_CONTENT(v)->host_data);
  NVEC_SYCL_CONTENT(v)->stream_exec_policy = new ThreadDirectExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->reduce_exec_policy = new BlockReduceExecPolicy(SYCL_BLOCKDIM(Q));
  NVEC_SYCL_CONTENT(v)->mem_helper         = SUNMemoryHelper_Sycl(sunctx);
  NVEC_SYCL_CONTENT(v)->queue              = Q;
  NVEC_SYCL_PRIVATE(v)->use_managed_mem    = SUNTRUE;

  if (NVEC_SYCL_MEMHELP(v) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Sycl: memory helper is NULL\n");
    N_VDestroy(v);
    return NULL;
  }

  if (NVEC_SYCL_CONTENT(v)->device_data == NULL ||
      NVEC_SYCL_CONTENT(v)->host_data == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Sycl: SUNMemoryHelper_Wrap returned NULL\n");
    N_VDestroy(v);
    return NULL;
  }

  return v;
}

/* --------------------------------------------------------------------------
 * Vector get, set, and utility functions
 * -------------------------------------------------------------------------- */

/* Function to return the global length of the vector. This is defined as an
 * inline function in nvector_sycl.h, so we just mark it as extern here. */
extern sunindextype N_VGetLength_Sycl(N_Vector v);

/* Return pointer to the raw host data. This is defined as an inline function in
 * nvector_sycl.h, so we just mark it as extern here. */
extern realtype *N_VGetHostArrayPointer_Sycl(N_Vector x);

/* Return pointer to the raw device data. This is defined as an inline function
 * in nvector_sycl.h, so we just mark it as extern here. */
extern realtype *N_VGetDeviceArrayPointer_Sycl(N_Vector x);

/* Set pointer to the raw host data. Does not free the existing pointer.
 */
void N_VSetHostArrayPointer_Sycl(realtype* h_vdata, N_Vector v)
{
  if (N_VIsManagedMemory_Sycl(v))
  {
    /* managed memory: host and device views must stay aliased */
    if (NVEC_SYCL_CONTENT(v)->host_data)
    {
      NVEC_SYCL_CONTENT(v)->host_data->ptr   = (void*) h_vdata;
      NVEC_SYCL_CONTENT(v)->device_data->ptr = (void*) h_vdata;
    }
    else
    {
      NVEC_SYCL_CONTENT(v)->host_data   = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_UVM);
      NVEC_SYCL_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_SYCL_CONTENT(v)->host_data);
    }
  }
  else
  {
    if (NVEC_SYCL_CONTENT(v)->host_data)
    {
      NVEC_SYCL_CONTENT(v)->host_data->ptr = (void*) h_vdata;
    }
    else
    {
      NVEC_SYCL_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_HOST);
    }
  }
}

/* Set pointer to the raw device data. Does not free the existing pointer. */
void N_VSetDeviceArrayPointer_Sycl(realtype* d_vdata, N_Vector v)
{
  if (N_VIsManagedMemory_Sycl(v))
  {
    /* managed memory: host and device views must stay aliased */
    if (NVEC_SYCL_CONTENT(v)->device_data)
    {
      NVEC_SYCL_CONTENT(v)->device_data->ptr = (void*) d_vdata;
      NVEC_SYCL_CONTENT(v)->host_data->ptr   = (void*) d_vdata;
    }
    else
    {
      NVEC_SYCL_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_UVM);
      NVEC_SYCL_CONTENT(v)->host_data   = SUNMemoryHelper_Alias(NVEC_SYCL_CONTENT(v)->device_data);
    }
  }
  else
  {
    if (NVEC_SYCL_CONTENT(v)->device_data)
    {
      NVEC_SYCL_CONTENT(v)->device_data->ptr = (void*) d_vdata;
    }
    else
    {
      NVEC_SYCL_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_DEVICE);
    }
  }
}

/* Return a flag indicating if the memory for the vector data is managed */
booleantype N_VIsManagedMemory_Sycl(N_Vector x)
{
  return NVEC_SYCL_PRIVATE(x)->use_managed_mem;
}

/* Install caller-supplied kernel execution policies; frees the old policies
 * only if the vector owned them, then marks them as borrowed. */
int N_VSetKernelExecPolicy_Sycl(N_Vector x, SUNSyclExecPolicy* stream_exec_policy, SUNSyclExecPolicy* reduce_exec_policy)
{
  if (x == NULL || stream_exec_policy == NULL || reduce_exec_policy == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VSetKernelExecPolicy_Sycl: An input is NULL\n");
    return -1;
  }

  if (NVEC_SYCL_CONTENT(x)->own_exec)
  {
    delete NVEC_SYCL_CONTENT(x)->stream_exec_policy;
    delete NVEC_SYCL_CONTENT(x)->reduce_exec_policy;
  }
  NVEC_SYCL_CONTENT(x)->stream_exec_policy = stream_exec_policy;
  NVEC_SYCL_CONTENT(x)->reduce_exec_policy = reduce_exec_policy;
  NVEC_SYCL_CONTENT(x)->own_exec = SUNFALSE;

  return 0;
}

/* Copy vector data to the device (blocks until the copy completes) */
void N_VCopyToDevice_Sycl(N_Vector x)
{
  int copy_fail;

  copy_fail = SUNMemoryHelper_Copy(NVEC_SYCL_MEMHELP(x),
                                   NVEC_SYCL_CONTENT(x)->device_data,
                                   NVEC_SYCL_CONTENT(x)->host_data,
                                   NVEC_SYCL_MEMSIZE(x),
                                   NVEC_SYCL_QUEUE(x));
  if (copy_fail)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyToDevice_Sycl: SUNMemoryHelper_Copy returned nonzero\n");
  }

  /* synchronize with the host */
  NVEC_SYCL_QUEUE(x)->wait_and_throw();
}

/* Copy vector data from the device to the host (blocks until complete) */
void N_VCopyFromDevice_Sycl(N_Vector x)
{
  int copy_fail;

  copy_fail = SUNMemoryHelper_Copy(NVEC_SYCL_MEMHELP(x),
                                   NVEC_SYCL_CONTENT(x)->host_data,
                                   NVEC_SYCL_CONTENT(x)->device_data,
                                   NVEC_SYCL_MEMSIZE(x),
                                   NVEC_SYCL_QUEUE(x));
  if (copy_fail)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyFromDevice_Sycl: SUNMemoryHelper_Copy returned nonzero\n");
  }

  /* synchronize with the host */
  NVEC_SYCL_QUEUE(x)->wait_and_throw();
}

/* Function to print the a serial vector to stdout */
void N_VPrint_Sycl(N_Vector X)
{
  N_VPrintFile_Sycl(X, stdout);
}

/* Function to print the a serial vector to outfile.  NOTE(review): prints the
 * HOST copy only; callers appear responsible for N_VCopyFromDevice_Sycl first. */
void N_VPrintFile_Sycl(N_Vector X, FILE *outfile)
{
  sunindextype i;

  for (i = 0; i < NVEC_SYCL_CONTENT(X)->length; i++)
  {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    fprintf(outfile, "%35.32Lg\n", NVEC_SYCL_HDATAp(X)[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    fprintf(outfile, "%19.16g\n", NVEC_SYCL_HDATAp(X)[i]);
#else
    fprintf(outfile, "%11.8g\n", NVEC_SYCL_HDATAp(X)[i]);
#endif
  }
  fprintf(outfile, "\n");

  return;
}

/* --------------------------------------------------------------------------
 * Vector operations
 * -------------------------------------------------------------------------- */

/* Create a new empty vector with the same length, queue, and memory style
 * as w (no data allocation). */
N_Vector N_VCloneEmpty_Sycl(N_Vector w)
{
  /* Check input */
  if (w == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VCloneEmpty_Sycl: input vector
is NULL\n");
    return NULL;
  }

  /* Create vector */
  N_Vector v = N_VNewEmpty_Sycl(w->sunctx);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VCloneEmpty_Sycl: N_VNewEmpty returned NULL\n");
    return NULL;
  }

  /* Attach operations */
  if (N_VCopyOps(w, v))
  {
    N_VDestroy(v);
    SUNDIALS_DEBUG_PRINT("ERROR in N_VCloneEmpty_Sycl: Error in N_VCopyOps\n");
    return NULL;
  }

  /* Copy some content: the queue is shared with w, not duplicated */
  NVEC_SYCL_CONTENT(v)->length          = NVEC_SYCL_CONTENT(w)->length;
  NVEC_SYCL_CONTENT(v)->queue           = NVEC_SYCL_CONTENT(w)->queue;
  NVEC_SYCL_PRIVATE(v)->use_managed_mem = NVEC_SYCL_PRIVATE(w)->use_managed_mem;

  return v;
}

/* Create a full clone of w: same configuration plus freshly allocated data.
 * Execution policies and the memory helper are deep-copied and owned. */
N_Vector N_VClone_Sycl(N_Vector w)
{
  /* Check inputs */
  if (w == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Sycl: vector is NULL\n");
    return NULL;
  }

  /* Create an empty clone vector */
  N_Vector v = N_VCloneEmpty_Sycl(w);
  if (v == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Sycl: N_VCloneEmpty_Sycl returned NULL\n");
    return NULL;
  }

  /* Allocate content */
  NVEC_SYCL_CONTENT(v)->own_exec           = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->own_helper         = SUNTRUE;
  NVEC_SYCL_CONTENT(v)->stream_exec_policy = NVEC_SYCL_CONTENT(w)->stream_exec_policy->clone();
  NVEC_SYCL_CONTENT(v)->reduce_exec_policy = NVEC_SYCL_CONTENT(w)->reduce_exec_policy->clone();
  NVEC_SYCL_CONTENT(v)->mem_helper         = SUNMemoryHelper_Clone(NVEC_SYCL_MEMHELP(w));

  if (NVEC_SYCL_MEMHELP(v) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Sycl: memory helper is NULL\n");
    N_VDestroy(v);
    return NULL;
  }

  if (AllocateData(v))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Sycl: AllocateData returned nonzero\n");
    N_VDestroy(v);
    return NULL;
  }

  return v;
}

/* Destroy a SYCL vector: frees private workspaces, data buffers (via the
 * memory helper), owned helpers/policies, and the vector itself.  The
 * teardown order (ops, private content, data, content) is deliberate. */
void N_VDestroy_Sycl(N_Vector v)
{
  N_VectorContent_Sycl vc;
  N_PrivateVectorContent_Sycl vcp;

  if (v == NULL) return;

  /* free ops structure */
  if (v->ops != NULL)
  {
    free(v->ops);
    v->ops = NULL;
  }

  /* extract content */
  vc = NVEC_SYCL_CONTENT(v);
  if (vc == NULL)
  {
    free(v);
    v = NULL;
    return;
  }

  /* free private content */
  vcp = (N_PrivateVectorContent_Sycl) vc->priv;
  if (vcp != NULL)
  {
    /* free items in private content */
    FreeReductionBuffer(v);
    FusedBuffer_Free(v);
    free(vcp);
    vc->priv = NULL;
  }

  /* free items in content */
  if (NVEC_SYCL_MEMHELP(v))
  {
    SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(v), vc->host_data, NVEC_SYCL_QUEUE(v));
    vc->host_data = NULL;
    SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(v), vc->device_data, NVEC_SYCL_QUEUE(v));
    vc->device_data = NULL;
    if (vc->own_helper) SUNMemoryHelper_Destroy(vc->mem_helper);
    vc->mem_helper = NULL;
  }
  else
  {
    SUNDIALS_DEBUG_PRINT("WARNING in N_VDestroy_Sycl: mem_helper was NULL when trying to dealloc data, this could result in a memory leak\n");
  }

  /* free content struct and vector */
  free(vc);
  free(v);

  return;
}

/* Report workspace sizes: lrw real words of data, 2 integer words */
void N_VSpace_Sycl(N_Vector X, sunindextype *lrw, sunindextype *liw)
{
  *lrw = NVEC_SYCL_CONTENT(X)->length;
  *liw = 2;
}

/* z[i] = c for all i (device kernel) */
void N_VConst_Sycl(realtype c, N_Vector z)
{
  const sunindextype N = NVEC_SYCL_LENGTH(z);
  realtype *zdata      = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q     = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VConst_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = c;
           });
}

/* z = a*x + b*y (device kernel) */
void N_VLinearSum_Sycl(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  const sunindextype N  = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  const realtype *ydata = NVEC_SYCL_DDATAp(y);
  realtype *zdata       = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSum_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = (a * xdata[i]) + (b * ydata[i]);
           });
}

/* z = x .* y (elementwise product, device kernel) */
void N_VProd_Sycl(N_Vector x, N_Vector y, N_Vector z)
{
  const sunindextype N =
NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  const realtype *ydata = NVEC_SYCL_DDATAp(y);
  realtype *zdata       = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VProd_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = xdata[i] * ydata[i];
           });
}

/* z = x ./ y (elementwise quotient; no guard against ydata[i] == 0) */
void N_VDiv_Sycl(N_Vector x, N_Vector y, N_Vector z)
{
  const sunindextype N  = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  const realtype *ydata = NVEC_SYCL_DDATAp(y);
  realtype *zdata       = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VDiv_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = xdata[i] / ydata[i];
           });
}

/* z = c * x */
void N_VScale_Sycl(realtype c, N_Vector x, N_Vector z)
{
  const sunindextype N  = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  realtype *zdata       = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VScale_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = c * xdata[i];
           });
}

/* z = |x| elementwise.  NOTE(review): `abs` here is expected to resolve to a
 * SYCL floating-point overload inside the kernel -- confirm against upstream. */
void N_VAbs_Sycl(N_Vector x, N_Vector z)
{
  const sunindextype N  = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  realtype *zdata       = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VAbs_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = abs(xdata[i]);
           });
}

/* z = 1 ./ x elementwise (no guard against xdata[i] == 0) */
void N_VInv_Sycl(N_Vector x, N_Vector z)
{
  const sunindextype N  = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  realtype *zdata       = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VInv_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = ONE / xdata[i];
           });
}

/* z = x + b elementwise */
void N_VAddConst_Sycl(N_Vector x, realtype b, N_Vector z)
{
  const sunindextype N  = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  realtype *zdata       = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;

  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VAddConst_Sycl: GetKernelParameters returned nonzero\n");
  }

  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N)
           {
             zdata[i] = xdata[i] + b;
           });
}

/* Dot product sum(x .* y) via a device reduction into the shared
 * reduction buffer, then copied back to the host. */
realtype N_VDotProd_Sycl(N_Vector x, N_Vector y)
{
  const sunindextype N  = NVEC_SYCL_LENGTH(x);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  const realtype *ydata = NVEC_SYCL_DDATAp(y);
  ::sycl::queue *Q      = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;

  if (InitializeReductionBuffer(x, ZERO))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Sycl: InitializeReductionBuffer returned nonzero\n");
  }

  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Sycl: GetKernelParameters returned nonzero\n");
  }

  /* Shortcut to the reduction buffer */
  realtype *sum = NVEC_SYCL_DBUFFERp(x);

  SYCL_FOR_REDUCE(Q, nthreads_total,
/* --- continuation of N_VDotProd_Sycl: finish the reduction-kernel launch begun
 * on the previous line, copy the device result to the host, and return it.
 * NOTE(review): template arguments appear stripped throughout this copy
 * (e.g. ::sycl::plus() rather than ::sycl::plus<realtype>(), and
 * std::numeric_limits::max() below) -- confirm against upstream SUNDIALS. --- */
nthreads_per_block, item, sum, ::sycl::plus(),
                GRID_STRIDE_XLOOP(item, i, N) { sum += xdata[i] * ydata[i]; });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  /* the scalar reduction result is entry 0 of the host-side buffer */
  return NVEC_SYCL_HBUFFERp(x)[0];
}

/* Max norm: device reduction of max(|x_i|); buffer seeded with ZERO. */
realtype N_VMaxNorm_Sycl(N_Vector x)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;
  if (InitializeReductionBuffer(x, ZERO)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *max = NVEC_SYCL_DBUFFERp(x);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, max, ::sycl::maximum(),
                  GRID_STRIDE_XLOOP(item, i, N) { max.combine(abs(xdata[i])); });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  return NVEC_SYCL_HBUFFERp(x)[0];
}

/* Local weighted squared sum: sum_i (x_i * w_i)^2 via a device sum reduction. */
realtype N_VWSqrSumLocal_Sycl(N_Vector x, N_Vector w)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  const realtype *wdata = NVEC_SYCL_DDATAp(w);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;
  if (InitializeReductionBuffer(x, ZERO)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *sum = NVEC_SYCL_DBUFFERp(x);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, sum, ::sycl::plus(),
                  GRID_STRIDE_XLOOP(item, i, N) { sum += xdata[i] * wdata[i] * xdata[i] * wdata[i]; });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  return NVEC_SYCL_HBUFFERp(x)[0];
}

/* WRMS norm: sqrt of the mean of the weighted squared sum. */
realtype N_VWrmsNorm_Sycl(N_Vector x, N_Vector w)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype sum = N_VWSqrSumLocal_Sycl(x, w);
  return std::sqrt(sum/N);
}

/* Masked weighted squared sum: entries with id_i > 0 contribute (x_i*w_i)^2. */
realtype N_VWSqrSumMaskLocal_Sycl(N_Vector x, N_Vector w, N_Vector id)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  const realtype *wdata = NVEC_SYCL_DDATAp(w);
  const realtype *iddata = NVEC_SYCL_DDATAp(id);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;
  if (InitializeReductionBuffer(x, ZERO)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *sum = NVEC_SYCL_DBUFFERp(x);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, sum, ::sycl::plus(),
                  GRID_STRIDE_XLOOP(item, i, N) { if (iddata[i] > ZERO) sum += xdata[i] * wdata[i] * xdata[i] * wdata[i]; });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  return NVEC_SYCL_HBUFFERp(x)[0];
}

/* Masked WRMS norm built on the masked squared sum above. */
realtype N_VWrmsNormMask_Sycl(N_Vector x, N_Vector w, N_Vector id)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype sum = N_VWSqrSumMaskLocal_Sycl(x, w, id);
  return std::sqrt(sum/N);
}

/* Minimum entry: min reduction seeded with the largest representable value. */
realtype N_VMin_Sycl(N_Vector x)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;
  /* NOTE(review): numeric_limits specialization argument missing in this copy */
  if (InitializeReductionBuffer(x, std::numeric_limits::max())) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *min = NVEC_SYCL_DBUFFERp(x);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, min, ::sycl::minimum(),
                  GRID_STRIDE_XLOOP(item, i, N) { min.combine(xdata[i]); });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  return NVEC_SYCL_HBUFFERp(x)[0];
}

/* Weighted L2 norm: sqrt of the weighted squared sum (no division by N). */
realtype N_VWL2Norm_Sycl(N_Vector x, N_Vector w)
{
  return std::sqrt(N_VWSqrSumLocal_Sycl(x, w));
}

/* L1 norm: device sum reduction of |x_i|. */
realtype N_VL1Norm_Sycl(N_Vector x)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;
  if (InitializeReductionBuffer(x, ZERO)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *sum = NVEC_SYCL_DBUFFERp(x);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, sum, ::sycl::plus(),
                  GRID_STRIDE_XLOOP(item, i, N) { sum += abs(xdata[i]); });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  return NVEC_SYCL_HBUFFERp(x)[0];
}

/* Elementwise compare: z_i = (|x_i| >= c) ? 1 : 0.  Streaming (non-reduction) kernel. */
void N_VCompare_Sycl(realtype c, N_Vector x, N_Vector z)
{
  const sunindextype N = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  realtype *zdata = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;
  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VCompare_Sycl: GetKernelParameters returned nonzero\n");
  }
  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N) { zdata[i] = abs(xdata[i]) >= c ? ONE : ZERO; });
}

/* Inversion test: z_i = 1/x_i where x_i != 0; counts zero entries in the
 * reduction buffer and returns SUNTRUE only if none were found (< HALF). */
booleantype N_VInvTest_Sycl(N_Vector x, N_Vector z)
{
  const sunindextype N = NVEC_SYCL_LENGTH(z);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  realtype *zdata = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;
  if (InitializeReductionBuffer(x, ZERO)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *sum = NVEC_SYCL_DBUFFERp(x);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, sum, ::sycl::plus(),
                  GRID_STRIDE_XLOOP(item, i, N) { if (xdata[i] == ZERO) { sum += ONE; } else { zdata[i] = ONE / xdata[i]; } });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  return (NVEC_SYCL_HBUFFERp(x)[0] < HALF);
}

/* Constraint mask: flags entries of x violating the constraints encoded in c,
 * writes the violation mask to m, and accumulates the violation count. */
booleantype N_VConstrMask_Sycl(N_Vector c, N_Vector x, N_Vector m)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype *cdata = NVEC_SYCL_DDATAp(c);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  realtype *mdata = NVEC_SYCL_DDATAp(m);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;
  if (InitializeReductionBuffer(x, ZERO)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(x, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *sum = NVEC_SYCL_DBUFFERp(x);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, sum, ::sycl::plus(),
                  GRID_STRIDE_XLOOP(item, i, N) {
                    bool test = (abs(cdata[i]) > ONEPT5 && cdata[i] * xdata[i] <= ZERO) ||
                                (abs(cdata[i]) > HALF && cdata[i] * xdata[i] < ZERO);
                    mdata[i] = test ? ONE : ZERO;
                    sum += mdata[i]; });
  if (CopyReductionBufferFromDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  /* SUNTRUE (no violations) iff the accumulated count is below HALF */
  return (NVEC_SYCL_HBUFFERp(x)[0] < HALF);
}

/* Minimum quotient: min over i of num_i/denom_i, skipping zero denominators. */
realtype N_VMinQuotient_Sycl(N_Vector num, N_Vector denom)
{
  const sunindextype N = NVEC_SYCL_LENGTH(num);
  const realtype *ndata = NVEC_SYCL_DDATAp(num);
  const realtype *ddata = NVEC_SYCL_DDATAp(denom);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(num);
  size_t nthreads_total, nthreads_per_block;
  if (InitializeReductionBuffer(num, std::numeric_limits::max())) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Sycl: InitializeReductionBuffer returned nonzero\n");
  }
  if (GetKernelParameters(num, SUNTRUE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Sycl: GetKernelParameters returned nonzero\n");
  }
  /* Shortcut to the reduction buffer */
  realtype *min = NVEC_SYCL_DBUFFERp(num);
  SYCL_FOR_REDUCE(Q, nthreads_total, nthreads_per_block, item, min, ::sycl::minimum(),
                  GRID_STRIDE_XLOOP(item, i, N) { if (ddata[i] != ZERO) min.combine(ndata[i] / ddata[i]); });
  if (CopyReductionBufferFromDevice(num)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Sycl: CopyReductionBufferFromDevice returned nonzero\n");
  }
  return NVEC_SYCL_HBUFFERp(num)[0];
}

/* --------------------------------------------------------------------------
 * fused vector operations
 * -------------------------------------------------------------------------- */

/* z = sum_j c[j] * X[j]; scalars and subvector pointers are staged through the
 * fused-op device buffer before launching a single kernel. */
int N_VLinearCombination_Sycl(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  const sunindextype N = NVEC_SYCL_LENGTH(z);
  realtype *zdata = NVEC_SYCL_DDATAp(z);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(z);
  size_t nthreads_total, nthreads_per_block;
  /* Fused op workspace shortcuts */
  realtype* cdata = NULL;
  realtype** xdata = NULL;
  /* Setup the fused op workspace */
  if (FusedBuffer_Init(z, nvec, nvec)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Sycl: FusedBuffer_Init returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyRealArray(z, c, nvec, &cdata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Sycl: FusedBuffer_CopyRealArray returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyPtrArray1D(z, X, nvec, &xdata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyToDevice(z)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Sycl: FusedBuffer_CopyToDevice returned nonzero\n");
    return -1;
  }
  if (GetKernelParameters(z, SUNFALSE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombination_Sycl: GetKernelParameters returned nonzero\n");
    return -1;
  }
  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N) {
             zdata[i] = cdata[0] * xdata[0][i];
             for (int j = 1; j < nvec; j++) { zdata[i] += cdata[j] * xdata[j][i]; } });
  return 0;
}

/* Z[j] = c[j] * x + Y[j] for each j. */
int N_VScaleAddMulti_Sycl(int nvec, realtype* c, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  const sunindextype N = NVEC_SYCL_LENGTH(x);
  const realtype *xdata = NVEC_SYCL_DDATAp(x);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(x);
  size_t nthreads_total, nthreads_per_block;
  /* Shortcuts to the fused op workspace */
  realtype* cdata = NULL;
  realtype** ydata = NULL;
  realtype** zdata = NULL;
  /* Setup the fused op workspace */
  if (FusedBuffer_Init(x, nvec, 2 * nvec)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Sycl: FusedBuffer_Init returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyRealArray(x, c, nvec, &cdata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Sycl: FusedBuffer_CopyRealArray returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyPtrArray1D(x, Y, nvec, &ydata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyPtrArray1D(x, Z, nvec, &zdata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyToDevice(x)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Sycl: FusedBuffer_CopyToDevice returned nonzero\n");
    return -1;
  }
  if (GetKernelParameters(x, SUNFALSE, nthreads_total, nthreads_per_block)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMulti_Sycl: GetKernelParameters returned nonzero\n");
    return -1;
  }
  SYCL_FOR(Q, nthreads_total, nthreads_per_block, item,
           GRID_STRIDE_XLOOP(item, i, N) {
             for (int j = 0; j < nvec; j++) { zdata[j][i] = cdata[j] * xdata[i] + ydata[j][i]; } });
  return 0;
}

/* --------------------------------------------------------------------------
 * vector array operations
 * -------------------------------------------------------------------------- */

/* Z[j] = a * X[j] + b * Y[j] for each j (continues on the next line). */
int N_VLinearSumVectorArray_Sycl(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z)
{
  const sunindextype N = NVEC_SYCL_LENGTH(Z[0]);
  ::sycl::queue *Q = NVEC_SYCL_QUEUE(Z[0]);
  size_t nthreads_total, nthreads_per_block;
  /* Shortcuts to the fused op workspace */
  realtype** xdata = NULL;
  realtype** ydata = NULL;
  realtype** zdata = NULL;
  /* Setup the fused op workspace */
  if (FusedBuffer_Init(Z[0], 0, 3 * nvec)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_Init returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyPtrArray1D(Z[0], Y, nvec, &ydata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n");
    return -1;
  }
  if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) {
    SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearSumVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned
nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinaerSumVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } if (GetKernelParameters(Z[0], SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinaerSumVectorArray_Sycl: GetKernelParameters returned nonzero\n"); return -1; } SYCL_FOR(Q, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, N) { for (int j = 0; j < nvec; j++) { zdata[j][i] = a * xdata[j][i] + b * ydata[j][i]; } }); return 0; } int N_VScaleVectorArray_Sycl(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { const sunindextype N = NVEC_SYCL_LENGTH(Z[0]); ::sycl::queue *Q = NVEC_SYCL_QUEUE(Z[0]); size_t nthreads_total, nthreads_per_block; /* Shortcuts to the fused op workspace arrays */ realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; /* Setup the fused op workspace */ if (FusedBuffer_Init(Z[0], nvec, 2 * nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nvec, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyReadArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } if (GetKernelParameters(Z[0], SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } SYCL_FOR(Q, nthreads_total, 
nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, N) { for (int j = 0; j < nvec; j++) { zdata[j][i] = cdata[j] * xdata[j][i]; } }); return 0; } int N_VConstVectorArray_Sycl(int nvec, realtype c, N_Vector* Z) { const sunindextype N = NVEC_SYCL_LENGTH(Z[0]); ::sycl::queue *Q = NVEC_SYCL_QUEUE(Z[0]); size_t nthreads_total, nthreads_per_block; /* Shortcuts to the fused op workspace arrays */ realtype** zdata = NULL; /* Setup the fused op workspace */ if (FusedBuffer_Init(Z[0], 0, nvec)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } if (GetKernelParameters(Z[0], SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VConstVectorArray_Sycl: GetKernelParameters returned nonzero\n"); return -1; } SYCL_FOR(Q, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, N) { for (int j = 0; j < nvec; j++) { zdata[j][i] = c; } }); return 0; } int N_VScaleAddMultiVectorArray_Sycl(int nvec, int nsum, realtype* c, N_Vector* X, N_Vector** Y, N_Vector** Z) { const sunindextype N = NVEC_SYCL_LENGTH(X[0]); ::sycl::queue *Q = NVEC_SYCL_QUEUE(X[0]); size_t nthreads_total, nthreads_per_block; /* Shortcuts to the fused op workspace */ realtype* cdata = NULL; realtype** xdata = NULL; realtype** ydata = NULL; realtype** zdata = NULL; /* Setup the fused op workspace */ if (FusedBuffer_Init(X[0], nsum, nvec + 2 * nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(X[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiArray_Sycl: 
FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(X[0], X, nvec, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Y, nvec, nsum, &ydata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Sycl: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray2D(X[0], Z, nvec, nsum, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Sycl: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(X[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } if (GetKernelParameters(X[0], SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VScaleAddMultiVectorArray_Sycl: GetKernelParameters returned nonzero\n"); return -1; } SYCL_FOR(Q, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, N) { for (int j = 0; j < nvec; j++) { for (int k = 0; k < nsum; k++) { zdata[j * nsum + k][i] = cdata[k] * xdata[j][i] + ydata[j * nsum + k][i]; } } }); return 0; } int N_VLinearCombinationVectorArray_Sycl(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { const sunindextype N = NVEC_SYCL_LENGTH(Z[0]); ::sycl::queue *Q = NVEC_SYCL_QUEUE(Z[0]); size_t nthreads_total, nthreads_per_block; /* Shortcuts to the fused op workspace arrays */ realtype* cdata = NULL; realtype** xdata = NULL; realtype** zdata = NULL; /* Setup the fused op workspace */ if (FusedBuffer_Init(Z[0], nsum, nvec + nvec * nsum)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_Init returned nonzero\n"); return -1; } if (FusedBuffer_CopyRealArray(Z[0], c, nsum, &cdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyRealArray returned nonzero\n"); return -1; } if 
(FusedBuffer_CopyPtrArray2D(Z[0], X, nvec, nsum, &xdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyPtrArray2D returned nonzero\n"); return -1; } if (FusedBuffer_CopyPtrArray1D(Z[0], Z, nvec, &zdata)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyPtrArray1D returned nonzero\n"); return -1; } if (FusedBuffer_CopyToDevice(Z[0])) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: FusedBuffer_CopyToDevice returned nonzero\n"); return -1; } if (GetKernelParameters(Z[0], SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_PRINT("ERROR in N_VLinearCombinationVectorArray_Sycl: GetKernelParameters returned nonzero\n"); return -1; } SYCL_FOR(Q, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, N) { for (int j = 0; j < nvec; j++) { zdata[j][i] = cdata[0] * xdata[j * nsum][i]; for (int k = 1; k < nsum; k++) { zdata[j][i] += cdata[k] * xdata[j * nsum + k][i]; } } }); return 0; } /* -------------------------------------------------------------------------- * OPTIONAL XBraid interface operations * -------------------------------------------------------------------------- */ int N_VBufSize_Sycl(N_Vector x, sunindextype *size) { if (x == NULL) return -1; *size = (sunindextype)NVEC_SYCL_MEMSIZE(x); return 0; } int N_VBufPack_Sycl(N_Vector x, void *buf) { int copy_fail = 0; if (x == NULL || buf == NULL) return -1; SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST); if (buf_mem == NULL) return -1; copy_fail = SUNMemoryHelper_Copy(NVEC_SYCL_MEMHELP(x), buf_mem, NVEC_SYCL_CONTENT(x)->device_data, NVEC_SYCL_MEMSIZE(x), NVEC_SYCL_QUEUE(x)); /* synchronize with the host */ NVEC_SYCL_QUEUE(x)->wait_and_throw(); SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(x), buf_mem, NVEC_SYCL_QUEUE(x)); return (copy_fail ? 
-1 : 0); }

/* Unpacks a caller-supplied host buffer into the vector's device data; the
 * queue is synchronized before the temporary SUNMemory wrapper is released. */
int N_VBufUnpack_Sycl(N_Vector x, void *buf)
{
  int copy_fail = 0;
  if (x == NULL || buf == NULL) return -1;
  SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST);
  if (buf_mem == NULL) return -1;
  copy_fail = SUNMemoryHelper_Copy(NVEC_SYCL_MEMHELP(x),
                                   NVEC_SYCL_CONTENT(x)->device_data, buf_mem,
                                   NVEC_SYCL_MEMSIZE(x), NVEC_SYCL_QUEUE(x));
  /* synchronize with the host */
  NVEC_SYCL_QUEUE(x)->wait_and_throw();
  SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(x), buf_mem, NVEC_SYCL_QUEUE(x));
  return (copy_fail ? -1 : 0);
}

/* --------------------------------------------------------------------------
 * Enable / Disable fused and vector array operations
 * -------------------------------------------------------------------------- */

/* Switches the whole family of fused/array ops on or off at once.  Ops with no
 * SYCL implementation (dotprodmulti, wrmsnorm[mask]vectorarray) are always set
 * to NULL so the generic fallbacks are used. */
int N_VEnableFusedOps_Sycl(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return -1;
  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return -1;
  if (tf) {
    /* enable all fused vector operations */
    v->ops->nvlinearcombination = N_VLinearCombination_Sycl;
    v->ops->nvscaleaddmulti = N_VScaleAddMulti_Sycl;
    v->ops->nvdotprodmulti = NULL;
    /* enable all vector array operations */
    v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Sycl;
    v->ops->nvscalevectorarray = N_VScaleVectorArray_Sycl;
    v->ops->nvconstvectorarray = N_VConstVectorArray_Sycl;
    v->ops->nvwrmsnormvectorarray = NULL;
    v->ops->nvwrmsnormmaskvectorarray = NULL;
    v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Sycl;
    v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Sycl;
  } else {
    /* disable all fused vector operations */
    v->ops->nvlinearcombination = NULL;
    v->ops->nvscaleaddmulti = NULL;
    v->ops->nvdotprodmulti = NULL;
    /* disable all vector array operations */
    v->ops->nvlinearsumvectorarray = NULL;
    v->ops->nvscalevectorarray = NULL;
    v->ops->nvconstvectorarray = NULL;
    v->ops->nvwrmsnormvectorarray = NULL;
    v->ops->nvwrmsnormmaskvectorarray = NULL;
    v->ops->nvscaleaddmultivectorarray = NULL;
    v->ops->nvlinearcombinationvectorarray = NULL;
  }
  /* return success */
  return 0;
}

/* Per-operation toggles: each installs the SYCL implementation when tf is
 * true and clears the slot (falling back to generic code) when false. */
int N_VEnableLinearCombination_Sycl(N_Vector v, booleantype tf)
{
  if (v == NULL) return -1;
  if (v->ops == NULL) return -1;
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_Sycl : NULL;
  return 0;
}

int N_VEnableScaleAddMulti_Sycl(N_Vector v, booleantype tf)
{
  if (v == NULL) return -1;
  if (v->ops == NULL) return -1;
  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_Sycl : NULL;
  return 0;
}

int N_VEnableLinearSumVectorArray_Sycl(N_Vector v, booleantype tf)
{
  if (v == NULL) return -1;
  if (v->ops == NULL) return -1;
  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_Sycl : NULL;
  return 0;
}

int N_VEnableScaleVectorArray_Sycl(N_Vector v, booleantype tf)
{
  if (v == NULL) return -1;
  if (v->ops == NULL) return -1;
  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_Sycl : NULL;
  return 0;
}

int N_VEnableConstVectorArray_Sycl(N_Vector v, booleantype tf)
{
  if (v == NULL) return -1;
  if (v->ops == NULL) return -1;
  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_Sycl : NULL;
  return 0;
}

int N_VEnableScaleAddMultiVectorArray_Sycl(N_Vector v, booleantype tf)
{
  if (v == NULL) return -1;
  if (v->ops == NULL) return -1;
  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_Sycl : NULL;
  return 0;
}

int N_VEnableLinearCombinationVectorArray_Sycl(N_Vector v, booleantype tf)
{
  if (v == NULL) return -1;
  if (v->ops == NULL) return -1;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_Sycl : NULL;
  return 0;
}

/* --------------------------------------------------------------------------
 * Private utility functions
 * -------------------------------------------------------------------------- */

/* Allocates the vector's data arrays.  With managed (UVM) memory a single
 * allocation is shared and the host pointer aliases the device pointer;
 * otherwise separate HOST and DEVICE buffers are allocated.  A zero-length
 * vector allocates nothing.  Continues onto the next line. */
static int AllocateData(N_Vector v)
{
  int alloc_fail = 0;
  N_VectorContent_Sycl vc = NVEC_SYCL_CONTENT(v);
  N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v);
  if (N_VGetLength_Sycl(v) == 0) return 0;
  if (vcp->use_managed_mem) {
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v), &(vc->device_data),
                                       NVEC_SYCL_MEMSIZE(v), SUNMEMTYPE_UVM,
                                       NVEC_SYCL_QUEUE(v));
    if (alloc_fail) {
      SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed for SUNMEMTYPE_UVM\n");
    }
    /* host view is an alias of the managed allocation, not a second buffer */
    vc->host_data = SUNMemoryHelper_Alias(vc->device_data);
  } else {
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v), &(vc->host_data),
                                       NVEC_SYCL_MEMSIZE(v), SUNMEMTYPE_HOST,
                                       NVEC_SYCL_QUEUE(v));
    if (alloc_fail) {
      SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n");
    }
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v), &(vc->device_data),
                                       NVEC_SYCL_MEMSIZE(v), SUNMEMTYPE_DEVICE,
                                       NVEC_SYCL_QUEUE(v));
    if (alloc_fail) {
      SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n");
    }
  }
  return (alloc_fail ?
-1 : 0); }

/* Allocate and initializes the internal memory used for reductions.
 * Grows the host/device reduction buffers to hold n realtype values if the
 * current allocation is too small (preferring PINNED host memory with a HOST
 * fallback), then seeds the device buffer with `value`.
 * NOTE(review): callers earlier in this file invoke this with two arguments,
 * so `n` presumably has a default (= 1) in a prototype outside this view --
 * confirm. */
static int InitializeReductionBuffer(N_Vector v, const realtype value, size_t n)
{
  int alloc_fail = 0;
  int copy_fail = 0;
  booleantype alloc_mem = SUNFALSE;
  size_t bytes = n * sizeof(realtype);
  /* Get the vector private memory structure */
  N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v);
  /* Wrap the initial value as SUNMemory object */
  SUNMemory value_mem = SUNMemoryHelper_Wrap((void*) &value, SUNMEMTYPE_HOST);
  /* check if the existing reduction memory is not large enough */
  if (vcp->reduce_buffer_bytes < bytes) {
    FreeReductionBuffer(v);
    alloc_mem = SUNTRUE;
  }
  if (alloc_mem) {
    /* allocate pinned memory on the host */
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v),
                                       &(vcp->reduce_buffer_host), bytes,
                                       SUNMEMTYPE_PINNED, NVEC_SYCL_QUEUE(v));
    if (alloc_fail) {
      SUNDIALS_DEBUG_PRINT("WARNING in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n");
      /* if pinned alloc failed, allocate plain host memory */
      alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v),
                                         &(vcp->reduce_buffer_host), bytes,
                                         SUNMEMTYPE_HOST, NVEC_SYCL_QUEUE(v));
      if (alloc_fail) {
        SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n");
      }
    }
    /* allocate device memory */
    alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v),
                                       &(vcp->reduce_buffer_dev), bytes,
                                       SUNMEMTYPE_DEVICE, NVEC_SYCL_QUEUE(v));
    if (alloc_fail) {
      SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n");
    }
  }
  if (!alloc_fail) {
    /* store the size of the reduction memory */
    vcp->reduce_buffer_bytes = bytes;
    /* initialize the memory with the value */
    copy_fail = SUNMemoryHelper_CopyAsync(NVEC_SYCL_MEMHELP(v),
                                          vcp->reduce_buffer_dev, value_mem,
                                          bytes, (void*) NVEC_SYCL_QUEUE(v));
    /* wait for copy to finish (possible bug in minimum reduction object) */
    NVEC_SYCL_QUEUE(v)->wait_and_throw();
    if (copy_fail) {
      SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_CopyAsync failed\n");
    }
  }
  /* deallocate the wrapper */
  SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(v), value_mem, NVEC_SYCL_QUEUE(v));
  return ((alloc_fail || copy_fail) ? -1 : 0);
}

/* Free the reduction memory */
static void FreeReductionBuffer(N_Vector v)
{
  N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v);
  if (vcp == NULL) return;
  /* free device mem */
  if (vcp->reduce_buffer_dev != NULL)
    SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(v), vcp->reduce_buffer_dev,
                            NVEC_SYCL_QUEUE(v));
  vcp->reduce_buffer_dev = NULL;
  /* free host mem */
  if (vcp->reduce_buffer_host != NULL)
    SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(v), vcp->reduce_buffer_host,
                            NVEC_SYCL_QUEUE(v));
  vcp->reduce_buffer_host = NULL;
  /* reset allocated memory size */
  vcp->reduce_buffer_bytes = 0;
}

/* Copy the reduction memory from the device to the host.
 * Blocks until the async copy completes so the caller may read the host
 * buffer immediately.  NOTE(review): `n` presumably defaults to 1 in a
 * prototype outside this view -- callers pass a single argument. */
static int CopyReductionBufferFromDevice(N_Vector v, size_t n)
{
  int copy_fail;
  copy_fail = SUNMemoryHelper_CopyAsync(NVEC_SYCL_MEMHELP(v),
                                        NVEC_SYCL_PRIVATE(v)->reduce_buffer_host,
                                        NVEC_SYCL_PRIVATE(v)->reduce_buffer_dev,
                                        n * sizeof(realtype),
                                        (void*) NVEC_SYCL_QUEUE(v));
  if (copy_fail) {
    SUNDIALS_DEBUG_PRINT("ERROR in CopyReductionBufferFromDevice: SUNMemoryHelper_CopyAsync returned nonzero\n");
  }
  /* synchronize with respect to the host */
  NVEC_SYCL_QUEUE(v)->wait_and_throw();
  return (copy_fail ?
-1 : 0); } static int FusedBuffer_Init(N_Vector v, int nreal, int nptr) { int alloc_fail = 0; booleantype alloc_mem = SUNFALSE; size_t bytes = nreal * sizeof(realtype) + nptr * sizeof(realtype*); /* Get the vector private memory structure */ N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v); /* Check if the existing memory is not large enough */ if (vcp->fused_buffer_bytes < bytes) { FusedBuffer_Free(v); alloc_mem = SUNTRUE; } if (alloc_mem) { /* allocate pinned memory on the host */ alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_PINNED, NVEC_SYCL_QUEUE(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("WARNING in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n"); /* if pinned alloc failed, allocate plain host memory */ alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v), &(vcp->fused_buffer_host), bytes, SUNMEMTYPE_HOST, NVEC_SYCL_QUEUE(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n"); return -1; } } /* allocate device memory */ alloc_fail = SUNMemoryHelper_Alloc(NVEC_SYCL_MEMHELP(v), &(vcp->fused_buffer_dev), bytes, SUNMEMTYPE_DEVICE, NVEC_SYCL_QUEUE(v)); if (alloc_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_Init: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n"); return -1; } /* Store the size of the fused op buffer */ vcp->fused_buffer_bytes = bytes; } /* Reset the buffer offset */ vcp->fused_buffer_offset = 0; return 0; } static int FusedBuffer_CopyRealArray(N_Vector v, realtype *rdata, int nval, realtype **shortcut) { /* Get the vector private memory structure */ N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v); /* Check buffer space and fill the host buffer */ if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyRealArray: Buffer offset is exceedes the buffer size\n"); return -1; } 
realtype* h_buffer = (realtype*) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nval; j++) { h_buffer[j] = rdata[j]; } /* Set shortcut to the device buffer and update offset*/ *shortcut = (realtype*) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); vcp->fused_buffer_offset += nval * sizeof(realtype); return 0; } static int FusedBuffer_CopyPtrArray1D(N_Vector v, N_Vector *X, int nvec, realtype ***shortcut) { /* Get the vector private memory structure */ N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v); /* Check buffer space and fill the host buffer */ if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyPtrArray1D: Buffer offset is exceedes the buffer size\n"); return -1; return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { h_buffer[j] = NVEC_SYCL_DDATAp(X[j]); } /* Set shortcut to the device buffer and update offset*/ *shortcut = (realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + vcp->fused_buffer_offset); vcp->fused_buffer_offset += nvec * sizeof(realtype*); return 0; } static int FusedBuffer_CopyPtrArray2D(N_Vector v, N_Vector **X, int nvec, int nsum, realtype ***shortcut) { /* Get the vector private memory structure */ N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v); /* Check buffer space and fill the host buffer */ if (vcp->fused_buffer_offset >= vcp->fused_buffer_bytes) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyPtrArray2D: Buffer offset is exceedes the buffer size\n"); return -1; } realtype** h_buffer = (realtype**) ((char*)(vcp->fused_buffer_host->ptr) + vcp->fused_buffer_offset); for (int j = 0; j < nvec; j++) { for (int k = 0; k < nsum; k++) { h_buffer[j * nsum + k] = NVEC_SYCL_DDATAp(X[k][j]); } } /* Set shortcut to the device buffer and update offset*/ *shortcut = (realtype**) ((char*)(vcp->fused_buffer_dev->ptr) + 
vcp->fused_buffer_offset); /* Update the offset */ vcp->fused_buffer_offset += nvec * nsum * sizeof(realtype*); return 0; } static int FusedBuffer_CopyToDevice(N_Vector v) { /* Get the vector private memory structure */ N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v); /* Copy the fused buffer to the device */ int copy_fail = SUNMemoryHelper_CopyAsync(NVEC_SYCL_MEMHELP(v), vcp->fused_buffer_dev, vcp->fused_buffer_host, vcp->fused_buffer_offset, (void*) NVEC_SYCL_QUEUE(v)); if (copy_fail) { SUNDIALS_DEBUG_PRINT("ERROR in FusedBuffer_CopyToDevice: SUNMemoryHelper_CopyAsync failed\n"); return -1; } return 0; } static int FusedBuffer_Free(N_Vector v) { N_PrivateVectorContent_Sycl vcp = NVEC_SYCL_PRIVATE(v); if (vcp == NULL) return 0; if (vcp->fused_buffer_host) { SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(v), vcp->fused_buffer_host, NVEC_SYCL_QUEUE(v)); vcp->fused_buffer_host = NULL; } if (vcp->fused_buffer_dev) { SUNMemoryHelper_Dealloc(NVEC_SYCL_MEMHELP(v), vcp->fused_buffer_dev, NVEC_SYCL_QUEUE(v)); vcp->fused_buffer_dev = NULL; } vcp->fused_buffer_bytes = 0; vcp->fused_buffer_offset = 0; return 0; } /* Get the kernel launch parameters based on the kernel type (reduction or not), * using the appropriate kernel execution policy. */ static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& nthreads_total, size_t& nthreads_per_block) { /* Get the execution policy */ SUNSyclExecPolicy* exec_policy = NULL; exec_policy = reduction ? 
NVEC_SYCL_CONTENT(v)->reduce_exec_policy : NVEC_SYCL_CONTENT(v)->stream_exec_policy; if (exec_policy == NULL) { SUNDIALS_DEBUG_ERROR("The execution policy is NULL\n"); return -1; } /* Get the number of threads per block and total number threads */ nthreads_per_block = exec_policy->blockSize(); nthreads_total = nthreads_per_block * exec_policy->gridSize(NVEC_SYCL_LENGTH(v)); if (nthreads_per_block == 0) { SUNDIALS_DEBUG_ERROR("The number of threads per block must be > 0\n"); return -1; } if (nthreads_total == 0) { SUNDIALS_DEBUG_ERROR("the total number of threads must be > 0\n"); return -1; } return 0; } } /* extern "C" */ StanHeaders/src/nvector/manyvector/0000755000176200001440000000000014511135055017111 5ustar liggesusersStanHeaders/src/nvector/manyvector/nvector_manyvector.c0000644000176200001440000020104014645137106023210 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the ManyVector implementation * of the NVECTOR package. 
* -----------------------------------------------------------------*/ #include #include #include #ifdef MANYVECTOR_BUILD_WITH_MPI #include #else #include #endif /* Macro to handle separate MPI-aware/unaware installations */ #ifdef MANYVECTOR_BUILD_WITH_MPI #define MVAPPEND(fun) fun##_MPIManyVector #else #define MVAPPEND(fun) fun##_ManyVector #endif #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* ----------------------------------------------------------------- ManyVector content accessor macros -----------------------------------------------------------------*/ #ifdef MANYVECTOR_BUILD_WITH_MPI #define MANYVECTOR_CONTENT(v) ( (N_VectorContent_MPIManyVector)(v->content) ) #define MANYVECTOR_COMM(v) ( MANYVECTOR_CONTENT(v)->comm ) #else #define MANYVECTOR_CONTENT(v) ( (N_VectorContent_ManyVector)(v->content) ) #endif #define MANYVECTOR_NUM_SUBVECS(v) ( MANYVECTOR_CONTENT(v)->num_subvectors ) #define MANYVECTOR_GLOBLENGTH(v) ( MANYVECTOR_CONTENT(v)->global_length ) #define MANYVECTOR_SUBVECS(v) ( MANYVECTOR_CONTENT(v)->subvec_array ) #define MANYVECTOR_SUBVEC(v,i) ( MANYVECTOR_SUBVECS(v)[i] ) #define MANYVECTOR_OWN_DATA(v) ( MANYVECTOR_CONTENT(v)->own_data ) /* ----------------------------------------------------------------- Prototypes of utility routines -----------------------------------------------------------------*/ static N_Vector ManyVectorClone(N_Vector w, booleantype cloneempty); #ifdef MANYVECTOR_BUILD_WITH_MPI static int SubvectorMPIRank(N_Vector w); #endif /* ----------------------------------------------------------------- ManyVector API routines -----------------------------------------------------------------*/ #ifdef MANYVECTOR_BUILD_WITH_MPI /* This function creates an MPIManyVector from a set of existing N_Vector objects, along with a user-created MPI (inter/intra)communicator that couples all subvectors together. 
*/ N_Vector N_VMake_MPIManyVector(MPI_Comm comm, sunindextype num_subvectors, N_Vector *vec_array, SUNContext sunctx) { N_Vector v; N_VectorContent_MPIManyVector content; sunindextype i, local_length; int rank, retval; /* Check that input N_Vectors are non-NULL */ if (vec_array == NULL) return(NULL); for (i=0; iops->nvgetvectorid = N_VGetVectorID_MPIManyVector; v->ops->nvcloneempty = N_VCloneEmpty_MPIManyVector; v->ops->nvclone = N_VClone_MPIManyVector; v->ops->nvdestroy = N_VDestroy_MPIManyVector; v->ops->nvspace = N_VSpace_MPIManyVector; v->ops->nvgetcommunicator = N_VGetCommunicator_MPIManyVector; v->ops->nvgetlength = N_VGetLength_MPIManyVector; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_MPIManyVector; v->ops->nvconst = N_VConst_MPIManyVector; v->ops->nvprod = N_VProd_MPIManyVector; v->ops->nvdiv = N_VDiv_MPIManyVector; v->ops->nvscale = N_VScale_MPIManyVector; v->ops->nvabs = N_VAbs_MPIManyVector; v->ops->nvinv = N_VInv_MPIManyVector; v->ops->nvaddconst = N_VAddConst_MPIManyVector; v->ops->nvdotprod = N_VDotProd_MPIManyVector; v->ops->nvmaxnorm = N_VMaxNorm_MPIManyVector; v->ops->nvwrmsnorm = N_VWrmsNorm_MPIManyVector; v->ops->nvwrmsnormmask = N_VWrmsNormMask_MPIManyVector; v->ops->nvmin = N_VMin_MPIManyVector; v->ops->nvwl2norm = N_VWL2Norm_MPIManyVector; v->ops->nvl1norm = N_VL1Norm_MPIManyVector; v->ops->nvcompare = N_VCompare_MPIManyVector; v->ops->nvinvtest = N_VInvTest_MPIManyVector; v->ops->nvconstrmask = N_VConstrMask_MPIManyVector; v->ops->nvminquotient = N_VMinQuotient_MPIManyVector; /* fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_MPIManyVector; v->ops->nvscaleaddmulti = N_VScaleAddMulti_MPIManyVector; v->ops->nvdotprodmulti = N_VDotProdMulti_MPIManyVector; /* vector array operations */ v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_MPIManyVector; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_MPIManyVector; /* local reduction operations */ v->ops->nvdotprodlocal = 
N_VDotProdLocal_MPIManyVector; v->ops->nvmaxnormlocal = N_VMaxNormLocal_MPIManyVector; v->ops->nvminlocal = N_VMinLocal_MPIManyVector; v->ops->nvl1normlocal = N_VL1NormLocal_MPIManyVector; v->ops->nvinvtestlocal = N_VInvTestLocal_MPIManyVector; v->ops->nvconstrmasklocal = N_VConstrMaskLocal_MPIManyVector; v->ops->nvminquotientlocal = N_VMinQuotientLocal_MPIManyVector; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_MPIManyVector; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_MPIManyVector; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_MPIManyVector; v->ops->nvdotprodmultiallreduce = N_VDotProdMultiAllReduce_MPIManyVector; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_MPIManyVector; v->ops->nvbufpack = N_VBufPack_MPIManyVector; v->ops->nvbufunpack = N_VBufUnpack_MPIManyVector; /* debugging functions */ v->ops->nvprint = N_VPrint_MPIManyVector; v->ops->nvprintfile = N_VPrintFile_MPIManyVector; /* Create content */ content = NULL; content = (N_VectorContent_MPIManyVector) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Attach content components */ /* set scalar content entries, and allocate/set subvector array */ content->comm = MPI_COMM_NULL; content->num_subvectors = num_subvectors; content->own_data = SUNFALSE; content->subvec_array = NULL; content->subvec_array = (N_Vector *) malloc(num_subvectors * sizeof(N_Vector)); if (content->subvec_array == NULL) { N_VDestroy(v); return(NULL); } for (i=0; isubvec_array[i] = vec_array[i]; /* duplicate input communicator (if non-NULL) */ if (comm != MPI_COMM_NULL) { retval = MPI_Comm_dup(comm, &(content->comm)); if (retval != MPI_SUCCESS) { N_VDestroy(v); return(NULL); } } /* Determine overall MPIManyVector length: sum contributions from all subvectors where this rank is the root, then perform reduction */ local_length = 0; for (i=0; iops->nvgetlength) { if (rank == 0) local_length += 
N_VGetLength(vec_array[i]); } else { N_VDestroy(v); return(NULL); } } if (content->comm != MPI_COMM_NULL) { retval = MPI_Allreduce(&local_length, &(content->global_length), 1, MPI_SUNINDEXTYPE, MPI_SUM, content->comm); if (retval != MPI_SUCCESS) { N_VDestroy(v); return(NULL); } } else { content->global_length = local_length; } return(v); } #endif #ifdef MANYVECTOR_BUILD_WITH_MPI /* This function creates an MPIManyVector from a set of existing N_Vector objects, under the requirement that all MPI-aware sub-vectors use the same MPI communicator (this is verified internally). If no sub-vector is MPI-aware, then this function will return NULL. For single-node partitioning, the regular (not MPI aware) manyvector should be used. */ N_Vector N_VNew_MPIManyVector(sunindextype num_subvectors, N_Vector *vec_array, SUNContext sunctx) { sunindextype i; booleantype nocommfound; void* tmpcomm; MPI_Comm comm, *vcomm; int retval, comparison; N_Vector v = NULL; /* Check that all subvectors have identical MPI communicators (if present) */ nocommfound = SUNTRUE; comm = MPI_COMM_NULL; for (i=0; iops->nvgetvectorid = N_VGetVectorID_ManyVector; v->ops->nvcloneempty = N_VCloneEmpty_ManyVector; v->ops->nvclone = N_VClone_ManyVector; v->ops->nvdestroy = N_VDestroy_ManyVector; v->ops->nvspace = N_VSpace_ManyVector; v->ops->nvgetlength = N_VGetLength_ManyVector; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_ManyVector; v->ops->nvconst = N_VConst_ManyVector; v->ops->nvprod = N_VProd_ManyVector; v->ops->nvdiv = N_VDiv_ManyVector; v->ops->nvscale = N_VScale_ManyVector; v->ops->nvabs = N_VAbs_ManyVector; v->ops->nvinv = N_VInv_ManyVector; v->ops->nvaddconst = N_VAddConst_ManyVector; v->ops->nvdotprod = N_VDotProdLocal_ManyVector; v->ops->nvmaxnorm = N_VMaxNormLocal_ManyVector; v->ops->nvwrmsnorm = N_VWrmsNorm_ManyVector; v->ops->nvwrmsnormmask = N_VWrmsNormMask_ManyVector; v->ops->nvmin = N_VMinLocal_ManyVector; v->ops->nvwl2norm = N_VWL2Norm_ManyVector; v->ops->nvl1norm = 
N_VL1NormLocal_ManyVector; v->ops->nvcompare = N_VCompare_ManyVector; v->ops->nvinvtest = N_VInvTestLocal_ManyVector; v->ops->nvconstrmask = N_VConstrMaskLocal_ManyVector; v->ops->nvminquotient = N_VMinQuotientLocal_ManyVector; /* fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_ManyVector; v->ops->nvscaleaddmulti = N_VScaleAddMulti_ManyVector; v->ops->nvdotprodmulti = N_VDotProdMulti_ManyVector; /* vector array operations */ v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_ManyVector; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_ManyVector; /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProdLocal_ManyVector; v->ops->nvmaxnormlocal = N_VMaxNormLocal_ManyVector; v->ops->nvminlocal = N_VMinLocal_ManyVector; v->ops->nvl1normlocal = N_VL1NormLocal_ManyVector; v->ops->nvinvtestlocal = N_VInvTestLocal_ManyVector; v->ops->nvconstrmasklocal = N_VConstrMaskLocal_ManyVector; v->ops->nvminquotientlocal = N_VMinQuotientLocal_ManyVector; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_ManyVector; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_ManyVector; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_ManyVector; /* XBraid interface operations */ v->ops->nvbufsize = N_VBufSize_ManyVector; v->ops->nvbufpack = N_VBufPack_ManyVector; v->ops->nvbufunpack = N_VBufUnpack_ManyVector; /* debugging functions */ v->ops->nvprint = N_VPrint_ManyVector; v->ops->nvprintfile = N_VPrintFile_ManyVector; /* Create content */ content = NULL; content = (N_VectorContent_ManyVector) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Attach content components */ /* allocate and set subvector array */ content->num_subvectors = num_subvectors; content->own_data = SUNFALSE; content->subvec_array = NULL; content->subvec_array = (N_Vector *) malloc(num_subvectors * sizeof(N_Vector)); if (content->subvec_array == NULL) { 
N_VDestroy(v); return(NULL); } for (i=0; isubvec_array[i] = vec_array[i]; /* Determine overall ManyVector length: sum contributions from all subvectors */ content->global_length = 0; for (i=0; iops->nvgetlength) { content->global_length += N_VGetLength(vec_array[i]); } else { N_VDestroy(v); return(NULL); } } return(v); } #endif /* This function returns the vec_num sub-N_Vector from the N_Vector array. If vec_num is outside of applicable bounds, NULL is returned. */ N_Vector MVAPPEND(N_VGetSubvector)(N_Vector v, sunindextype vec_num) { if ( (vec_num < 0) || (vec_num > MANYVECTOR_NUM_SUBVECS(v)) ) return(NULL); return(MANYVECTOR_SUBVEC(v,vec_num)); } /* This function returns data pointer for the vec_num sub-N_Vector from the N_Vector array. If vec_num is outside of applicable bounds, or if the subvector does not support the N_VGetArrayPointer routine, then NULL is returned. */ realtype *MVAPPEND(N_VGetSubvectorArrayPointer)(N_Vector v, sunindextype vec_num) { if ( (vec_num < 0) || (vec_num > MANYVECTOR_NUM_SUBVECS(v)) ) return(NULL); if ( MANYVECTOR_SUBVEC(v,vec_num)->ops->nvgetarraypointer == NULL ) return(NULL); return(N_VGetArrayPointer(MANYVECTOR_SUBVEC(v,vec_num))); } /* This function sets the data pointer for the vec_num sub-N_Vector from the N_Vector array. If vec_num is outside of applicable bounds, or if the subvector does not support the N_VSetArrayPointer routine, then -1 is returned; otherwise this routine returns 0. */ int MVAPPEND(N_VSetSubvectorArrayPointer)(realtype *v_data, N_Vector v, sunindextype vec_num) { if ( (vec_num < 0) || (vec_num > MANYVECTOR_NUM_SUBVECS(v)) ) return(-1); if ( MANYVECTOR_SUBVEC(v,vec_num)->ops->nvsetarraypointer == NULL ) return(-1); N_VSetArrayPointer(v_data, MANYVECTOR_SUBVEC(v,vec_num)); return(0); } /* This function returns the overall number of sub-vectors. It returns a locally stored integer, and is therefore a local call. 
*/ sunindextype MVAPPEND(N_VGetNumSubvectors)(N_Vector v) { return(MANYVECTOR_NUM_SUBVECS(v)); } /* ----------------------------------------------------------------- ManyVector implementations of generic NVector routines -----------------------------------------------------------------*/ /* Returns vector type ID. Used to identify vector implementation from abstract N_Vector interface. */ N_Vector_ID MVAPPEND(N_VGetVectorID)(N_Vector v) { #ifdef MANYVECTOR_BUILD_WITH_MPI return(SUNDIALS_NVEC_MPIMANYVECTOR); #else return(SUNDIALS_NVEC_MANYVECTOR); #endif } /* Prints the vector to stdout, calling Print on subvectors. */ void MVAPPEND(N_VPrint)(N_Vector x) { sunindextype i; for (i=0; icontent != NULL) { /* if subvectors are owned by v, then Destroy those */ if ((MANYVECTOR_OWN_DATA(v) == SUNTRUE) && (MANYVECTOR_SUBVECS(v) != NULL)) { for (i=0; icontent); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } /* Returns the space requirements for the ManyVector, by accumulating this information from all subvectors. */ void MVAPPEND(N_VSpace)(N_Vector v, sunindextype *lrw, sunindextype *liw) { sunindextype i, lrw1, liw1; *lrw = 0; *liw = 0; for (i=0; iops->nvspace != NULL) { N_VSpace(MANYVECTOR_SUBVEC(v,i), &lrw1, &liw1); *lrw += lrw1; *liw += liw1; } } return; } #ifdef MANYVECTOR_BUILD_WITH_MPI /* This function retrieves the MPI Communicator from an MPIManyVector object. */ void *N_VGetCommunicator_MPIManyVector(N_Vector v) { if (MANYVECTOR_COMM(v) == MPI_COMM_NULL) return NULL; else return((void *) &MANYVECTOR_COMM(v)); } #else /* This function retrieves the MPI Communicator from a ManyVector object. */ void *N_VGetCommunicator_ManyVector(N_Vector v) { return NULL; } #endif /* This function retrieves the global length of a ManyVector object. 
*/ sunindextype MVAPPEND(N_VGetLength)(N_Vector v) { return(MANYVECTOR_GLOBLENGTH(v)); } /* Performs the linear sum z = a*x + b*y by calling N_VLinearSum on all subvectors; this routine does not check that x, y and z are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. */ void MVAPPEND(N_VLinearSum)(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i; for (i=0; iops->nvdotprodlocal) { sum += N_VDotProdLocal(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(y,i)); /* otherwise, call nvdotprod and root tasks accumulate to overall sum */ } else { contrib = N_VDotProd(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(y,i)); /* get this task's rank in subvector communicator (note: serial subvectors will result in rank==0) */ rank = SubvectorMPIRank(MANYVECTOR_SUBVEC(x,i)); if (rank < 0) return(ZERO); if (rank == 0) sum += contrib; } #else /* add subvector contribution */ sum += N_VDotProd(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(y,i)); #endif } return(sum); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* Performs the dot product of two ManyVectors by calling N_VDotProdLocal and combining the results. This routine does not check that x and y are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. */ realtype N_VDotProd_MPIManyVector(N_Vector x, N_Vector y) { realtype lsum, gsum; lsum = gsum = N_VDotProdLocal_MPIManyVector(x,y); if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, MANYVECTOR_COMM(x)); return(gsum); } #endif /* Performs the MPI task-local maximum norm of a ManyVector by calling N_VMaxNormLocal on all subvectors. If any subvector does not implement the N_VMaxNormLocal routine (NULL function pointer), then this routine will call N_VMaxNorm instead. 
*/ realtype MVAPPEND(N_VMaxNormLocal)(N_Vector x) { sunindextype i; realtype max, lmax; /* initialize output*/ max = ZERO; for (i=0; iops->nvmaxnormlocal) { lmax = N_VMaxNormLocal(MANYVECTOR_SUBVEC(x,i)); max = (max > lmax) ? max : lmax; /* otherwise, call nvmaxnorm and accumulate to overall max */ } else { lmax = N_VMaxNorm(MANYVECTOR_SUBVEC(x,i)); max = (max > lmax) ? max : lmax; } } return(max); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* Performs the maximum norm of a ManyVector by calling N_VMaxNormLocal and combining the results. */ realtype N_VMaxNorm_MPIManyVector(N_Vector x) { realtype lmax, gmax; lmax = gmax = N_VMaxNormLocal_MPIManyVector(x); if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&lmax, &gmax, 1, MPI_SUNREALTYPE, MPI_MAX, MANYVECTOR_COMM(x)); return(gmax); } #endif /* Performs the MPI task-local weighted squared sum of a ManyVector by calling N_VWSqrSumLocal on all subvectors; this routine does not check that x and w are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. If any subvector does not implement the N_VWSqrSumLocal routine (NULL function pointer), then this routine will call N_VWrmsNorm and N_VGetLength to unravel the squared sum of the subvector components. It will then only accumulate this to the overall sum if this is the root task for that subvector's communicator (note: serial vectors are always root task). 
*/ realtype MVAPPEND(N_VWSqrSumLocal)(N_Vector x, N_Vector w) { sunindextype i, N; realtype sum, contrib; #ifdef MANYVECTOR_BUILD_WITH_MPI int rank; #endif /* initialize output*/ sum = ZERO; for (i=0; iops->nvwsqrsumlocal) { sum += N_VWSqrSumLocal(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(w,i)); /* otherwise, call nvwrmsnorm, and accumulate to overall sum on root task */ } else { contrib = N_VWrmsNorm(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(w,i)); /* get this task's rank in subvector communicator (note: serial subvectors will result in rank==0) */ rank = SubvectorMPIRank(MANYVECTOR_SUBVEC(x,i)); if (rank < 0) return(ZERO); if (rank == 0) { N = N_VGetLength(MANYVECTOR_SUBVEC(x,i)); sum += (contrib*contrib*N); } } #else /* accumulate subvector contribution to overall sum */ contrib = N_VWrmsNorm(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(w,i)); N = N_VGetLength(MANYVECTOR_SUBVEC(x,i)); sum += (contrib*contrib*N); #endif } return(sum); } /* Performs the WRMS norm of a ManyVector by calling N_VWSqrSumLocal and combining the results; this routine does not check that x and w are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. */ realtype MVAPPEND(N_VWrmsNorm)(N_Vector x, N_Vector w) { realtype gsum; #ifdef MANYVECTOR_BUILD_WITH_MPI realtype lsum; lsum = gsum = N_VWSqrSumLocal_MPIManyVector(x, w); if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, MANYVECTOR_COMM(x)); #else gsum = N_VWSqrSumLocal_ManyVector(x, w); #endif return(SUNRsqrt(gsum/(MANYVECTOR_GLOBLENGTH(x)))); } /* Performs the MPI task-local masked weighted squared sum of a ManyVector by calling N_VWSqrSumMaskLocal on all subvectors; this routine does not check that x, w and id are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. 
If any subvector does not implement the N_VWSqrSumLocal routine (NULL function pointer), then this routine will call N_VWrmsNormMask and N_VGetLength to unravel the masked squared sum of the subvector components. It will then only accumulate this to the overall sum if this is the root task for that subvector's communicator (note: serial vectors are always root task). */ realtype MVAPPEND(N_VWSqrSumMaskLocal)(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, contrib; #ifdef MANYVECTOR_BUILD_WITH_MPI int rank; #endif /* initialize output*/ sum = ZERO; for (i=0; iops->nvwsqrsummasklocal) { sum += N_VWSqrSumMaskLocal(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(w,i), MANYVECTOR_SUBVEC(id,i)); /* otherwise, call nvwrmsnormmask, and accumulate to overall sum on root task */ } else { contrib = N_VWrmsNormMask(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(w,i), MANYVECTOR_SUBVEC(id,i)); /* get this task's rank in subvector communicator (note: serial subvectors will result in rank==0) */ rank = SubvectorMPIRank(MANYVECTOR_SUBVEC(x,i)); if (rank < 0) return(ZERO); if (rank == 0) { N = N_VGetLength(MANYVECTOR_SUBVEC(x,i)); sum += (contrib*contrib*N); } } #else /* accumulate subvector contribution to overall sum */ contrib = N_VWrmsNormMask(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(w,i), MANYVECTOR_SUBVEC(id,i)); N = N_VGetLength(MANYVECTOR_SUBVEC(x,i)); sum += (contrib*contrib*N); #endif } return(sum); } /* Performs the masked WRMS norm of a ManyVector by calling N_VWSqrSumMaskLocal and combining the results; this routine does not check that x, w and id are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. 
*/ realtype MVAPPEND(N_VWrmsNormMask)(N_Vector x, N_Vector w, N_Vector id) { realtype gsum; #ifdef MANYVECTOR_BUILD_WITH_MPI realtype lsum; lsum = gsum = N_VWSqrSumMaskLocal_MPIManyVector(x, w, id); if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, MANYVECTOR_COMM(x)); #else gsum = N_VWSqrSumMaskLocal_ManyVector(x, w, id); #endif return(SUNRsqrt(gsum/(MANYVECTOR_GLOBLENGTH(x)))); } /* Computes the MPI task-local minimum entry of a ManyVector by calling N_VMinLocal on all subvectors. If any subvector does not implement the N_VMinLocal routine (NULL function pointer), then this routine will call N_VMin instead. */ realtype MVAPPEND(N_VMinLocal)(N_Vector x) { sunindextype i; realtype min, lmin; /* initialize output*/ min = BIG_REAL; for (i=0; iops->nvminlocal) { lmin = N_VMinLocal(MANYVECTOR_SUBVEC(x,i)); min = (min < lmin) ? min : lmin; /* otherwise, call nvmin and accumulate to overall min */ } else { lmin = N_VMin(MANYVECTOR_SUBVEC(x,i)); min = (min < lmin) ? min : lmin; } } return(min); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* Computes the minimum entry of a ManyVector by calling N_VMinLocal and combining the results. */ realtype N_VMin_MPIManyVector(N_Vector x) { realtype lmin, gmin; lmin = gmin = N_VMinLocal_MPIManyVector(x); if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&lmin, &gmin, 1, MPI_SUNREALTYPE, MPI_MIN, MANYVECTOR_COMM(x)); return(gmin); } #endif /* Performs the WL2 norm of a ManyVector by calling N_VSqrSumLocal and 'massaging' the result. This routine does not check that x and w are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. 
*/ realtype MVAPPEND(N_VWL2Norm)(N_Vector x, N_Vector w) { realtype gsum; #ifdef MANYVECTOR_BUILD_WITH_MPI realtype lsum; lsum = gsum = N_VWSqrSumLocal_MPIManyVector(x, w); if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, MANYVECTOR_COMM(x)); #else gsum = N_VWSqrSumLocal_ManyVector(x, w); #endif return(SUNRsqrt(gsum)); } /* Performs the MPI task-local L1 norm of a ManyVector by calling N_VL1NormLocal on all subvectors. If any subvector does not implement the N_VL1NormLocal routine (NULL function pointer), then this routine will call N_VL1Norm, but only accumulate the sum if this is the root task for that subvector's communicator (note: serial vectors are always root task). */ realtype MVAPPEND(N_VL1NormLocal)(N_Vector x) { sunindextype i; realtype sum; #ifdef MANYVECTOR_BUILD_WITH_MPI realtype contrib; int rank; #endif /* initialize output*/ sum = ZERO; for (i=0; iops->nvl1normlocal) { sum += N_VL1NormLocal(MANYVECTOR_SUBVEC(x,i)); /* otherwise, call nvl1norm and root tasks accumulate to overall sum */ } else { contrib = N_VL1Norm(MANYVECTOR_SUBVEC(x,i)); /* get this task's rank in subvector communicator (note: serial subvectors will result in rank==0) */ rank = SubvectorMPIRank(MANYVECTOR_SUBVEC(x,i)); if (rank < 0) return(ZERO); if (rank == 0) sum += contrib; } #else /* accumulate subvector contribution to overall sum */ sum += N_VL1Norm(MANYVECTOR_SUBVEC(x,i)); #endif } return(sum); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* Performs the L1 norm of a ManyVector by calling N_VL1NormLocal and combining the results. 
*/ realtype N_VL1Norm_MPIManyVector(N_Vector x) { realtype lsum, gsum; lsum = gsum = N_VL1NormLocal_MPIManyVector(x); if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&lsum, &gsum, 1, MPI_SUNREALTYPE, MPI_SUM, MANYVECTOR_COMM(x)); return(gsum); } #endif /* Performs N_VCompare on all subvectors; this routine does not check that x and z are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. */ void MVAPPEND(N_VCompare)(realtype c, N_Vector x, N_Vector z) { sunindextype i; for (i=0; iops->nvinvtestlocal) { subval = N_VInvTestLocal(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(z,i)); val = (val && subval); /* otherwise, call nvinvtest and accumulate to overall val */ } else { subval = N_VInvTest(MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(z,i)); val = (val && subval); } } return(val); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* Performs the InvTest for a ManyVector by calling N_VInvTestLocal and combining the results. This routine does not check that x and z are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. */ booleantype N_VInvTest_MPIManyVector(N_Vector x, N_Vector z) { realtype val, gval; val = gval = (N_VInvTestLocal_MPIManyVector(x, z)) ? ONE : ZERO; if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&val, &gval, 1, MPI_SUNREALTYPE, MPI_MIN, MANYVECTOR_COMM(x)); return (gval != ZERO); } #endif /* Performs the MPI task-local ConstrMask for a ManyVector by calling N_VConstrMaskLocal on all subvectors and combining the results appropriately. This routine does not check that c, x and m are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. If any subvector does not implement the N_VConstrMaskLocal routine (NULL function pointer), then this routine will call N_VConstrMask instead. 
*/ booleantype MVAPPEND(N_VConstrMaskLocal)(N_Vector c, N_Vector x, N_Vector m) { sunindextype i; booleantype val, subval; /* initialize output*/ val = SUNTRUE; for (i=0; iops->nvconstrmasklocal) { subval = N_VConstrMaskLocal(MANYVECTOR_SUBVEC(c,i), MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(m,i)); val = (val && subval); /* otherwise, call nvconstrmask and accumulate to overall val */ } else { subval = N_VConstrMask(MANYVECTOR_SUBVEC(c,i), MANYVECTOR_SUBVEC(x,i), MANYVECTOR_SUBVEC(m,i)); val = (val && subval); } } return(val); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* Performs the ConstrMask for a ManyVector by calling N_VConstrMaskLocal and combining the results. This routine does not check that c, x and m are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. */ booleantype N_VConstrMask_MPIManyVector(N_Vector c, N_Vector x, N_Vector m) { realtype val, gval; val = gval = (N_VConstrMaskLocal_MPIManyVector(c, x, m)) ? ONE : ZERO; if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) MPI_Allreduce(&val, &gval, 1, MPI_SUNREALTYPE, MPI_MIN, MANYVECTOR_COMM(x)); return (gval != ZERO); } #endif /* Performs the MPI task-local MinQuotient for a ManyVector by calling N_VMinQuotientLocal on all subvectors and combining the results appropriately. This routine does not check that num and denom are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. If any subvector does not implement the N_VMinQuotientLocal routine (NULL function pointer), then this routine will call N_VMinQuotient instead. */ realtype MVAPPEND(N_VMinQuotientLocal)(N_Vector num, N_Vector denom) { sunindextype i; realtype min, lmin; /* initialize output*/ min = BIG_REAL; for (i=0; iops->nvminquotientlocal) { lmin = N_VMinQuotientLocal(MANYVECTOR_SUBVEC(num,i), MANYVECTOR_SUBVEC(denom,i)); min = (min < lmin) ? 
min : lmin; /* otherwise, call nvmin and accumulate to overall min */ } else { lmin = N_VMinQuotient(MANYVECTOR_SUBVEC(num,i), MANYVECTOR_SUBVEC(denom,i)); min = (min < lmin) ? min : lmin; } } return(min); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* Performs the MinQuotient for a ManyVector by calling N_VMinQuotientLocal and combining the results. This routine does not check that num and denom are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. */ realtype N_VMinQuotient_MPIManyVector(N_Vector num, N_Vector denom) { realtype lmin, gmin; lmin = gmin = N_VMinQuotientLocal_MPIManyVector(num, denom); if (MANYVECTOR_COMM(num) != MPI_COMM_NULL) MPI_Allreduce(&lmin, &gmin, 1, MPI_SUNREALTYPE, MPI_MIN, MANYVECTOR_COMM(num)); return(gmin); } #endif /* ----------------------------------------------------------------- Single buffer reduction operations ----------------------------------------------------------------- */ int MVAPPEND(N_VDotProdMultiLocal)(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods) { int j, retval; sunindextype i; N_Vector* Ysub; realtype* contrib; /* create temporary workspace arrays */ Ysub = NULL; Ysub = (N_Vector*) malloc(nvec * sizeof(N_Vector)); if (!Ysub) return -1; contrib = NULL; contrib = (realtype*) malloc(nvec * sizeof(realtype)); if (!contrib) return -1; /* initialize output */ for (j = 0; j < nvec; j++) dotprods[j] = ZERO; /* loop over subvectors */ for (i = 0; i < MANYVECTOR_NUM_SUBVECS(x); i++) { /* extract subvectors from vector array */ for (j = 0; j < nvec; j++) Ysub[j] = MANYVECTOR_SUBVEC(Y[j], i); /* compute dot products */ retval = N_VDotProdMultiLocal(nvec, MANYVECTOR_SUBVEC(x,i), Ysub, contrib); if (retval) { free(Ysub); free(contrib); return -1; } /* accumulate contributions */ for (j = 0; j < nvec; j++) dotprods[j] += contrib[j]; } free(Ysub); free(contrib); /* return with success */ return 0; } #ifdef MANYVECTOR_BUILD_WITH_MPI int N_VDotProdMultiAllReduce_MPIManyVector(int 
nvec_total, N_Vector x, realtype* sum) { /* accumulate totals and return */ if (MANYVECTOR_COMM(x) != MPI_COMM_NULL) return(MPI_Allreduce(MPI_IN_PLACE, sum, nvec_total, MPI_SUNREALTYPE, MPI_SUM, MANYVECTOR_COMM(x))); return(-1); } #endif /* ----------------------------------------------------------------- Fused vector operations ----------------------------------------------------------------- */ /* Performs the linear combination z = sum_j c[j]*X[j] by calling N_VLinearCombination on all subvectors; this routine does not check that z or the components of X are ManyVectors, if they have the same number of subvectors, or if these subvectors are compatible. NOTE: implementation of this routine is more challenging, due to the array-of-arrays of N_Vectors that comprise X. This routine will be passed an array of ManyVectors, so to call the subvector-specific routines we must unravel the subvectors while retaining an array of outer vectors. */ int MVAPPEND(N_VLinearCombination)(int nvec, realtype* c, N_Vector* X, N_Vector z) { sunindextype i, j; int retval; N_Vector *Xsub; /* create array of nvec N_Vector pointers for reuse within loop */ Xsub = NULL; Xsub = (N_Vector *) malloc( nvec * sizeof(N_Vector) ); if (Xsub == NULL) return(1); /* perform operation by calling N_VLinearCombination for each subvector */ for (i=0; i pair */ for (i=0; iops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = MVAPPEND(N_VLinearCombination); v->ops->nvscaleaddmulti = MVAPPEND(N_VScaleAddMulti); v->ops->nvdotprodmulti = MVAPPEND(N_VDotProdMulti); /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = MVAPPEND(N_VLinearSumVectorArray); v->ops->nvscalevectorarray = MVAPPEND(N_VScaleVectorArray); v->ops->nvconstvectorarray = MVAPPEND(N_VConstVectorArray); v->ops->nvwrmsnormvectorarray = MVAPPEND(N_VWrmsNormVectorArray); v->ops->nvwrmsnormmaskvectorarray = MVAPPEND(N_VWrmsNormMaskVectorArray); 
v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = MVAPPEND(N_VDotProdMultiLocal); } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int MVAPPEND(N_VEnableLinearCombination)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = MVAPPEND(N_VLinearCombination); else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableScaleAddMulti)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = MVAPPEND(N_VScaleAddMulti); else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableDotProdMulti)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmulti = MVAPPEND(N_VDotProdMulti); else v->ops->nvdotprodmulti = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableLinearSumVectorArray)(N_Vector v, booleantype tf) { /* check that 
vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = MVAPPEND(N_VLinearSumVectorArray); else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableScaleVectorArray)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = MVAPPEND(N_VScaleVectorArray); else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableConstVectorArray)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = MVAPPEND(N_VConstVectorArray); else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableWrmsNormVectorArray)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = MVAPPEND(N_VWrmsNormVectorArray); else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableWrmsNormMaskVectorArray)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = MVAPPEND(N_VWrmsNormMaskVectorArray); else v->ops->nvwrmsnormmaskvectorarray = NULL; /* return success */ return(0); } int MVAPPEND(N_VEnableDotProdMultiLocal)(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) 
return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvdotprodmultilocal = MVAPPEND(N_VDotProdMultiLocal); else v->ops->nvdotprodmultilocal = NULL; /* return success */ return(0); } /* ----------------------------------------------------------------- Implementation of utility routines -----------------------------------------------------------------*/ /* This function performs a generic clone operation on an input N_Vector. Based on the 'cloneempty' flag it will either call "nvclone" or "nvcloneempty" when creating subvectors in the cloned vector. */ static N_Vector ManyVectorClone(N_Vector w, booleantype cloneempty) { N_Vector v; MVAPPEND(N_VectorContent) content; sunindextype i; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (MVAPPEND(N_VectorContent)) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content and ops to new vector, and return */ v->content = content; /* Attach content components */ /* Set scalar components */ #ifdef MANYVECTOR_BUILD_WITH_MPI content->comm = MPI_COMM_NULL; #endif content->num_subvectors = MANYVECTOR_NUM_SUBVECS(w); content->global_length = MANYVECTOR_GLOBLENGTH(w); content->own_data = SUNTRUE; /* Allocate the subvector array */ content->subvec_array = NULL; content->subvec_array = (N_Vector *) malloc(content->num_subvectors * sizeof(N_Vector)); if (content->subvec_array == NULL) { N_VDestroy(v); return(NULL); } /* Initialize the subvector array to NULL */ for (i=0; inum_subvectors; i++) content->subvec_array[i] = NULL; /* Duplicate the input communicator (if applicable) */ #ifdef MANYVECTOR_BUILD_WITH_MPI if (MANYVECTOR_COMM(w) != MPI_COMM_NULL) { if (MPI_Comm_dup(MANYVECTOR_COMM(w), &(content->comm)) != MPI_SUCCESS) { 
N_VDestroy(v); return(NULL); } } #endif /* Clone vectors into the subvector array */ for (i=0; inum_subvectors; i++) { if (cloneempty) { content->subvec_array[i] = N_VCloneEmpty(MANYVECTOR_SUBVEC(w,i)); } else { content->subvec_array[i] = N_VClone(MANYVECTOR_SUBVEC(w,i)); } if (content->subvec_array[i] == NULL) { N_VDestroy(v); return(NULL); } } return(v); } #ifdef MANYVECTOR_BUILD_WITH_MPI /* This function returns the rank of this task in the MPI communicator associated with the input N_Vector. If the input N_Vector is MPI-unaware, it returns 0. If an error occurs in the call to MPI_Comm_Rank, it returns -1. */ static int SubvectorMPIRank(N_Vector x) { void* tmpcomm; MPI_Comm *comm; int rank, retval; tmpcomm = N_VGetCommunicator(x); if (tmpcomm == NULL) return(0); comm = (MPI_Comm *) tmpcomm; if ((*comm) == MPI_COMM_NULL) return(0); retval = MPI_Comm_rank(*comm, &rank); if (retval != MPI_SUCCESS) return(-1); return(rank); } #endif StanHeaders/src/nvector/manyvector/fmod/0000755000176200001440000000000014511135055020036 5ustar liggesusersStanHeaders/src/nvector/manyvector/fmod/fnvector_manyvector_mod.c0000644000176200001440000006356114645137106025160 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. 
* All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED 
#endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. 
* See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" #include "nvector/nvector_manyvector.h" SWIGEXPORT N_Vector _wrap_FN_VNew_ManyVector(int64_t const *farg1, void *farg2, void *farg3) { N_Vector fresult ; sunindextype arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; SUNContext arg3 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VNew_ManyVector(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VGetSubvector_ManyVector(N_Vector farg1, int64_t const *farg2) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype arg2 ; N_Vector result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype)(*farg2); result = 
(N_Vector)N_VGetSubvector_ManyVector(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT double * _wrap_FN_VGetSubvectorArrayPointer_ManyVector(N_Vector farg1, int64_t const *farg2) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype arg2 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype)(*farg2); result = (realtype *)N_VGetSubvectorArrayPointer_ManyVector(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT int _wrap_FN_VSetSubvectorArrayPointer_ManyVector(double *farg1, N_Vector farg2, int64_t const *farg3) { int fresult ; realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; sunindextype arg3 ; int result; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); arg3 = (sunindextype)(*farg3); result = (int)N_VSetSubvectorArrayPointer_ManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetNumSubvectors_ManyVector(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetNumSubvectors_ManyVector(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VGetVectorID_ManyVector(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = (N_Vector_ID)N_VGetVectorID_ManyVector(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FN_VPrint_ManyVector(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VPrint_ManyVector(arg1); } SWIGEXPORT void _wrap_FN_VPrintFile_ManyVector(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile_ManyVector(arg1,arg2); } SWIGEXPORT N_Vector _wrap_FN_VCloneEmpty_ManyVector(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VCloneEmpty_ManyVector(arg1); fresult = 
result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VClone_ManyVector(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VClone_ManyVector(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroy_ManyVector(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VDestroy_ManyVector(arg1); } SWIGEXPORT void _wrap_FN_VSpace_ManyVector(N_Vector farg1, int64_t *farg2, int64_t *farg3) { N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; sunindextype *arg3 = (sunindextype *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); arg3 = (sunindextype *)(farg3); N_VSpace_ManyVector(arg1,arg2,arg3); } SWIGEXPORT int64_t _wrap_FN_VGetLength_ManyVector(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLength_ManyVector(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VLinearSum_ManyVector(double const *farg1, N_Vector farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); N_VLinearSum_ManyVector(arg1,arg2,arg3,arg4,arg5); } SWIGEXPORT void _wrap_FN_VConst_ManyVector(double const *farg1, N_Vector farg2) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); N_VConst_ManyVector(arg1,arg2); } SWIGEXPORT void _wrap_FN_VProd_ManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VProd_ManyVector(arg1,arg2,arg3); } SWIGEXPORT void 
_wrap_FN_VDiv_ManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VDiv_ManyVector(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VScale_ManyVector(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VScale_ManyVector(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VAbs_ManyVector(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VAbs_ManyVector(arg1,arg2); } SWIGEXPORT void _wrap_FN_VInv_ManyVector(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VInv_ManyVector(arg1,arg2); } SWIGEXPORT void _wrap_FN_VAddConst_ManyVector(N_Vector farg1, double const *farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); N_VAddConst_ManyVector(arg1,arg2,arg3); } SWIGEXPORT double _wrap_FN_VWrmsNorm_ManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWrmsNorm_ManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNormMask_ManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = 
(realtype)N_VWrmsNormMask_ManyVector(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWL2Norm_ManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWL2Norm_ManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VCompare_ManyVector(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VCompare_ManyVector(arg1,arg2,arg3); } SWIGEXPORT int _wrap_FN_VLinearCombination_ManyVector(int const *farg1, double *farg2, void *farg3, N_Vector farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); result = (int)N_VLinearCombination_ManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleAddMulti_ManyVector(int const *farg1, double *farg2, N_Vector farg3, void *farg4, void *farg5) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); result = (int)N_VScaleAddMulti_ManyVector(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMulti_ManyVector(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = 
(int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMulti_ManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearSumVectorArray_ManyVector(int const *farg1, double const *farg2, void *farg3, double const *farg4, void *farg5, void *farg6) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype arg4 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)N_VLinearSumVectorArray_ManyVector(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleVectorArray_ManyVector(int const *farg1, double *farg2, void *farg3, void *farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = (int)N_VScaleVectorArray_ManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstVectorArray_ManyVector(int const *farg1, double const *farg2, void *farg3) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)N_VConstVectorArray_ManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormVectorArray_ManyVector(int const *farg1, void *farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 
= (realtype *)(farg4); result = (int)N_VWrmsNormVectorArray_ManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormMaskVectorArray_ManyVector(int const *farg1, void *farg2, void *farg3, N_Vector farg4, double *farg5) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype *)(farg5); result = (int)N_VWrmsNormMaskVectorArray_ManyVector(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VDotProdLocal_ManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProdLocal_ManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNormLocal_ManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNormLocal_ManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinLocal_ManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMinLocal_ManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1NormLocal_ManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1NormLocal_ManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumLocal_ManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 
= (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWSqrSumLocal_ManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumMaskLocal_ManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWSqrSumMaskLocal_ManyVector(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VInvTestLocal_ManyVector(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTestLocal_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMaskLocal_ManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMaskLocal_ManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotientLocal_ManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotientLocal_ManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMultiLocal_ManyVector(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype 
*)(farg4); result = (int)N_VDotProdMultiLocal_ManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufSize_ManyVector(N_Vector farg1, int64_t *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); result = (int)N_VBufSize_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufPack_ManyVector(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufPack_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufUnpack_ManyVector(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufUnpack_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableFusedOps_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableFusedOps_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearCombination_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearCombination_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleAddMulti_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleAddMulti_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FN_VEnableDotProdMulti_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMulti_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearSumVectorArray_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearSumVectorArray_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleVectorArray_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleVectorArray_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableConstVectorArray_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableConstVectorArray_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormVectorArray_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormVectorArray_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormMaskVectorArray_ManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormMaskVectorArray_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMultiLocal_ManyVector(N_Vector farg1, int const 
*farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMultiLocal_ManyVector(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/nvector/manyvector/fmod/fnvector_manyvector_mod.f900000644000176200001440000012216614645137106025331 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fnvector_manyvector_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS public :: FN_VNew_ManyVector public :: FN_VGetSubvector_ManyVector public :: FN_VGetSubvectorArrayPointer_ManyVector public :: FN_VSetSubvectorArrayPointer_ManyVector public :: FN_VGetNumSubvectors_ManyVector public :: FN_VGetVectorID_ManyVector public :: FN_VPrint_ManyVector public :: FN_VPrintFile_ManyVector public :: FN_VCloneEmpty_ManyVector public :: FN_VClone_ManyVector public :: FN_VDestroy_ManyVector public :: FN_VSpace_ManyVector public :: FN_VGetLength_ManyVector public :: FN_VLinearSum_ManyVector public :: FN_VConst_ManyVector public :: FN_VProd_ManyVector public :: FN_VDiv_ManyVector public :: FN_VScale_ManyVector public :: FN_VAbs_ManyVector public :: FN_VInv_ManyVector public :: FN_VAddConst_ManyVector public :: FN_VWrmsNorm_ManyVector public :: FN_VWrmsNormMask_ManyVector public :: FN_VWL2Norm_ManyVector public :: FN_VCompare_ManyVector public :: FN_VLinearCombination_ManyVector public :: FN_VScaleAddMulti_ManyVector public :: FN_VDotProdMulti_ManyVector public :: FN_VLinearSumVectorArray_ManyVector public :: FN_VScaleVectorArray_ManyVector public :: FN_VConstVectorArray_ManyVector public :: FN_VWrmsNormVectorArray_ManyVector public :: FN_VWrmsNormMaskVectorArray_ManyVector public :: FN_VDotProdLocal_ManyVector public :: FN_VMaxNormLocal_ManyVector public :: FN_VMinLocal_ManyVector public :: FN_VL1NormLocal_ManyVector public :: FN_VWSqrSumLocal_ManyVector public :: FN_VWSqrSumMaskLocal_ManyVector public :: FN_VInvTestLocal_ManyVector public :: FN_VConstrMaskLocal_ManyVector public :: FN_VMinQuotientLocal_ManyVector public :: FN_VDotProdMultiLocal_ManyVector public :: FN_VBufSize_ManyVector public :: FN_VBufPack_ManyVector public :: FN_VBufUnpack_ManyVector public :: FN_VEnableFusedOps_ManyVector public :: FN_VEnableLinearCombination_ManyVector public :: FN_VEnableScaleAddMulti_ManyVector public :: FN_VEnableDotProdMulti_ManyVector public :: FN_VEnableLinearSumVectorArray_ManyVector public :: 
FN_VEnableScaleVectorArray_ManyVector public :: FN_VEnableConstVectorArray_ManyVector public :: FN_VEnableWrmsNormVectorArray_ManyVector public :: FN_VEnableWrmsNormMaskVectorArray_ManyVector public :: FN_VEnableDotProdMultiLocal_ManyVector ! WRAPPER DECLARATIONS interface function swigc_FN_VNew_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VNew_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VGetSubvector_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VGetSubvector_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VGetSubvectorArrayPointer_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VGetSubvectorArrayPointer_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VSetSubvectorArrayPointer_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSetSubvectorArrayPointer_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VGetNumSubvectors_ManyVector(farg1) & bind(C, name="_wrap_FN_VGetNumSubvectors_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FN_VGetVectorID_ManyVector(farg1) & bind(C, name="_wrap_FN_VGetVectorID_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function subroutine swigc_FN_VPrint_ManyVector(farg1) & bind(C, name="_wrap_FN_VPrint_ManyVector") use, intrinsic :: ISO_C_BINDING 
type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VCloneEmpty_ManyVector(farg1) & bind(C, name="_wrap_FN_VCloneEmpty_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VClone_ManyVector(farg1) & bind(C, name="_wrap_FN_VClone_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroy_ManyVector(farg1) & bind(C, name="_wrap_FN_VDestroy_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VSpace_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSpace_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VGetLength_ManyVector(farg1) & bind(C, name="_wrap_FN_VGetLength_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function subroutine swigc_FN_VLinearSum_ManyVector(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VLinearSum_ManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 end subroutine subroutine swigc_FN_VConst_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VConst_ManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VProd_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VProd_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), 
value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VDiv_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDiv_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VScale_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VScale_ManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VAbs_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VAbs_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VInv_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VInv_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VAddConst_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VAddConst_ManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VWrmsNorm_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VWrmsNorm_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNormMask_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWrmsNormMask_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VWL2Norm_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VWL2Norm_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function subroutine swigc_FN_VCompare_ManyVector(farg1, 
farg2, farg3) & bind(C, name="_wrap_FN_VCompare_ManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VLinearCombination_ManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VLinearCombination_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VScaleAddMulti_ManyVector(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VScaleAddMulti_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMulti_ManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMulti_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VLinearSumVectorArray_ManyVector(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FN_VLinearSumVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FN_VScaleVectorArray_ManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VScaleVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: 
fresult end function function swigc_FN_VConstVectorArray_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormVectorArray_ManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VWrmsNormVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormMaskVectorArray_ManyVector(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VWrmsNormMaskVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdLocal_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VDotProdLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNormLocal_ManyVector(farg1) & bind(C, name="_wrap_FN_VMaxNormLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VMinLocal_ManyVector(farg1) & bind(C, name="_wrap_FN_VMinLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1NormLocal_ManyVector(farg1) & bind(C, name="_wrap_FN_VL1NormLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function 
swigc_FN_VWSqrSumLocal_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VWSqrSumLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumMaskLocal_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWSqrSumMaskLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VInvTestLocal_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VInvTestLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMaskLocal_ManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMaskLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotientLocal_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotientLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VDotProdMultiLocal_ManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMultiLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VBufSize_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VBufSize_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufPack_ManyVector(farg1, farg2) & 
bind(C, name="_wrap_FN_VBufPack_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufUnpack_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VBufUnpack_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableFusedOps_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableFusedOps_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearCombination_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearCombination_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleAddMulti_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleAddMulti_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMulti_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMulti_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearSumVectorArray_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearSumVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleVectorArray_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleVectorArray_ManyVector") & result(fresult) use, intrinsic :: 
ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableConstVectorArray_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableConstVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormVectorArray_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormMaskVectorArray_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormMaskVectorArray_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMultiLocal_ManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMultiLocal_ManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! 
! MODULE SUBPROGRAMS
!
! Every procedure below is a thin, SWIG-generated shim around the matching
! bind(C) interface (swigc_FN_V*_ManyVector) declared earlier in this module.
! The marshaling pattern is uniform:
!   * scalar arguments are copied into fargN temporaries;
!   * TYPE(N_Vector) and assumed-size array arguments are converted to raw
!     C pointers with c_loc();
!   * the C wrapper is invoked;
!   * a returned C pointer, if any, is turned back into a Fortran pointer
!     with c_f_pointer().
! None of these shims adds behavior of its own; the C wrappers presumably
! forward to the corresponding N_V*_ManyVector SUNDIALS functions (the one
! wrapper body visible in this file follows exactly that pattern) -- consult
! the generated C side / SUNDIALS docs for the numerical semantics.

! Build a new ManyVector from num_subvectors subvectors; vec_array and sunctx
! are opaque C pointers. Returns a Fortran pointer to the C-allocated vector.
function FN_VNew_ManyVector(num_subvectors, vec_array, sunctx) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), pointer :: swig_result
integer(C_INT64_T), intent(in) :: num_subvectors
type(C_PTR) :: vec_array
type(C_PTR) :: sunctx
type(C_PTR) :: fresult
integer(C_INT64_T) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = num_subvectors
farg2 = vec_array
farg3 = sunctx
fresult = swigc_FN_VNew_ManyVector(farg1, farg2, farg3)
call c_f_pointer(fresult, swig_result)
end function

! Return a Fortran pointer to subvector vec_num of v.
function FN_VGetSubvector_ManyVector(v, vec_num) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), pointer :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT64_T), intent(in) :: vec_num
type(C_PTR) :: fresult
type(C_PTR) :: farg1
integer(C_INT64_T) :: farg2

farg1 = c_loc(v)
farg2 = vec_num
fresult = swigc_FN_VGetSubvector_ManyVector(farg1, farg2)
call c_f_pointer(fresult, swig_result)
end function

! Return the data array of subvector vec_num. NOTE(review): the returned
! pointer is given extent [1] below, not the true data length, so callers
! must not trust the Fortran bounds of the result.
function FN_VGetSubvectorArrayPointer_ManyVector(v, vec_num) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE), dimension(:), pointer :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT64_T), intent(in) :: vec_num
type(C_PTR) :: fresult
type(C_PTR) :: farg1
integer(C_INT64_T) :: farg2

farg1 = c_loc(v)
farg2 = vec_num
fresult = swigc_FN_VGetSubvectorArrayPointer_ManyVector(farg1, farg2)
! Placeholder extent of 1: only the base address is meaningful here.
call c_f_pointer(fresult, swig_result, [1])
end function

! Install v_data as the data array of subvector vec_num; returns the C
! status code (int).
function FN_VSetSubvectorArrayPointer_ManyVector(v_data, v, vec_num) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data
type(N_Vector), target, intent(inout) :: v
integer(C_INT64_T), intent(in) :: vec_num
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
integer(C_INT64_T) :: farg3

! Address of the first element stands in for the whole assumed-size array.
farg1 = c_loc(v_data(1))
farg2 = c_loc(v)
farg3 = vec_num
fresult = swigc_FN_VSetSubvectorArrayPointer_ManyVector(farg1, farg2, farg3)
swig_result = fresult
end function

! Number of subvectors contained in v.
function FN_VGetNumSubvectors_ManyVector(v) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT64_T) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT64_T) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(v)
fresult = swigc_FN_VGetNumSubvectors_ManyVector(farg1)
swig_result = fresult
end function

! Vector-implementation identifier (N_Vector_ID enum value) for v.
function FN_VGetVectorID_ManyVector(v) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(N_Vector_ID) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(v)
fresult = swigc_FN_VGetVectorID_ManyVector(farg1)
swig_result = fresult
end function

! Print v (output destination is decided by the C side).
subroutine FN_VPrint_ManyVector(v)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: v
type(C_PTR) :: farg1

farg1 = c_loc(v)
call swigc_FN_VPrint_ManyVector(farg1)
end subroutine

! Print v to outfile (an opaque C FILE* handle).
subroutine FN_VPrintFile_ManyVector(v, outfile)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: v
type(C_PTR) :: outfile
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(v)
farg2 = outfile
call swigc_FN_VPrintFile_ManyVector(farg1, farg2)
end subroutine

! Clone the structure of w without allocating data.
function FN_VCloneEmpty_ManyVector(w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), pointer :: swig_result
type(N_Vector), target, intent(inout) :: w
type(C_PTR) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(w)
fresult = swigc_FN_VCloneEmpty_ManyVector(farg1)
call c_f_pointer(fresult, swig_result)
end function

! Full clone of w (structure plus data allocation on the C side).
function FN_VClone_ManyVector(w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), pointer :: swig_result
type(N_Vector), target, intent(inout) :: w
type(C_PTR) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(w)
fresult = swigc_FN_VClone_ManyVector(farg1)
call c_f_pointer(fresult, swig_result)
end function

! Destroy v; the Fortran pointer is dangling afterwards.
subroutine FN_VDestroy_ManyVector(v)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: v
type(C_PTR) :: farg1

farg1 = c_loc(v)
call swigc_FN_VDestroy_ManyVector(farg1)
end subroutine

! Query real/integer workspace sizes; results are written into lrw(1)
! and liw(1) by the C side.
subroutine FN_VSpace_ManyVector(v, lrw, liw)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: v
integer(C_INT64_T), dimension(*), target, intent(inout) :: lrw
integer(C_INT64_T), dimension(*), target, intent(inout) :: liw
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c_loc(v)
farg2 = c_loc(lrw(1))
farg3 = c_loc(liw(1))
call swigc_FN_VSpace_ManyVector(farg1, farg2, farg3)
end subroutine

! Total length of v.
function FN_VGetLength_ManyVector(v) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT64_T) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT64_T) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(v)
fresult = swigc_FN_VGetLength_ManyVector(farg1)
swig_result = fresult
end function

! z := a*x + b*y (performed by the C side).
subroutine FN_VLinearSum_ManyVector(a, x, b, y, z)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE), intent(in) :: a
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE), intent(in) :: b
type(N_Vector), target, intent(inout) :: y
type(N_Vector), target, intent(inout) :: z
real(C_DOUBLE) :: farg1
type(C_PTR) :: farg2
real(C_DOUBLE) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5

farg1 = a
farg2 = c_loc(x)
farg3 = b
farg4 = c_loc(y)
farg5 = c_loc(z)
call swigc_FN_VLinearSum_ManyVector(farg1, farg2, farg3, farg4, farg5)
end subroutine

! Fill z with the constant c.
subroutine FN_VConst_ManyVector(c, z)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE), intent(in) :: c
type(N_Vector), target, intent(inout) :: z
real(C_DOUBLE) :: farg1
type(C_PTR) :: farg2

farg1 = c
farg2 = c_loc(z)
call swigc_FN_VConst_ManyVector(farg1, farg2)
end subroutine

! Elementwise product shim: z from x and y.
subroutine FN_VProd_ManyVector(x, y, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: y
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c_loc(x)
farg2 = c_loc(y)
farg3 = c_loc(z)
call swigc_FN_VProd_ManyVector(farg1, farg2, farg3)
end subroutine

! Elementwise division shim: z from x and y.
subroutine FN_VDiv_ManyVector(x, y, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: y
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c_loc(x)
farg2 = c_loc(y)
farg3 = c_loc(z)
call swigc_FN_VDiv_ManyVector(farg1, farg2, farg3)
end subroutine

! Scale shim: z from scalar c and vector x.
subroutine FN_VScale_ManyVector(c, x, z)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE), intent(in) :: c
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
real(C_DOUBLE) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c
farg2 = c_loc(x)
farg3 = c_loc(z)
call swigc_FN_VScale_ManyVector(farg1, farg2, farg3)
end subroutine

! Absolute-value shim: z from x.
subroutine FN_VAbs_ManyVector(x, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(z)
call swigc_FN_VAbs_ManyVector(farg1, farg2)
end subroutine

! Elementwise inverse shim: z from x.
subroutine FN_VInv_ManyVector(x, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(z)
call swigc_FN_VInv_ManyVector(farg1, farg2)
end subroutine

! Add-constant shim: z from x and scalar b.
subroutine FN_VAddConst_ManyVector(x, b, z)
use, intrinsic :: ISO_C_BINDING
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE), intent(in) :: b
type(N_Vector), target, intent(inout) :: z
type(C_PTR) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3

farg1 = c_loc(x)
farg2 = b
farg3 = c_loc(z)
call swigc_FN_VAddConst_ManyVector(farg1, farg2, farg3)
end subroutine

! Weighted RMS-norm shim over x with weights w; returns a double.
function FN_VWrmsNorm_ManyVector(x, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(w)
fresult = swigc_FN_VWrmsNorm_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Masked weighted RMS-norm shim (mask vector id).
function FN_VWrmsNormMask_ManyVector(x, w, id) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
type(N_Vector), target, intent(inout) :: id
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c_loc(x)
farg2 = c_loc(w)
farg3 = c_loc(id)
fresult = swigc_FN_VWrmsNormMask_ManyVector(farg1, farg2, farg3)
swig_result = fresult
end function

! Weighted L2-norm shim over x with weights w.
function FN_VWL2Norm_ManyVector(x, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(w)
fresult = swigc_FN_VWL2Norm_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Compare shim: z from threshold c and vector x.
subroutine FN_VCompare_ManyVector(c, x, z)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE), intent(in) :: c
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
real(C_DOUBLE) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c
farg2 = c_loc(x)
farg3 = c_loc(z)
call swigc_FN_VCompare_ManyVector(farg1, farg2, farg3)
end subroutine

! Fused linear combination of nvec vectors (v is an opaque C array of
! N_Vector handles); coefficients c, destination z; returns C status.
function FN_VLinearCombination_ManyVector(nvec, c, v, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), dimension(*), target, intent(inout) :: c
type(C_PTR) :: v
type(N_Vector), target, intent(inout) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4

farg1 = nvec
farg2 = c_loc(c(1))
farg3 = v
farg4 = c_loc(z)
fresult = swigc_FN_VLinearCombination_ManyVector(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Fused scale-add-multi shim: scalars a, base x, vector arrays y and z
! (opaque C handle arrays); returns C status.
function FN_VScaleAddMulti_ManyVector(nvec, a, x, y, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), dimension(*), target, intent(inout) :: a
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: y
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5

farg1 = nvec
farg2 = c_loc(a(1))
farg3 = c_loc(x)
farg4 = y
farg5 = z
fresult = swigc_FN_VScaleAddMulti_ManyVector(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Fused multi dot-product shim; results written into dotprods(1:nvec)
! by the C side; returns C status.
function FN_VDotProdMulti_ManyVector(nvec, x, y, dotprods) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: y
real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4

farg1 = nvec
farg2 = c_loc(x)
farg3 = y
farg4 = c_loc(dotprods(1))
fresult = swigc_FN_VDotProdMulti_ManyVector(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Vector-array linear sum shim: x, y, z are opaque C arrays of handles.
function FN_VLinearSumVectorArray_ManyVector(nvec, a, x, b, y, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), intent(in) :: a
type(C_PTR) :: x
real(C_DOUBLE), intent(in) :: b
type(C_PTR) :: y
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3
real(C_DOUBLE) :: farg4
type(C_PTR) :: farg5
type(C_PTR) :: farg6

farg1 = nvec
farg2 = a
farg3 = x
farg4 = b
farg5 = y
farg6 = z
fresult = swigc_FN_VLinearSumVectorArray_ManyVector(farg1, farg2, farg3, farg4, farg5, farg6)
swig_result = fresult
end function

! Vector-array scale shim: per-vector coefficients c.
function FN_VScaleVectorArray_ManyVector(nvec, c, x, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
real(C_DOUBLE), dimension(*), target, intent(inout) :: c
type(C_PTR) :: x
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4

farg1 = nvec
farg2 = c_loc(c(1))
farg3 = x
farg4 = z
fresult = swigc_FN_VScaleVectorArray_ManyVector(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Vector-array constant-fill shim.
function FN_VConstVectorArray_ManyVector(nvecs, c, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvecs
real(C_DOUBLE), intent(in) :: c
type(C_PTR) :: z
integer(C_INT) :: fresult
integer(C_INT) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3

farg1 = nvecs
farg2 = c
farg3 = z
fresult = swigc_FN_VConstVectorArray_ManyVector(farg1, farg2, farg3)
swig_result = fresult
end function

! Vector-array WRMS-norm shim; norms written into nrm(1:nvecs).
function FN_VWrmsNormVectorArray_ManyVector(nvecs, x, w, nrm) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvecs
type(C_PTR) :: x
type(C_PTR) :: w
real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4

farg1 = nvecs
farg2 = x
farg3 = w
farg4 = c_loc(nrm(1))
fresult = swigc_FN_VWrmsNormVectorArray_ManyVector(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Masked vector-array WRMS-norm shim; single mask vector id.
function FN_VWrmsNormMaskVectorArray_ManyVector(nvec, x, w, id, nrm) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
type(C_PTR) :: x
type(C_PTR) :: w
type(N_Vector), target, intent(inout) :: id
real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5

farg1 = nvec
farg2 = x
farg3 = w
farg4 = c_loc(id)
farg5 = c_loc(nrm(1))
fresult = swigc_FN_VWrmsNormMaskVectorArray_ManyVector(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Task-local dot product shim for x and y.
function FN_VDotProdLocal_ManyVector(x, y) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: y
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(y)
fresult = swigc_FN_VDotProdLocal_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Task-local max-norm shim.
function FN_VMaxNormLocal_ManyVector(x) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(x)
fresult = swigc_FN_VMaxNormLocal_ManyVector(farg1)
swig_result = fresult
end function

! Task-local minimum shim.
function FN_VMinLocal_ManyVector(x) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(x)
fresult = swigc_FN_VMinLocal_ManyVector(farg1)
swig_result = fresult
end function

! Task-local L1-norm shim.
function FN_VL1NormLocal_ManyVector(x) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(x)
fresult = swigc_FN_VL1NormLocal_ManyVector(farg1)
swig_result = fresult
end function

! Task-local weighted sum-of-squares shim.
function FN_VWSqrSumLocal_ManyVector(x, w) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(w)
fresult = swigc_FN_VWSqrSumLocal_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Task-local masked weighted sum-of-squares shim.
function FN_VWSqrSumMaskLocal_ManyVector(x, w, id) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: w
type(N_Vector), target, intent(inout) :: id
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c_loc(x)
farg2 = c_loc(w)
farg3 = c_loc(id)
fresult = swigc_FN_VWSqrSumMaskLocal_ManyVector(farg1, farg2, farg3)
swig_result = fresult
end function

! Task-local inverse-with-test shim; integer result is the C-side flag.
function FN_VInvTestLocal_ManyVector(x, z) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: z
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(z)
fresult = swigc_FN_VInvTestLocal_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Task-local constraint-mask shim; c = constraints, x = vector, m = mask out.
function FN_VConstrMaskLocal_ManyVector(c, x, m) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: c
type(N_Vector), target, intent(inout) :: x
type(N_Vector), target, intent(inout) :: m
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = c_loc(c)
farg2 = c_loc(x)
farg3 = c_loc(m)
fresult = swigc_FN_VConstrMaskLocal_ManyVector(farg1, farg2, farg3)
swig_result = fresult
end function

! Task-local minimum-quotient shim for num/denom.
function FN_VMinQuotientLocal_ManyVector(num, denom) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
type(N_Vector), target, intent(inout) :: num
type(N_Vector), target, intent(inout) :: denom
real(C_DOUBLE) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(num)
farg2 = c_loc(denom)
fresult = swigc_FN_VMinQuotientLocal_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Task-local fused multi dot-product shim; results into dotprods(1:nvec).
function FN_VDotProdMultiLocal_ManyVector(nvec, x, y, dotprods) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
integer(C_INT), intent(in) :: nvec
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: y
real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods
integer(C_INT) :: fresult
integer(C_INT) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4

farg1 = nvec
farg2 = c_loc(x)
farg3 = y
farg4 = c_loc(dotprods(1))
fresult = swigc_FN_VDotProdMultiLocal_ManyVector(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Message-buffer size query shim; size(1) receives the byte count.
function FN_VBufSize_ManyVector(x, size) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
integer(C_INT64_T), dimension(*), target, intent(inout) :: size
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = c_loc(size(1))
fresult = swigc_FN_VBufSize_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Pack x into the opaque buffer buf; returns C status.
function FN_VBufPack_ManyVector(x, buf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: buf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = buf
fresult = swigc_FN_VBufPack_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Unpack x from the opaque buffer buf; returns C status.
function FN_VBufUnpack_ManyVector(x, buf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: x
type(C_PTR) :: buf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = c_loc(x)
farg2 = buf
fresult = swigc_FN_VBufUnpack_ManyVector(farg1, farg2)
swig_result = fresult
end function

! The Enable* shims below all share one shape: tf is an int used as a
! boolean flag (presumably SUNTRUE/SUNFALSE -- confirm against SUNDIALS),
! and the int result is the C-side status code.

! Toggle all fused operations on v.
function FN_VEnableFusedOps_ManyVector(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableFusedOps_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Toggle the fused linear-combination operation on v.
function FN_VEnableLinearCombination_ManyVector(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableLinearCombination_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Toggle the fused scale-add-multi operation on v.
function FN_VEnableScaleAddMulti_ManyVector(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableScaleAddMulti_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Toggle the fused multi dot-product operation on v.
function FN_VEnableDotProdMulti_ManyVector(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableDotProdMulti_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Toggle the vector-array linear-sum operation on v.
function FN_VEnableLinearSumVectorArray_ManyVector(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableLinearSumVectorArray_ManyVector(farg1, farg2)
swig_result = fresult
end function

! Toggle the vector-array scale operation on v.
function FN_VEnableScaleVectorArray_ManyVector(v, tf) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(N_Vector), target, intent(inout) :: v
integer(C_INT), intent(in) :: tf
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = c_loc(v)
farg2 = tf
fresult = swigc_FN_VEnableScaleVectorArray_ManyVector(farg1, farg2)
swig_result = fresult
end function
function FN_VEnableConstVectorArray_ManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableConstVectorArray_ManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormVectorArray_ManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormVectorArray_ManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormMaskVectorArray_ManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormMaskVectorArray_ManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableDotProdMultiLocal_ManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableDotProdMultiLocal_ManyVector(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/nvector/manyvector/fmod/fnvector_mpimanyvector_mod.c0000644000176200001440000007332014645137106025660 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). 
* Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" #include "nvector/nvector_mpimanyvector.h" SWIGEXPORT N_Vector _wrap_FN_VMake_MPIManyVector(int const *farg1, int64_t const *farg2, void *farg3, void *farg4) { N_Vector fresult ; MPI_Comm arg1 ; sunindextype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; SUNContext arg4 = (SUNContext) 0 ; N_Vector result; #ifdef SUNDIALS_MPI_ENABLED arg1 = MPI_Comm_f2c((MPI_Fint)(*farg1)); #else arg1 = *farg1; #endif arg2 = (sunindextype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (SUNContext)(farg4); result = (N_Vector)N_VMake_MPIManyVector(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VNew_MPIManyVector(int64_t const *farg1, void *farg2, void *farg3) { N_Vector fresult ; sunindextype arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; 
SUNContext arg3 = (SUNContext) 0 ; N_Vector result; arg1 = (sunindextype)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (SUNContext)(farg3); result = (N_Vector)N_VNew_MPIManyVector(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VGetSubvector_MPIManyVector(N_Vector farg1, int64_t const *farg2) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype arg2 ; N_Vector result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype)(*farg2); result = (N_Vector)N_VGetSubvector_MPIManyVector(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT double * _wrap_FN_VGetSubvectorArrayPointer_MPIManyVector(N_Vector farg1, int64_t const *farg2) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype arg2 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype)(*farg2); result = (realtype *)N_VGetSubvectorArrayPointer_MPIManyVector(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT int _wrap_FN_VSetSubvectorArrayPointer_MPIManyVector(double *farg1, N_Vector farg2, int64_t const *farg3) { int fresult ; realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; sunindextype arg3 ; int result; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); arg3 = (sunindextype)(*farg3); result = (int)N_VSetSubvectorArrayPointer_MPIManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetNumSubvectors_MPIManyVector(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetNumSubvectors_MPIManyVector(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VGetVectorID_MPIManyVector(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = (N_Vector_ID)N_VGetVectorID_MPIManyVector(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FN_VPrint_MPIManyVector(N_Vector farg1) { N_Vector arg1 = (N_Vector) 
0 ; arg1 = (N_Vector)(farg1); N_VPrint_MPIManyVector(arg1); } SWIGEXPORT void _wrap_FN_VPrintFile_MPIManyVector(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile_MPIManyVector(arg1,arg2); } SWIGEXPORT N_Vector _wrap_FN_VCloneEmpty_MPIManyVector(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VCloneEmpty_MPIManyVector(arg1); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VClone_MPIManyVector(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VClone_MPIManyVector(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroy_MPIManyVector(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VDestroy_MPIManyVector(arg1); } SWIGEXPORT void _wrap_FN_VSpace_MPIManyVector(N_Vector farg1, int64_t *farg2, int64_t *farg3) { N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; sunindextype *arg3 = (sunindextype *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); arg3 = (sunindextype *)(farg3); N_VSpace_MPIManyVector(arg1,arg2,arg3); } SWIGEXPORT void * _wrap_FN_VGetCommunicator_MPIManyVector(N_Vector farg1) { void * fresult ; N_Vector arg1 = (N_Vector) 0 ; void *result = 0 ; arg1 = (N_Vector)(farg1); result = (void *)N_VGetCommunicator_MPIManyVector(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLength_MPIManyVector(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLength_MPIManyVector(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VLinearSum_MPIManyVector(double const *farg1, N_Vector farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { realtype arg1 ; N_Vector arg2 = 
(N_Vector) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); N_VLinearSum_MPIManyVector(arg1,arg2,arg3,arg4,arg5); } SWIGEXPORT void _wrap_FN_VConst_MPIManyVector(double const *farg1, N_Vector farg2) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); N_VConst_MPIManyVector(arg1,arg2); } SWIGEXPORT void _wrap_FN_VProd_MPIManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VProd_MPIManyVector(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VDiv_MPIManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VDiv_MPIManyVector(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VScale_MPIManyVector(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VScale_MPIManyVector(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VAbs_MPIManyVector(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VAbs_MPIManyVector(arg1,arg2); } SWIGEXPORT void _wrap_FN_VInv_MPIManyVector(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VInv_MPIManyVector(arg1,arg2); } SWIGEXPORT void _wrap_FN_VAddConst_MPIManyVector(N_Vector farg1, double const *farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 
0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); N_VAddConst_MPIManyVector(arg1,arg2,arg3); } SWIGEXPORT double _wrap_FN_VDotProd_MPIManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProd_MPIManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNorm_MPIManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNorm_MPIManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNorm_MPIManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWrmsNorm_MPIManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNormMask_MPIManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWrmsNormMask_MPIManyVector(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMin_MPIManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMin_MPIManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWL2Norm_MPIManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = 
(N_Vector)(farg2); result = (realtype)N_VWL2Norm_MPIManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1Norm_MPIManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1Norm_MPIManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VCompare_MPIManyVector(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VCompare_MPIManyVector(arg1,arg2,arg3); } SWIGEXPORT int _wrap_FN_VInvTest_MPIManyVector(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTest_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMask_MPIManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMask_MPIManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotient_MPIManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotient_MPIManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearCombination_MPIManyVector(int const *farg1, double *farg2, void *farg3, N_Vector farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; 
arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); result = (int)N_VLinearCombination_MPIManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleAddMulti_MPIManyVector(int const *farg1, double *farg2, N_Vector farg3, void *farg4, void *farg5) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); result = (int)N_VScaleAddMulti_MPIManyVector(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMulti_MPIManyVector(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMulti_MPIManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMultiLocal_MPIManyVector(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMultiLocal_MPIManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMultiAllReduce_MPIManyVector(int const *farg1, N_Vector farg2, double *farg3) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype *arg3 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype *)(farg3); 
result = (int)N_VDotProdMultiAllReduce_MPIManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearSumVectorArray_MPIManyVector(int const *farg1, double const *farg2, void *farg3, double const *farg4, void *farg5, void *farg6) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype arg4 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)N_VLinearSumVectorArray_MPIManyVector(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleVectorArray_MPIManyVector(int const *farg1, double *farg2, void *farg3, void *farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = (int)N_VScaleVectorArray_MPIManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstVectorArray_MPIManyVector(int const *farg1, double const *farg2, void *farg3) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)N_VConstVectorArray_MPIManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormVectorArray_MPIManyVector(int const *farg1, void *farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = 
(int)N_VWrmsNormVectorArray_MPIManyVector(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormMaskVectorArray_MPIManyVector(int const *farg1, void *farg2, void *farg3, N_Vector farg4, double *farg5) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype *)(farg5); result = (int)N_VWrmsNormMaskVectorArray_MPIManyVector(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VDotProdLocal_MPIManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProdLocal_MPIManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNormLocal_MPIManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNormLocal_MPIManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinLocal_MPIManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMinLocal_MPIManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1NormLocal_MPIManyVector(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1NormLocal_MPIManyVector(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumLocal_MPIManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; 
arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWSqrSumLocal_MPIManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumMaskLocal_MPIManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWSqrSumMaskLocal_MPIManyVector(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VInvTestLocal_MPIManyVector(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTestLocal_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMaskLocal_MPIManyVector(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMaskLocal_MPIManyVector(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotientLocal_MPIManyVector(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotientLocal_MPIManyVector(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufSize_MPIManyVector(N_Vector farg1, int64_t *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); result = (int)N_VBufSize_MPIManyVector(arg1,arg2); fresult = (int)(result); 
return fresult; } SWIGEXPORT int _wrap_FN_VBufPack_MPIManyVector(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufPack_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufUnpack_MPIManyVector(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufUnpack_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableFusedOps_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableFusedOps_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearCombination_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearCombination_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleAddMulti_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleAddMulti_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMulti_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMulti_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableLinearSumVectorArray_MPIManyVector(N_Vector farg1, int const *farg2) { 
int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableLinearSumVectorArray_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableScaleVectorArray_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableScaleVectorArray_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableConstVectorArray_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableConstVectorArray_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormVectorArray_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormVectorArray_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableWrmsNormMaskVectorArray_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableWrmsNormMaskVectorArray_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VEnableDotProdMultiLocal_MPIManyVector(N_Vector farg1, int const *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); result = (int)N_VEnableDotProdMultiLocal_MPIManyVector(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/nvector/manyvector/fmod/fnvector_mpimanyvector_mod.f900000644000176200001440000014167014645137106026040 0ustar 
liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fnvector_mpimanyvector_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FN_VMake_MPIManyVector public :: FN_VNew_MPIManyVector public :: FN_VGetSubvector_MPIManyVector public :: FN_VGetSubvectorArrayPointer_MPIManyVector public :: FN_VSetSubvectorArrayPointer_MPIManyVector public :: FN_VGetNumSubvectors_MPIManyVector public :: FN_VGetVectorID_MPIManyVector public :: FN_VPrint_MPIManyVector public :: FN_VPrintFile_MPIManyVector public :: FN_VCloneEmpty_MPIManyVector public :: FN_VClone_MPIManyVector public :: FN_VDestroy_MPIManyVector public :: FN_VSpace_MPIManyVector public :: FN_VGetCommunicator_MPIManyVector public :: FN_VGetLength_MPIManyVector public :: FN_VLinearSum_MPIManyVector public :: FN_VConst_MPIManyVector public :: FN_VProd_MPIManyVector public :: FN_VDiv_MPIManyVector public :: FN_VScale_MPIManyVector public :: FN_VAbs_MPIManyVector public :: FN_VInv_MPIManyVector public :: FN_VAddConst_MPIManyVector public :: FN_VDotProd_MPIManyVector public :: FN_VMaxNorm_MPIManyVector public :: FN_VWrmsNorm_MPIManyVector public :: FN_VWrmsNormMask_MPIManyVector public :: FN_VMin_MPIManyVector public :: 
FN_VWL2Norm_MPIManyVector public :: FN_VL1Norm_MPIManyVector public :: FN_VCompare_MPIManyVector public :: FN_VInvTest_MPIManyVector public :: FN_VConstrMask_MPIManyVector public :: FN_VMinQuotient_MPIManyVector public :: FN_VLinearCombination_MPIManyVector public :: FN_VScaleAddMulti_MPIManyVector public :: FN_VDotProdMulti_MPIManyVector public :: FN_VDotProdMultiLocal_MPIManyVector public :: FN_VDotProdMultiAllReduce_MPIManyVector public :: FN_VLinearSumVectorArray_MPIManyVector public :: FN_VScaleVectorArray_MPIManyVector public :: FN_VConstVectorArray_MPIManyVector public :: FN_VWrmsNormVectorArray_MPIManyVector public :: FN_VWrmsNormMaskVectorArray_MPIManyVector public :: FN_VDotProdLocal_MPIManyVector public :: FN_VMaxNormLocal_MPIManyVector public :: FN_VMinLocal_MPIManyVector public :: FN_VL1NormLocal_MPIManyVector public :: FN_VWSqrSumLocal_MPIManyVector public :: FN_VWSqrSumMaskLocal_MPIManyVector public :: FN_VInvTestLocal_MPIManyVector public :: FN_VConstrMaskLocal_MPIManyVector public :: FN_VMinQuotientLocal_MPIManyVector public :: FN_VBufSize_MPIManyVector public :: FN_VBufPack_MPIManyVector public :: FN_VBufUnpack_MPIManyVector public :: FN_VEnableFusedOps_MPIManyVector public :: FN_VEnableLinearCombination_MPIManyVector public :: FN_VEnableScaleAddMulti_MPIManyVector public :: FN_VEnableDotProdMulti_MPIManyVector public :: FN_VEnableLinearSumVectorArray_MPIManyVector public :: FN_VEnableScaleVectorArray_MPIManyVector public :: FN_VEnableConstVectorArray_MPIManyVector public :: FN_VEnableWrmsNormVectorArray_MPIManyVector public :: FN_VEnableWrmsNormMaskVectorArray_MPIManyVector public :: FN_VEnableDotProdMultiLocal_MPIManyVector ! 
WRAPPER DECLARATIONS interface function swigc_FN_VMake_MPIManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VMake_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 integer(C_INT64_T), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FN_VNew_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VNew_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FN_VGetSubvector_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VGetSubvector_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VGetSubvectorArrayPointer_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VGetSubvectorArrayPointer_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VSetSubvectorArrayPointer_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSetSubvectorArrayPointer_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VGetNumSubvectors_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VGetNumSubvectors_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FN_VGetVectorID_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VGetVectorID_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end 
function subroutine swigc_FN_VPrint_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VPrint_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VCloneEmpty_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VCloneEmpty_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VClone_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VClone_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroy_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VDestroy_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VSpace_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSpace_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VGetCommunicator_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VGetCommunicator_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VGetLength_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VGetLength_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function subroutine swigc_FN_VLinearSum_MPIManyVector(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VLinearSum_MPIManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value 
:: farg5 end subroutine subroutine swigc_FN_VConst_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VConst_MPIManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VProd_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VProd_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VDiv_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDiv_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VScale_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VScale_MPIManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VAbs_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VAbs_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VInv_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VInv_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VAddConst_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VAddConst_MPIManyVector") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VDotProd_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VDotProd_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNorm_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VMaxNorm_MPIManyVector") & result(fresult) use, 
intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNorm_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VWrmsNorm_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNormMask_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWrmsNormMask_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VMin_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VMin_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWL2Norm_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VWL2Norm_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1Norm_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VL1Norm_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_FN_VCompare_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VCompare_MPIManyVector") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VInvTest_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VInvTest_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMask_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMask_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), 
value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotient_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotient_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VLinearCombination_MPIManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VLinearCombination_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VScaleAddMulti_MPIManyVector(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VScaleAddMulti_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMulti_MPIManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMulti_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMultiLocal_MPIManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMultiLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMultiAllReduce_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDotProdMultiAllReduce_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: 
farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VLinearSumVectorArray_MPIManyVector(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FN_VLinearSumVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FN_VScaleVectorArray_MPIManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VScaleVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VConstVectorArray_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormVectorArray_MPIManyVector(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VWrmsNormVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormMaskVectorArray_MPIManyVector(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VWrmsNormMaskVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function 
swigc_FN_VDotProdLocal_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VDotProdLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNormLocal_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VMaxNormLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VMinLocal_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VMinLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1NormLocal_MPIManyVector(farg1) & bind(C, name="_wrap_FN_VL1NormLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumLocal_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VWSqrSumLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumMaskLocal_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWSqrSumMaskLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VInvTestLocal_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VInvTestLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMaskLocal_MPIManyVector(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMaskLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: 
farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotientLocal_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotientLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VBufSize_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VBufSize_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufPack_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VBufPack_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufUnpack_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VBufUnpack_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableFusedOps_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableFusedOps_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearCombination_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearCombination_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleAddMulti_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleAddMulti_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMulti_MPIManyVector(farg1, farg2) & bind(C, 
name="_wrap_FN_VEnableDotProdMulti_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableLinearSumVectorArray_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableLinearSumVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableScaleVectorArray_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableScaleVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableConstVectorArray_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableConstVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormVectorArray_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableWrmsNormMaskVectorArray_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableWrmsNormMaskVectorArray_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VEnableDotProdMultiLocal_MPIManyVector(farg1, farg2) & bind(C, name="_wrap_FN_VEnableDotProdMultiLocal_MPIManyVector") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FN_VMake_MPIManyVector(comm, num_subvectors, vec_array, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer :: comm integer(C_INT64_T), intent(in) :: num_subvectors type(C_PTR) :: vec_array type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 integer(C_INT64_T) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = int(comm, C_INT) farg2 = num_subvectors farg3 = vec_array farg4 = sunctx fresult = swigc_FN_VMake_MPIManyVector(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FN_VNew_MPIManyVector(num_subvectors, vec_array, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result integer(C_INT64_T), intent(in) :: num_subvectors type(C_PTR) :: vec_array type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = num_subvectors farg2 = vec_array farg3 = sunctx fresult = swigc_FN_VNew_MPIManyVector(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FN_VGetSubvector_MPIManyVector(v, vec_num) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), intent(in) :: vec_num type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT64_T) :: farg2 farg1 = c_loc(v) farg2 = vec_num fresult = swigc_FN_VGetSubvector_MPIManyVector(farg1, farg2) call c_f_pointer(fresult, swig_result) end function function FN_VGetSubvectorArrayPointer_MPIManyVector(v, vec_num) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), intent(in) :: vec_num type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT64_T) :: farg2 farg1 = c_loc(v) farg2 = vec_num fresult = 
swigc_FN_VGetSubvectorArrayPointer_MPIManyVector(farg1, farg2) call c_f_pointer(fresult, swig_result, [1]) end function function FN_VSetSubvectorArrayPointer_MPIManyVector(v_data, v, vec_num) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), intent(in) :: vec_num integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT64_T) :: farg3 farg1 = c_loc(v_data(1)) farg2 = c_loc(v) farg3 = vec_num fresult = swigc_FN_VSetSubvectorArrayPointer_MPIManyVector(farg1, farg2, farg3) swig_result = fresult end function function FN_VGetNumSubvectors_MPIManyVector(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetNumSubvectors_MPIManyVector(farg1) swig_result = fresult end function function FN_VGetVectorID_MPIManyVector(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(N_Vector_ID) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetVectorID_MPIManyVector(farg1) swig_result = fresult end function subroutine FN_VPrint_MPIManyVector(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VPrint_MPIManyVector(farg1) end subroutine subroutine FN_VPrintFile_MPIManyVector(v, outfile) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v) farg2 = outfile call swigc_FN_VPrintFile_MPIManyVector(farg1, farg2) end subroutine function FN_VCloneEmpty_MPIManyVector(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result 
type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VCloneEmpty_MPIManyVector(farg1) call c_f_pointer(fresult, swig_result) end function function FN_VClone_MPIManyVector(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VClone_MPIManyVector(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FN_VDestroy_MPIManyVector(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VDestroy_MPIManyVector(farg1) end subroutine subroutine FN_VSpace_MPIManyVector(v, lrw, liw) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), dimension(*), target, intent(inout) :: lrw integer(C_INT64_T), dimension(*), target, intent(inout) :: liw type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(v) farg2 = c_loc(lrw(1)) farg3 = c_loc(liw(1)) call swigc_FN_VSpace_MPIManyVector(farg1, farg2, farg3) end subroutine function FN_VGetCommunicator_MPIManyVector(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetCommunicator_MPIManyVector(farg1) swig_result = fresult end function function FN_VGetLength_MPIManyVector(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLength_MPIManyVector(farg1) swig_result = fresult end function subroutine FN_VLinearSum_MPIManyVector(a, x, b, y, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: a type(N_Vector), target, intent(inout) 
:: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = a farg2 = c_loc(x) farg3 = b farg4 = c_loc(y) farg5 = c_loc(z) call swigc_FN_VLinearSum_MPIManyVector(farg1, farg2, farg3, farg4, farg5) end subroutine subroutine FN_VConst_MPIManyVector(c, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(z) call swigc_FN_VConst_MPIManyVector(farg1, farg2) end subroutine subroutine FN_VProd_MPIManyVector(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VProd_MPIManyVector(farg1, farg2, farg3) end subroutine subroutine FN_VDiv_MPIManyVector(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VDiv_MPIManyVector(farg1, farg2, farg3) end subroutine subroutine FN_VScale_MPIManyVector(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VScale_MPIManyVector(farg1, farg2, farg3) end subroutine subroutine FN_VAbs_MPIManyVector(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: 
farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VAbs_MPIManyVector(farg1, farg2) end subroutine subroutine FN_VInv_MPIManyVector(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VInv_MPIManyVector(farg1, farg2) end subroutine subroutine FN_VAddConst_MPIManyVector(x, b, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = b farg3 = c_loc(z) call swigc_FN_VAddConst_MPIManyVector(farg1, farg2, farg3) end subroutine function FN_VDotProd_MPIManyVector(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProd_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VMaxNorm_MPIManyVector(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNorm_MPIManyVector(farg1) swig_result = fresult end function function FN_VWrmsNorm_MPIManyVector(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWrmsNorm_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VWrmsNormMask_MPIManyVector(x, w, id) & result(swig_result) use, 
intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWrmsNormMask_MPIManyVector(farg1, farg2, farg3) swig_result = fresult end function function FN_VMin_MPIManyVector(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMin_MPIManyVector(farg1) swig_result = fresult end function function FN_VWL2Norm_MPIManyVector(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWL2Norm_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VL1Norm_MPIManyVector(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1Norm_MPIManyVector(farg1) swig_result = fresult end function subroutine FN_VCompare_MPIManyVector(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VCompare_MPIManyVector(farg1, farg2, farg3) end subroutine function FN_VInvTest_MPIManyVector(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x 
type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = swigc_FN_VInvTest_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VConstrMask_MPIManyVector(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult = swigc_FN_VConstrMask_MPIManyVector(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotient_MPIManyVector(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotient_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VLinearCombination_MPIManyVector(nvec, c, v, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: v type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = v farg4 = c_loc(z) fresult = swigc_FN_VLinearCombination_MPIManyVector(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VScaleAddMulti_MPIManyVector(nvec, a, x, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: a 
type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvec farg2 = c_loc(a(1)) farg3 = c_loc(x) farg4 = y farg5 = z fresult = swigc_FN_VScaleAddMulti_MPIManyVector(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VDotProdMulti_MPIManyVector(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMulti_MPIManyVector(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VDotProdMultiLocal_MPIManyVector(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMultiLocal_MPIManyVector(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VDotProdMultiAllReduce_MPIManyVector(nvec_total, x, sum) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec_total type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), dimension(*), target, intent(inout) :: sum integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = 
nvec_total farg2 = c_loc(x) farg3 = c_loc(sum(1)) fresult = swigc_FN_VDotProdMultiAllReduce_MPIManyVector(farg1, farg2, farg3) swig_result = fresult end function function FN_VLinearSumVectorArray_MPIManyVector(nvec, a, x, b, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), intent(in) :: a type(C_PTR) :: x real(C_DOUBLE), intent(in) :: b type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = nvec farg2 = a farg3 = x farg4 = b farg5 = y farg6 = z fresult = swigc_FN_VLinearSumVectorArray_MPIManyVector(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FN_VScaleVectorArray_MPIManyVector(nvec, c, x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: x type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = x farg4 = z fresult = swigc_FN_VScaleVectorArray_MPIManyVector(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VConstVectorArray_MPIManyVector(nvecs, c, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvecs real(C_DOUBLE), intent(in) :: c type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = nvecs farg2 = c farg3 = z fresult = swigc_FN_VConstVectorArray_MPIManyVector(farg1, farg2, farg3) swig_result = fresult end function function FN_VWrmsNormVectorArray_MPIManyVector(nvecs, x, w, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), 
intent(in) :: nvecs type(C_PTR) :: x type(C_PTR) :: w real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvecs farg2 = x farg3 = w farg4 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormVectorArray_MPIManyVector(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VWrmsNormMaskVectorArray_MPIManyVector(nvec, x, w, id, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(C_PTR) :: x type(C_PTR) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvec farg2 = x farg3 = w farg4 = c_loc(id) farg5 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormMaskVectorArray_MPIManyVector(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VDotProdLocal_MPIManyVector(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProdLocal_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VMaxNormLocal_MPIManyVector(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNormLocal_MPIManyVector(farg1) swig_result = fresult end function function FN_VMinLocal_MPIManyVector(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult 
type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMinLocal_MPIManyVector(farg1) swig_result = fresult end function function FN_VL1NormLocal_MPIManyVector(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1NormLocal_MPIManyVector(farg1) swig_result = fresult end function function FN_VWSqrSumLocal_MPIManyVector(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWSqrSumLocal_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VWSqrSumMaskLocal_MPIManyVector(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWSqrSumMaskLocal_MPIManyVector(farg1, farg2, farg3) swig_result = fresult end function function FN_VInvTestLocal_MPIManyVector(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = swigc_FN_VInvTestLocal_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VConstrMaskLocal_MPIManyVector(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, 
intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult = swigc_FN_VConstrMaskLocal_MPIManyVector(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotientLocal_MPIManyVector(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotientLocal_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VBufSize_MPIManyVector(x, size) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x integer(C_INT64_T), dimension(*), target, intent(inout) :: size integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(size(1)) fresult = swigc_FN_VBufSize_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VBufPack_MPIManyVector(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufPack_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VBufUnpack_MPIManyVector(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufUnpack_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableFusedOps_MPIManyVector(v, tf) & 
result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableFusedOps_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearCombination_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearCombination_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableScaleAddMulti_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleAddMulti_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableDotProdMulti_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableDotProdMulti_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableLinearSumVectorArray_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableLinearSumVectorArray_MPIManyVector(farg1, 
farg2) swig_result = fresult end function function FN_VEnableScaleVectorArray_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableScaleVectorArray_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableConstVectorArray_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableConstVectorArray_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormVectorArray_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormVectorArray_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableWrmsNormMaskVectorArray_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableWrmsNormMaskVectorArray_MPIManyVector(farg1, farg2) swig_result = fresult end function function FN_VEnableDotProdMultiLocal_MPIManyVector(v, tf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT), intent(in) :: tf integer(C_INT) :: fresult 
type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(v) farg2 = tf fresult = swigc_FN_VEnableDotProdMultiLocal_MPIManyVector(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/init.c0000644000176200001440000000066514571443743014374 0ustar liggesusers#include #include #include #include #include static const R_CallMethodDef CallEntries[] = { {NULL, NULL, 0} }; void attribute_visible R_init_StanHeaders(DllInfo *dll) { // next line is necessary to avoid a NOTE from R CMD check R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, TRUE); // necessary for .onLoad() to work } StanHeaders/src/sunmatrix/0000755000176200001440000000000014511135055015274 5ustar liggesusersStanHeaders/src/sunmatrix/dense/0000755000176200001440000000000014645137104016377 5ustar liggesusersStanHeaders/src/sunmatrix/dense/sunmatrix_dense.c0000644000176200001440000002147014645137106021761 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * David Gardner @ LLNL * Based on code sundials_dense.c by: Scott D. Cohen, * Alan C. Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the dense implementation of * the SUNMATRIX package. 
* -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* Private function prototypes */ static booleantype SMCompatible_Dense(SUNMatrix A, SUNMatrix B); static booleantype SMCompatible2_Dense(SUNMatrix A, N_Vector x, N_Vector y); /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new dense matrix */ SUNMatrix SUNDenseMatrix(sunindextype M, sunindextype N, SUNContext sunctx) { SUNMatrix A; SUNMatrixContent_Dense content; sunindextype j; /* return with NULL matrix on illegal dimension input */ if ( (M <= 0) || (N <= 0) ) return(NULL); /* Create an empty matrix object */ A = NULL; A = SUNMatNewEmpty(sunctx); if (A == NULL) return(NULL); /* Attach operations */ A->ops->getid = SUNMatGetID_Dense; A->ops->clone = SUNMatClone_Dense; A->ops->destroy = SUNMatDestroy_Dense; A->ops->zero = SUNMatZero_Dense; A->ops->copy = SUNMatCopy_Dense; A->ops->scaleadd = SUNMatScaleAdd_Dense; A->ops->scaleaddi = SUNMatScaleAddI_Dense; A->ops->matvec = SUNMatMatvec_Dense; A->ops->space = SUNMatSpace_Dense; /* Create content */ content = NULL; content = (SUNMatrixContent_Dense) malloc(sizeof *content); if (content == NULL) { SUNMatDestroy(A); return(NULL); } /* Attach content */ A->content = content; /* Fill content */ content->M = M; content->N = N; content->ldata = M*N; content->data = NULL; content->cols = NULL; /* Allocate content */ content->data = (realtype *) calloc(M * N, sizeof(realtype)); if (content->data == NULL) { SUNMatDestroy(A); return(NULL); } content->cols = (realtype **) malloc(N * sizeof(realtype *)); if (content->cols == NULL) { SUNMatDestroy(A); return(NULL); } for (j=0; jcols[j] = content->data + j * M; return(A); } /* 
---------------------------------------------------------------------------- * Function to print the dense matrix */ void SUNDenseMatrix_Print(SUNMatrix A, FILE* outfile) { sunindextype i, j; /* should not be called unless A is a dense matrix; otherwise return immediately */ if (SUNMatGetID(A) != SUNMATRIX_DENSE) return; /* perform operation */ STAN_SUNDIALS_FPRINTF(outfile,"\n"); for (i=0; isunctx); return(B); } void SUNMatDestroy_Dense(SUNMatrix A) { if (A == NULL) return; /* free content */ if (A->content != NULL) { /* free data array */ if (SM_DATA_D(A) != NULL) { free(SM_DATA_D(A)); SM_DATA_D(A) = NULL; } /* free column pointers */ if (SM_CONTENT_D(A)->cols != NULL) { free(SM_CONTENT_D(A)->cols); SM_CONTENT_D(A)->cols = NULL; } /* free content struct */ free(A->content); A->content = NULL; } /* free ops and matrix */ if (A->ops) { free(A->ops); A->ops = NULL; } free(A); A = NULL; return; } int SUNMatZero_Dense(SUNMatrix A) { sunindextype i; realtype *Adata; /* Perform operation */ Adata = SM_DATA_D(A); for (i=0; i 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 
4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_matrix.h" #include "sunmatrix/sunmatrix_dense.h" SWIGEXPORT SUNMatrix _wrap_FSUNDenseMatrix(int64_t const *farg1, int64_t const *farg2, void *farg3) { SUNMatrix fresult ; sunindextype arg1 ; sunindextype arg2 ; SUNContext arg3 = (SUNContext) 0 ; SUNMatrix result; arg1 = (sunindextype)(*farg1); arg2 = (sunindextype)(*farg2); arg3 = (SUNContext)(farg3); result = (SUNMatrix)SUNDenseMatrix(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNDenseMatrix_Print(SUNMatrix farg1, void *farg2) { SUNMatrix arg1 = (SUNMatrix) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (SUNMatrix)(farg1); arg2 = (FILE *)(farg2); SUNDenseMatrix_Print(arg1,arg2); } SWIGEXPORT int64_t _wrap_FSUNDenseMatrix_Rows(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = 
(SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNDenseMatrix_Rows(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNDenseMatrix_Columns(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNDenseMatrix_Columns(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNDenseMatrix_LData(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNDenseMatrix_LData(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT double * _wrap_FSUNDenseMatrix_Data(SUNMatrix farg1) { double * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; realtype *result = 0 ; arg1 = (SUNMatrix)(farg1); result = (realtype *)SUNDenseMatrix_Data(arg1); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FSUNDenseMatrix_Cols(SUNMatrix farg1) { void * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; realtype **result = 0 ; arg1 = (SUNMatrix)(farg1); result = (realtype **)SUNDenseMatrix_Cols(arg1); fresult = result; return fresult; } SWIGEXPORT double * _wrap_FSUNDenseMatrix_Column(SUNMatrix farg1, int64_t const *farg2) { double * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype arg2 ; realtype *result = 0 ; arg1 = (SUNMatrix)(farg1); arg2 = (sunindextype)(*farg2); result = (realtype *)SUNDenseMatrix_Column(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNMatGetID_Dense(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix_ID result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix_ID)SUNMatGetID_Dense(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT SUNMatrix _wrap_FSUNMatClone_Dense(SUNMatrix farg1) { SUNMatrix fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix)SUNMatClone_Dense(arg1); fresult = result; return fresult; } SWIGEXPORT 
void _wrap_FSUNMatDestroy_Dense(SUNMatrix farg1) { SUNMatrix arg1 = (SUNMatrix) 0 ; arg1 = (SUNMatrix)(farg1); SUNMatDestroy_Dense(arg1); } SWIGEXPORT int _wrap_FSUNMatZero_Dense(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); result = (int)SUNMatZero_Dense(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatCopy_Dense(SUNMatrix farg1, SUNMatrix farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatCopy_Dense(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAdd_Dense(double const *farg1, SUNMatrix farg2, SUNMatrix farg3) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; SUNMatrix arg3 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); arg3 = (SUNMatrix)(farg3); result = (int)SUNMatScaleAdd_Dense(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAddI_Dense(double const *farg1, SUNMatrix farg2) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatScaleAddI_Dense(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatMatvec_Dense(SUNMatrix farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNMatMatvec_Dense(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatSpace_Dense(SUNMatrix farg1, long *farg2, long *farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (long *)(farg2); arg3 = 
(long *)(farg3); result = (int)SUNMatSpace_Dense(arg1,arg2,arg3); fresult = (int)(result); return fresult; } StanHeaders/src/sunmatrix/sparse/0000755000176200001440000000000014511135055016571 5ustar liggesusersStanHeaders/src/sunmatrix/sparse/sunmatrix_sparse.c0000644000176200001440000010365514645137106022365 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * David Gardner @ LLNL * Based on code sundials_sparse.c by: Carol Woodward and * Slaven Peles @ LLNL, and Daniel R. Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the sparse implementation of * the SUNMATRIX package. 
* ----------------------------------------------------------------- */ #include #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* Private function prototypes */ static booleantype SMCompatible_Sparse(SUNMatrix A, SUNMatrix B); static booleantype SMCompatible2_Sparse(SUNMatrix A, N_Vector x, N_Vector y); static int Matvec_SparseCSC(SUNMatrix A, N_Vector x, N_Vector y); static int Matvec_SparseCSR(SUNMatrix A, N_Vector x, N_Vector y); static int format_convert(const SUNMatrix A, SUNMatrix B); /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* * ================================================================== * Private function prototypes (functions working on SlsMat) * ================================================================== */ /* ---------------------------------------------------------------------------- * Function to create a new sparse matrix */ SUNMatrix SUNSparseMatrix(sunindextype M, sunindextype N, sunindextype NNZ, int sparsetype, SUNContext sunctx) { SUNMatrix A; SUNMatrixContent_Sparse content; /* return with NULL matrix on illegal input */ if ( (M <= 0) || (N <= 0) || (NNZ < 0) ) return(NULL); if ( (sparsetype != CSC_MAT) && (sparsetype != CSR_MAT) ) return(NULL); /* Create an empty matrix object */ A = NULL; A = SUNMatNewEmpty(sunctx); if (A == NULL) return(NULL); /* Attach operations */ A->ops->getid = SUNMatGetID_Sparse; A->ops->clone = SUNMatClone_Sparse; A->ops->destroy = SUNMatDestroy_Sparse; A->ops->zero = SUNMatZero_Sparse; A->ops->copy = SUNMatCopy_Sparse; A->ops->scaleadd = SUNMatScaleAdd_Sparse; A->ops->scaleaddi = SUNMatScaleAddI_Sparse; A->ops->matvec = SUNMatMatvec_Sparse; A->ops->space = SUNMatSpace_Sparse; /* Create content */ content = NULL; content = (SUNMatrixContent_Sparse) malloc(sizeof *content); if (content == NULL) { SUNMatDestroy(A); return(NULL); } /* Attach content 
*/ A->content = content; /* Fill content */ content->sparsetype = sparsetype; content->M = M; content->N = N; content->NNZ = NNZ; switch(sparsetype){ case CSC_MAT: content->NP = N; content->rowvals = &(content->indexvals); content->colptrs = &(content->indexptrs); /* CSR indices */ content->colvals = NULL; content->rowptrs = NULL; break; case CSR_MAT: content->NP = M; content->colvals = &(content->indexvals); content->rowptrs = &(content->indexptrs); /* CSC indices */ content->rowvals = NULL; content->colptrs = NULL; } content->data = NULL; content->indexvals = NULL; content->indexptrs = NULL; /* Allocate content */ content->data = (realtype *) calloc(NNZ, sizeof(realtype)); if (content->data == NULL) { SUNMatDestroy(A); return(NULL); } content->indexvals = (sunindextype *) calloc(NNZ, sizeof(sunindextype)); if (content->indexvals == NULL) { SUNMatDestroy(A); return(NULL); } content->indexptrs = (sunindextype *) calloc((content->NP + 1), sizeof(sunindextype)); if (content->indexptrs == NULL) { SUNMatDestroy(A); return(NULL); } content->indexptrs[content->NP] = 0; return(A); } /* ---------------------------------------------------------------------------- * Function to create a new sparse matrix from an existing dense matrix * by copying all nonzero values into the sparse matrix structure. Returns NULL * if the request for matrix storage cannot be satisfied. 
*/ SUNMatrix SUNSparseFromDenseMatrix(SUNMatrix Ad, realtype droptol, int sparsetype) { sunindextype i, j, nnz; sunindextype M, N; SUNMatrix As; /* check for legal sparsetype, droptol and input matrix type */ if ( (sparsetype != CSR_MAT) && (sparsetype != CSC_MAT) ) return NULL; if ( droptol < ZERO ) return NULL; if (SUNMatGetID(Ad) != SUNMATRIX_DENSE) return NULL; /* set size of new matrix */ M = SM_ROWS_D(Ad); N = SM_COLUMNS_D(Ad); /* determine total number of nonzeros */ nnz = 0; for (j=0; j droptol); /* allocate sparse matrix */ As = NULL; As = SUNSparseMatrix(M, N, nnz, sparsetype, Ad->sunctx); if (As == NULL) return NULL; /* copy nonzeros from Ad into As, based on CSR/CSC type */ nnz = 0; if (sparsetype == CSC_MAT) { for (j=0; j droptol ) { (SM_INDEXVALS_S(As))[nnz] = i; (SM_DATA_S(As))[nnz++] = SM_ELEMENT_D(Ad,i,j); } } } (SM_INDEXPTRS_S(As))[N] = nnz; } else { /* CSR_MAT */ for (i=0; i droptol ) { (SM_INDEXVALS_S(As))[nnz] = j; (SM_DATA_S(As))[nnz++] = SM_ELEMENT_D(Ad,i,j); } } } (SM_INDEXPTRS_S(As))[M] = nnz; } return(As); } /* ---------------------------------------------------------------------------- * Function to create a new sparse matrix from an existing band matrix * by copying all nonzero values into the sparse matrix structure. Returns NULL * if the request for matrix storage cannot be satisfied. 
*/ SUNMatrix SUNSparseFromBandMatrix(SUNMatrix Ad, realtype droptol, int sparsetype) { sunindextype i, j, nnz; sunindextype M, N; SUNMatrix As; /* check for legal sparsetype, droptol and input matrix type */ if ( (sparsetype != CSR_MAT) && (sparsetype != CSC_MAT) ) return NULL; if ( droptol < ZERO ) return NULL; if (SUNMatGetID(Ad) != SUNMATRIX_BAND) return NULL; /* set size of new matrix */ M = SM_ROWS_B(Ad); N = SM_COLUMNS_B(Ad); /* determine total number of nonzeros */ nnz = 0; for (j=0; j droptol); /* allocate sparse matrix */ As = SUNSparseMatrix(M, N, nnz, sparsetype, Ad->sunctx); if (As == NULL) return NULL; /* copy nonzeros from Ad into As, based on CSR/CSC type */ nnz = 0; if (sparsetype == CSC_MAT) { for (j=0; j droptol ) { (SM_INDEXVALS_S(As))[nnz] = i; (SM_DATA_S(As))[nnz++] = SM_ELEMENT_B(Ad,i,j); } } } (SM_INDEXPTRS_S(As))[N] = nnz; } else { /* CSR_MAT */ for (i=0; i droptol ) { (SM_INDEXVALS_S(As))[nnz] = j; (SM_DATA_S(As))[nnz++] = SM_ELEMENT_B(Ad,i,j); } } } (SM_INDEXPTRS_S(As))[M] = nnz; } return(As); } /* ---------------------------------------------------------------------------- * Function to create a new CSR matrix from a CSC matrix. */ int SUNSparseMatrix_ToCSR(const SUNMatrix A, SUNMatrix* Bout) { if (A == NULL) return(SUNMAT_ILL_INPUT); if (SM_SPARSETYPE_S(A) != CSC_MAT) return(SUNMAT_ILL_INPUT); *Bout = SUNSparseMatrix(SM_ROWS_S(A), SM_COLUMNS_S(A), SM_NNZ_S(A), CSR_MAT, A->sunctx); if (*Bout == NULL) return(SUNMAT_MEM_FAIL); return format_convert(A, *Bout); } /* ---------------------------------------------------------------------------- * Function to create a new CSC matrix from a CSR matrix. 
*/ int SUNSparseMatrix_ToCSC(const SUNMatrix A, SUNMatrix* Bout) { if (A == NULL) return(SUNMAT_ILL_INPUT); if (SM_SPARSETYPE_S(A) != CSR_MAT) return(SUNMAT_ILL_INPUT); *Bout = SUNSparseMatrix(SM_ROWS_S(A), SM_COLUMNS_S(A), SM_NNZ_S(A), CSC_MAT, A->sunctx); if (*Bout == NULL) return(SUNMAT_MEM_FAIL); return format_convert(A, *Bout); } /* ---------------------------------------------------------------------------- * Function to reallocate internal sparse matrix storage arrays so that the * resulting sparse matrix holds indexptrs[NP] nonzeros. Returns 0 on success * and 1 on failure (e.g. if A does not have sparse type, or if nnz is negative) */ int SUNSparseMatrix_Realloc(SUNMatrix A) { sunindextype nzmax; /* check for valid matrix type */ if (SUNMatGetID(A) != SUNMATRIX_SPARSE) return SUNMAT_ILL_INPUT; /* get total number of nonzeros (return with failure if illegal) */ nzmax = (SM_INDEXPTRS_S(A))[SM_NP_S(A)]; if (nzmax < 0) return SUNMAT_ILL_INPUT; /* perform reallocation */ SM_INDEXVALS_S(A) = (sunindextype *) realloc(SM_INDEXVALS_S(A), nzmax*sizeof(sunindextype)); SM_DATA_S(A) = (realtype *) realloc(SM_DATA_S(A), nzmax*sizeof(realtype)); SM_NNZ_S(A) = nzmax; return SUNMAT_SUCCESS; } /* ---------------------------------------------------------------------------- * Function to reallocate internal sparse matrix storage arrays so that the * resulting sparse matrix has storage for a specified number of nonzeros. * Returns 0 on success and 1 on failure (e.g. 
if A does not have sparse type, * or if nnz is negative) */ int SUNSparseMatrix_Reallocate(SUNMatrix A, sunindextype NNZ) { /* check for valid matrix type */ if (SUNMatGetID(A) != SUNMATRIX_SPARSE) return SUNMAT_ILL_INPUT; /* check for valid nnz */ if (NNZ < 0) return SUNMAT_ILL_INPUT; /* perform reallocation */ SM_INDEXVALS_S(A) = (sunindextype *) realloc(SM_INDEXVALS_S(A), NNZ*sizeof(sunindextype)); SM_DATA_S(A) = (realtype *) realloc(SM_DATA_S(A), NNZ*sizeof(realtype)); SM_NNZ_S(A) = NNZ; return SUNMAT_SUCCESS; } /* ---------------------------------------------------------------------------- * Function to print the sparse matrix */ void SUNSparseMatrix_Print(SUNMatrix A, FILE* outfile) { sunindextype i, j; char *matrixtype; char *indexname; /* should not be called unless A is a sparse matrix; otherwise return immediately */ if (SUNMatGetID(A) != SUNMATRIX_SPARSE) return; /* perform operation */ if (SM_SPARSETYPE_S(A) == CSC_MAT) { indexname = (char*) "col"; matrixtype = (char*) "CSC"; } else { indexname = (char*) "row"; matrixtype = (char*) "CSR"; } STAN_SUNDIALS_FPRINTF(outfile, "\n"); STAN_SUNDIALS_FPRINTF(outfile, "%ld by %ld %s matrix, NNZ: %ld \n", (long int) SM_ROWS_S(A), (long int) SM_COLUMNS_S(A), matrixtype, (long int) SM_NNZ_S(A)); for (j=0; jsunctx); return(B); } void SUNMatDestroy_Sparse(SUNMatrix A) { if (A == NULL) return; /* free content */ if (A->content != NULL) { /* free data array */ if (SM_DATA_S(A)) { free(SM_DATA_S(A)); SM_DATA_S(A) = NULL; } /* free index values array */ if (SM_INDEXVALS_S(A)) { free(SM_INDEXVALS_S(A)); SM_INDEXVALS_S(A) = NULL; SM_CONTENT_S(A)->rowvals = NULL; SM_CONTENT_S(A)->colvals = NULL; } /* free index pointers array */ if (SM_INDEXPTRS_S(A)) { free(SM_INDEXPTRS_S(A)); SM_INDEXPTRS_S(A) = NULL; SM_CONTENT_S(A)->colptrs = NULL; SM_CONTENT_S(A)->rowptrs = NULL; } /* free content struct */ free(A->content); A->content = NULL; } /* free ops and matrix */ if (A->ops) { free(A->ops); A->ops = NULL; } free(A); A = NULL; 
return; } int SUNMatZero_Sparse(SUNMatrix A) { sunindextype i; /* Perform operation */ for (i=0; i (SM_NNZ_S(A) - Ap[N])) newmat = SUNTRUE; /* perform operation based on existing/necessary structure */ /* case 1: A already contains a diagonal */ if (newvals == 0) { /* iterate through columns, adding 1.0 to diagonal */ for (j=0; j < SUNMIN(M,N); j++) for (i=Ap[j]; i=0; j--) { /* clear out temporary arrays for this column (row) */ for (i=0; i=0; i--) { if ( w[i] > 0 ) { Ai[--nz] = i; Ax[nz] = x[i]; } } /* store ptr past this col (row) from orig A, update value for new A */ cend = Ap[j]; Ap[j] = nz; } /* clean up */ free(w); free(x); /* case 3: A must be reallocated with sufficient storage */ } else { /* create work arrays for nonzero indices and values */ w = (sunindextype *) malloc(M * sizeof(sunindextype)); x = (realtype *) malloc(M * sizeof(realtype)); /* create new matrix for sum */ C = SUNSparseMatrix(SM_ROWS_S(A), SM_COLUMNS_S(A), Ap[N] + newvals, SM_SPARSETYPE_S(A), A->sunctx); /* access data from CSR structures (return if failure) */ Cp = Ci = NULL; Cx = NULL; if (SM_INDEXPTRS_S(C)) Cp = SM_INDEXPTRS_S(C); else return (SUNMAT_MEM_FAIL); if (SM_INDEXVALS_S(C)) Ci = SM_INDEXVALS_S(C); else return (SUNMAT_MEM_FAIL); if (SM_DATA_S(C)) Cx = SM_DATA_S(C); else return (SUNMAT_MEM_FAIL); /* initialize total nonzero count */ nz = 0; /* iterate through columns (rows for CSR) */ for (j=0; j 0 ) { Ci[nz] = i; Cx[nz++] = x[i]; } } } /* indicate end of data */ Cp[N] = nz; /* update A's structure with C's values; nullify C's pointers */ SM_NNZ_S(A) = SM_NNZ_S(C); if (SM_DATA_S(A)) free(SM_DATA_S(A)); SM_DATA_S(A) = SM_DATA_S(C); SM_DATA_S(C) = NULL; if (SM_INDEXVALS_S(A)) free(SM_INDEXVALS_S(A)); SM_INDEXVALS_S(A) = SM_INDEXVALS_S(C); SM_INDEXVALS_S(C) = NULL; if (SM_INDEXPTRS_S(A)) free(SM_INDEXPTRS_S(A)); SM_INDEXPTRS_S(A) = SM_INDEXPTRS_S(C); SM_INDEXPTRS_S(C) = NULL; /* clean up */ SUNMatDestroy_Sparse(C); free(w); free(x); } return SUNMAT_SUCCESS; } int 
SUNMatScaleAdd_Sparse(realtype c, SUNMatrix A, SUNMatrix B) { sunindextype j, i, p, nz, newvals, M, N, cend; booleantype newmat; sunindextype *w, *Ap, *Ai, *Bp, *Bi, *Cp, *Ci; realtype *x, *Ax, *Bx, *Cx; SUNMatrix C; /* Verify that A and B are compatible */ if (!SMCompatible_Sparse(A, B)) return SUNMAT_ILL_INPUT; /* store shortcuts to matrix dimensions (M is inner dimension, N is outer) */ if (SM_SPARSETYPE_S(A) == CSC_MAT) { M = SM_ROWS_S(A); N = SM_COLUMNS_S(A); } else { M = SM_COLUMNS_S(A); N = SM_ROWS_S(A); } /* access data arrays from A and B (return if failure) */ Ap = Ai = Bp = Bi = NULL; Ax = Bx = NULL; if (SM_INDEXPTRS_S(A)) Ap = SM_INDEXPTRS_S(A); else return(SUNMAT_MEM_FAIL); if (SM_INDEXVALS_S(A)) Ai = SM_INDEXVALS_S(A); else return(SUNMAT_MEM_FAIL); if (SM_DATA_S(A)) Ax = SM_DATA_S(A); else return(SUNMAT_MEM_FAIL); if (SM_INDEXPTRS_S(B)) Bp = SM_INDEXPTRS_S(B); else return(SUNMAT_MEM_FAIL); if (SM_INDEXVALS_S(B)) Bi = SM_INDEXVALS_S(B); else return(SUNMAT_MEM_FAIL); if (SM_DATA_S(B)) Bx = SM_DATA_S(B); else return(SUNMAT_MEM_FAIL); /* create work arrays for row indices and nonzero column values */ w = (sunindextype *) malloc(M * sizeof(sunindextype)); x = (realtype *) malloc(M * sizeof(realtype)); /* determine if A already contains the sparsity pattern of B */ newvals = 0; for (j=0; j (SM_NNZ_S(A) - Ap[N])) newmat = SUNTRUE; /* perform operation based on existing/necessary structure */ /* case 1: A already contains sparsity pattern of B */ if (newvals == 0) { /* iterate through columns, adding matrices */ for (j=0; j=0; j--) { /* clear out temporary arrays for this column (row) */ for (i=0; i=0; i--) { if ( w[i] > 0 ) { Ai[--nz] = i; Ax[nz] = x[i]; } } /* store ptr past this col (row) from orig A, update value for new A */ cend = Ap[j]; Ap[j] = nz; } /* case 3: A must be reallocated with sufficient storage */ } else { /* create new matrix for sum */ C = SUNSparseMatrix(SM_ROWS_S(A), SM_COLUMNS_S(A), Ap[N] + newvals, SM_SPARSETYPE_S(A), A->sunctx); /* 
access data from CSR structures (return if failure) */ Cp = Ci = NULL; Cx = NULL; if (SM_INDEXPTRS_S(C)) Cp = SM_INDEXPTRS_S(C); else return(SUNMAT_MEM_FAIL); if (SM_INDEXVALS_S(C)) Ci = SM_INDEXVALS_S(C); else return(SUNMAT_MEM_FAIL); if (SM_DATA_S(C)) Cx = SM_DATA_S(C); else return(SUNMAT_MEM_FAIL); /* initialize total nonzero count */ nz = 0; /* iterate through columns (rows) */ for (j=0; j 0 ) { Ci[nz] = i; Cx[nz++] = x[i]; } } } /* indicate end of data */ Cp[N] = nz; /* update A's structure with C's values; nullify C's pointers */ SM_NNZ_S(A) = SM_NNZ_S(C); free(SM_DATA_S(A)); SM_DATA_S(A) = SM_DATA_S(C); SM_DATA_S(C) = NULL; free(SM_INDEXVALS_S(A)); SM_INDEXVALS_S(A) = SM_INDEXVALS_S(C); SM_INDEXVALS_S(C) = NULL; free(SM_INDEXPTRS_S(A)); SM_INDEXPTRS_S(A) = SM_INDEXPTRS_S(C); SM_INDEXPTRS_S(C) = NULL; /* clean up */ SUNMatDestroy_Sparse(C); } /* clean up */ free(w); free(x); /* return success */ return(0); } int SUNMatMatvec_Sparse(SUNMatrix A, N_Vector x, N_Vector y) { /* Verify that A, x and y are compatible */ if (!SMCompatible2_Sparse(A, x, y)) return SUNMAT_ILL_INPUT; /* Perform operation */ if(SM_SPARSETYPE_S(A) == CSC_MAT) return Matvec_SparseCSC(A, x, y); else return Matvec_SparseCSR(A, x, y); } int SUNMatSpace_Sparse(SUNMatrix A, long int *lenrw, long int *leniw) { *lenrw = SM_NNZ_S(A); *leniw = 10 + SM_NP_S(A) + SM_NNZ_S(A); return SUNMAT_SUCCESS; } /* * ================================================================= * private functions * ================================================================= */ /* ----------------------------------------------------------------- * Function to check compatibility of two sparse SUNMatrix objects */ static booleantype SMCompatible_Sparse(SUNMatrix A, SUNMatrix B) { /* both matrices must be sparse */ if ( (SUNMatGetID(A) != SUNMATRIX_SPARSE) || (SUNMatGetID(B) != SUNMATRIX_SPARSE) ) return SUNFALSE; /* both matrices must have the same shape and sparsity type */ if (SUNSparseMatrix_Rows(A) != 
SUNSparseMatrix_Rows(B)) return SUNFALSE; if (SUNSparseMatrix_Columns(A) != SUNSparseMatrix_Columns(B)) return SUNFALSE; if (SM_SPARSETYPE_S(A) != SM_SPARSETYPE_S(B)) return SUNFALSE; return SUNTRUE; } /* ----------------------------------------------------------------- * Function to check compatibility of a SUNMatrix object with two * N_Vectors (A*x = b) */ static booleantype SMCompatible2_Sparse(SUNMatrix A, N_Vector x, N_Vector y) { /* vectors must implement N_VGetArrayPointer */ if ( (x->ops->nvgetarraypointer == NULL) || (y->ops->nvgetarraypointer == NULL) ) return SUNFALSE; /* Verify that the dimensions of A, x, and y agree */ if ( (SUNSparseMatrix_Columns(A) != N_VGetLength(x)) || (SUNSparseMatrix_Rows(A) != N_VGetLength(y)) ) return SUNFALSE; return SUNTRUE; } /* ----------------------------------------------------------------- * Computes y=A*x, where A is a CSC SUNMatrix_Sparse of dimension MxN, x is a * compatible N_Vector object of length N, and y is a compatible * N_Vector object of length M. * * Returns 0 if successful, 1 if unsuccessful (failed memory access, or both * x and y are the same vector). 
*/ int Matvec_SparseCSC(SUNMatrix A, N_Vector x, N_Vector y) { sunindextype i, j; sunindextype *Ap, *Ai; realtype *Ax, *xd, *yd; /* access data from CSC structure (return if failure) */ Ap = SM_INDEXPTRS_S(A); Ai = SM_INDEXVALS_S(A); Ax = SM_DATA_S(A); if ((Ap == NULL) || (Ai == NULL) || (Ax == NULL)) return SUNMAT_MEM_FAIL; /* access vector data (return if failure) */ xd = N_VGetArrayPointer(x); yd = N_VGetArrayPointer(y); if ((xd == NULL) || (yd == NULL) || (xd == yd) ) return SUNMAT_MEM_FAIL; /* initialize result */ for (i=0; i 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif 
/* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. 
* * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_matrix.h" #include "sunmatrix/sunmatrix_sparse.h" SWIGEXPORT SUNMatrix _wrap_FSUNSparseMatrix(int64_t const *farg1, int64_t const *farg2, int64_t const *farg3, int const *farg4, void *farg5) { SUNMatrix fresult ; sunindextype arg1 ; sunindextype arg2 ; sunindextype arg3 ; int arg4 ; SUNContext arg5 = (SUNContext) 0 ; SUNMatrix result; arg1 = (sunindextype)(*farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (int)(*farg4); arg5 = (SUNContext)(farg5); result = (SUNMatrix)SUNSparseMatrix(arg1,arg2,arg3,arg4,arg5); fresult = result; return fresult; } SWIGEXPORT SUNMatrix _wrap_FSUNSparseFromDenseMatrix(SUNMatrix farg1, double const *farg2, int const *farg3) { SUNMatrix fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; realtype arg2 ; int arg3 ; SUNMatrix result; arg1 = (SUNMatrix)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); result = (SUNMatrix)SUNSparseFromDenseMatrix(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT SUNMatrix _wrap_FSUNSparseFromBandMatrix(SUNMatrix farg1, double const *farg2, int const *farg3) { SUNMatrix fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; realtype arg2 ; int arg3 ; SUNMatrix result; arg1 = (SUNMatrix)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); result = (SUNMatrix)SUNSparseFromBandMatrix(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNSparseMatrix_ToCSR(SUNMatrix farg1, void *farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) (SUNMatrix)0 ; SUNMatrix *arg2 = (SUNMatrix *) 0 ; int result; arg1 = 
(SUNMatrix)(farg1); arg2 = (SUNMatrix *)(farg2); result = (int)SUNSparseMatrix_ToCSR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNSparseMatrix_ToCSC(SUNMatrix farg1, void *farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) (SUNMatrix)0 ; SUNMatrix *arg2 = (SUNMatrix *) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (SUNMatrix *)(farg2); result = (int)SUNSparseMatrix_ToCSC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNSparseMatrix_Realloc(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); result = (int)SUNSparseMatrix_Realloc(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNSparseMatrix_Reallocate(SUNMatrix farg1, int64_t const *farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype arg2 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (sunindextype)(*farg2); result = (int)SUNSparseMatrix_Reallocate(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FSUNSparseMatrix_Print(SUNMatrix farg1, void *farg2) { SUNMatrix arg1 = (SUNMatrix) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (SUNMatrix)(farg1); arg2 = (FILE *)(farg2); SUNSparseMatrix_Print(arg1,arg2); } SWIGEXPORT int64_t _wrap_FSUNSparseMatrix_Rows(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNSparseMatrix_Rows(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNSparseMatrix_Columns(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNSparseMatrix_Columns(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNSparseMatrix_NNZ(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNSparseMatrix_NNZ(arg1); fresult = 
(sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNSparseMatrix_NP(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNSparseMatrix_NP(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNSparseMatrix_SparseType(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); result = (int)SUNSparseMatrix_SparseType(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double * _wrap_FSUNSparseMatrix_Data(SUNMatrix farg1) { double * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; realtype *result = 0 ; arg1 = (SUNMatrix)(farg1); result = (realtype *)SUNSparseMatrix_Data(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t * _wrap_FSUNSparseMatrix_IndexValues(SUNMatrix farg1) { int64_t * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype *result = 0 ; arg1 = (SUNMatrix)(farg1); result = (sunindextype *)SUNSparseMatrix_IndexValues(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t * _wrap_FSUNSparseMatrix_IndexPointers(SUNMatrix farg1) { int64_t * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype *result = 0 ; arg1 = (SUNMatrix)(farg1); result = (sunindextype *)SUNSparseMatrix_IndexPointers(arg1); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNMatGetID_Sparse(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix_ID result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix_ID)SUNMatGetID_Sparse(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT SUNMatrix _wrap_FSUNMatClone_Sparse(SUNMatrix farg1) { SUNMatrix fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix)SUNMatClone_Sparse(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNMatDestroy_Sparse(SUNMatrix farg1) { SUNMatrix arg1 = (SUNMatrix) 0 ; arg1 = (SUNMatrix)(farg1); SUNMatDestroy_Sparse(arg1); } 
SWIGEXPORT int _wrap_FSUNMatZero_Sparse(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); result = (int)SUNMatZero_Sparse(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatCopy_Sparse(SUNMatrix farg1, SUNMatrix farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatCopy_Sparse(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAdd_Sparse(double const *farg1, SUNMatrix farg2, SUNMatrix farg3) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; SUNMatrix arg3 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); arg3 = (SUNMatrix)(farg3); result = (int)SUNMatScaleAdd_Sparse(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAddI_Sparse(double const *farg1, SUNMatrix farg2) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatScaleAddI_Sparse(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatMatvec_Sparse(SUNMatrix farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNMatMatvec_Sparse(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatSpace_Sparse(SUNMatrix farg1, long *farg2, long *farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNMatSpace_Sparse(arg1,arg2,arg3); fresult = (int)(result); return fresult; } 
StanHeaders/src/sunmatrix/sparse/fmod/fsunmatrix_sparse_mod.f900000644000176200001440000004254114645137106024467 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunmatrix_sparse_mod use, intrinsic :: ISO_C_BINDING use fsundials_matrix_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: CSC_MAT = 0_C_INT integer(C_INT), parameter, public :: CSR_MAT = 1_C_INT public :: FSUNSparseMatrix public :: FSUNSparseFromDenseMatrix public :: FSUNSparseFromBandMatrix public :: FSUNSparseMatrix_ToCSR public :: FSUNSparseMatrix_ToCSC public :: FSUNSparseMatrix_Realloc public :: FSUNSparseMatrix_Reallocate public :: FSUNSparseMatrix_Print public :: FSUNSparseMatrix_Rows public :: FSUNSparseMatrix_Columns public :: FSUNSparseMatrix_NNZ public :: FSUNSparseMatrix_NP public :: FSUNSparseMatrix_SparseType public :: FSUNSparseMatrix_Data public :: FSUNSparseMatrix_IndexValues public :: FSUNSparseMatrix_IndexPointers public :: FSUNMatGetID_Sparse public :: FSUNMatClone_Sparse public :: FSUNMatDestroy_Sparse public :: FSUNMatZero_Sparse public :: FSUNMatCopy_Sparse public :: FSUNMatScaleAdd_Sparse public :: FSUNMatScaleAddI_Sparse public :: FSUNMatMatvec_Sparse public :: FSUNMatSpace_Sparse ! WRAPPER DECLARATIONS interface function swigc_FSUNSparseMatrix(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNSparseMatrix") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR) :: fresult end function function swigc_FSUNSparseFromDenseMatrix(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNSparseFromDenseMatrix") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR) :: fresult end function function swigc_FSUNSparseFromBandMatrix(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNSparseFromBandMatrix") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR) :: fresult end function function 
swigc_FSUNSparseMatrix_ToCSR(farg1, farg2) & bind(C, name="_wrap_FSUNSparseMatrix_ToCSR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNSparseMatrix_ToCSC(farg1, farg2) & bind(C, name="_wrap_FSUNSparseMatrix_ToCSC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNSparseMatrix_Realloc(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_Realloc") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNSparseMatrix_Reallocate(farg1, farg2) & bind(C, name="_wrap_FSUNSparseMatrix_Reallocate") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT) :: fresult end function subroutine swigc_FSUNSparseMatrix_Print(farg1, farg2) & bind(C, name="_wrap_FSUNSparseMatrix_Print") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FSUNSparseMatrix_Rows(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_Rows") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNSparseMatrix_Columns(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_Columns") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNSparseMatrix_NNZ(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_NNZ") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNSparseMatrix_NP(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_NP") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end 
function function swigc_FSUNSparseMatrix_SparseType(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_SparseType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNSparseMatrix_Data(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_Data") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNSparseMatrix_IndexValues(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_IndexValues") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNSparseMatrix_IndexPointers(farg1) & bind(C, name="_wrap_FSUNSparseMatrix_IndexPointers") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNMatGetID_Sparse(farg1) & bind(C, name="_wrap_FSUNMatGetID_Sparse") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNMatClone_Sparse(farg1) & bind(C, name="_wrap_FSUNMatClone_Sparse") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FSUNMatDestroy_Sparse(farg1) & bind(C, name="_wrap_FSUNMatDestroy_Sparse") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FSUNMatZero_Sparse(farg1) & bind(C, name="_wrap_FSUNMatZero_Sparse") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNMatCopy_Sparse(farg1, farg2) & bind(C, name="_wrap_FSUNMatCopy_Sparse") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNMatScaleAdd_Sparse(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatScaleAdd_Sparse") & 
result(fresult) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNMatScaleAddI_Sparse(farg1, farg2) & bind(C, name="_wrap_FSUNMatScaleAddI_Sparse") & result(fresult) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNMatMatvec_Sparse(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatMatvec_Sparse") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNMatSpace_Sparse(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatSpace_Sparse") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNSparseMatrix(m, n, nnz, sparsetype, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result integer(C_INT64_T), intent(in) :: m integer(C_INT64_T), intent(in) :: n integer(C_INT64_T), intent(in) :: nnz integer(C_INT), intent(in) :: sparsetype type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 farg1 = m farg2 = n farg3 = nnz farg4 = sparsetype farg5 = sunctx fresult = swigc_FSUNSparseMatrix(farg1, farg2, farg3, farg4, farg5) call c_f_pointer(fresult, swig_result) end function function FSUNSparseFromDenseMatrix(a, droptol, sparsetype) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a real(C_DOUBLE), intent(in) :: droptol integer(C_INT), intent(in) :: sparsetype type(C_PTR) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 farg1 = c_loc(a) farg2 = droptol farg3 = sparsetype fresult = swigc_FSUNSparseFromDenseMatrix(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FSUNSparseFromBandMatrix(a, droptol, sparsetype) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a real(C_DOUBLE), intent(in) :: droptol integer(C_INT), intent(in) :: sparsetype type(C_PTR) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 farg1 = c_loc(a) farg2 = droptol farg3 = sparsetype fresult = swigc_FSUNSparseFromBandMatrix(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FSUNSparseMatrix_ToCSR(a, bout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR), target, intent(inout) :: bout integer(C_INT) :: fresult type(C_PTR) 
:: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = c_loc(bout) fresult = swigc_FSUNSparseMatrix_ToCSR(farg1, farg2) swig_result = fresult end function function FSUNSparseMatrix_ToCSC(a, bout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR), target, intent(inout) :: bout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = c_loc(bout) fresult = swigc_FSUNSparseMatrix_ToCSC(farg1, farg2) swig_result = fresult end function function FSUNSparseMatrix_Realloc(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_Realloc(farg1) swig_result = fresult end function function FSUNSparseMatrix_Reallocate(a, nnz) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T), intent(in) :: nnz integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT64_T) :: farg2 farg1 = c_loc(a) farg2 = nnz fresult = swigc_FSUNSparseMatrix_Reallocate(farg1, farg2) swig_result = fresult end function subroutine FSUNSparseMatrix_Print(a, outfile) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = outfile call swigc_FSUNSparseMatrix_Print(farg1, farg2) end subroutine function FSUNSparseMatrix_Rows(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_Rows(farg1) swig_result = fresult end function function FSUNSparseMatrix_Columns(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: 
swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_Columns(farg1) swig_result = fresult end function function FSUNSparseMatrix_NNZ(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_NNZ(farg1) swig_result = fresult end function function FSUNSparseMatrix_NP(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_NP(farg1) swig_result = fresult end function function FSUNSparseMatrix_SparseType(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_SparseType(farg1) swig_result = fresult end function function FSUNSparseMatrix_Data(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_Data(farg1) call c_f_pointer(fresult, swig_result, [1]) end function function FSUNSparseMatrix_IndexValues(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), dimension(:), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_IndexValues(farg1) call c_f_pointer(fresult, swig_result, [1]) end function function FSUNSparseMatrix_IndexPointers(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), dimension(:), pointer 
:: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNSparseMatrix_IndexPointers(farg1) call c_f_pointer(fresult, swig_result, [1]) end function function FSUNMatGetID_Sparse(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNMatrix_ID) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatGetID_Sparse(farg1) swig_result = fresult end function function FSUNMatClone_Sparse(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatClone_Sparse(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FSUNMatDestroy_Sparse(a) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: farg1 farg1 = c_loc(a) call swigc_FSUNMatDestroy_Sparse(farg1) end subroutine function FSUNMatZero_Sparse(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatZero_Sparse(farg1) swig_result = fresult end function function FSUNMatCopy_Sparse(a, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(SUNMatrix), target, intent(inout) :: b integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = c_loc(b) fresult = swigc_FSUNMatCopy_Sparse(farg1, farg2) swig_result = fresult end function function FSUNMatScaleAdd_Sparse(c, a, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result real(C_DOUBLE), intent(in) :: c type(SUNMatrix), target, intent(inout) :: a type(SUNMatrix), target, 
intent(inout) :: b integer(C_INT) :: fresult real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(a) farg3 = c_loc(b) fresult = swigc_FSUNMatScaleAdd_Sparse(farg1, farg2, farg3) swig_result = fresult end function function FSUNMatScaleAddI_Sparse(c, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result real(C_DOUBLE), intent(in) :: c type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(a) fresult = swigc_FSUNMatScaleAddI_Sparse(farg1, farg2) swig_result = fresult end function function FSUNMatMatvec_Sparse(a, x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(a) farg2 = c_loc(x) farg3 = c_loc(y) fresult = swigc_FSUNMatMatvec_Sparse(farg1, farg2, farg3) swig_result = fresult end function function FSUNMatSpace_Sparse(a, lenrw, leniw) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_LONG), dimension(*), target, intent(inout) :: lenrw integer(C_LONG), dimension(*), target, intent(inout) :: leniw integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(a) farg2 = c_loc(lenrw(1)) farg3 = c_loc(leniw(1)) fresult = swigc_FSUNMatSpace_Sparse(farg1, farg2, farg3) swig_result = fresult end function end module StanHeaders/src/sunmatrix/slunrloc/0000755000176200001440000000000014511135055017135 5ustar liggesusersStanHeaders/src/sunmatrix/slunrloc/sunmatrix_slunrloc.c0000644000176200001440000003062614645137106023272 0ustar liggesusers/* * ---------------------------------------------------------------------------- * 
Programmer(s): Cody J. Balos @ LLNL * ---------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------------------- * This is the implementation file for the SuperLU SuperMatrix SLU_NR_loc * format compatibile SUNMatrix. * ---------------------------------------------------------------------------- */ #include #include #include #include #include #include #include /* * ---------------------------------------------------------------------------- * Macros for accessing the SUNMatrix_SLUNRloc content structure members * and useful values stored in nested SuperLU structures. * ---------------------------------------------------------------------------- */ #define SM_CONTENT_SLUNRLOC(A) ( (SUNMatrixContent_SLUNRloc)(A->content) ) #define SM_SUPERMATRIX_SLUNRLOC(A) ( SM_CONTENT_SLUNRLOC(A)->A_super ) #define SM_GRID_SLUNRLOC(A) ( SM_CONTENT_SLUNRLOC(A)->grid ) #define SM_COMMPATTERN_SLUNRLOC(A) ( SM_CONTENT_SLUNRLOC(A)->gsmv_comm ) #define SM_ROWTOPROC_SLUNRLOC(A) ( SM_CONTENT_SLUNRLOC(A)->row_to_proc ) #define SM_OWNDATA_SLUNRLOC(A) ( SM_CONTENT_SLUNRLOC(A)->own_data ) #define SM_COLSORTED_SLUNRLOC(A) ( SM_CONTENT_SLUNRLOC(A)->ACS_super ) #define SM_SUPERSTORE_SLUNRLOC(A) ( (NRformat_loc*)(SM_SUPERMATRIX_SLUNRLOC(A)->Store) ) #define SM_GLOBALROWS_SLUNRLOC(A) ( SM_SUPERMATRIX_SLUNRLOC(A)->nrow ) #define SM_GLOBALCOLS_SLUNRLOC(A) ( SM_SUPERMATRIX_SLUNRLOC(A)->ncol ) #define SM_LOCALROWS_SLUNRLOC(A) ( SM_SUPERSTORE_SLUNRLOC(A)->m_loc ) #define SM_LOCALNNZ_SLUNRLOC(A) ( SM_SUPERSTORE_SLUNRLOC(A)->nnz_loc ) #define SM_FSTROW_SLUNRLOC(A) ( SM_SUPERSTORE_SLUNRLOC(A)->fst_row ) #define SM_DATA_SLUNRLOC(A) ( 
(realtype*)SM_SUPERSTORE_SLUNRLOC(A)->nzval ) #define SM_COLIND_SLUNRLOC(A) ( SM_SUPERSTORE_SLUNRLOC(A)->colind ) #define SM_ROWPTRS_SLUNRLOC(A) ( SM_SUPERSTORE_SLUNRLOC(A)->rowptr ) /* constants */ #define ZERO RCONST(0.0) /* Private function prototypes */ static booleantype SMCompatible_SLUNRloc(SUNMatrix A, SUNMatrix B); /* * ---------------------------------------------------------------------------- * Exported functions * ---------------------------------------------------------------------------- */ SUNMatrix SUNMatrix_SLUNRloc(SuperMatrix *A_super, gridinfo_t *grid) { SUNMatrix A; SUNMatrix_Ops ops; SUNMatrixContent_SLUNRloc content; /* Check for valid intputs */ if (A_super == NULL || grid == NULL) return(NULL); if (A_super->Stype != SLU_NR_loc || A_super->Dtype != SLU_D || A_super->Mtype != SLU_GE) return(NULL); /* Create the matrix */ A = NULL; A = (SUNMatrix) malloc(sizeof(*A)); if (A == NULL) return(NULL); /* Create the matrix operations structure */ ops = NULL; ops = (SUNMatrix_Ops) malloc(sizeof(struct _generic_SUNMatrix_Ops)); if (ops == NULL) { free(A); return(NULL); } /* Attach operations */ ops->getid = SUNMatGetID_SLUNRloc; ops->clone = SUNMatClone_SLUNRloc; ops->destroy = SUNMatDestroy_SLUNRloc; ops->zero = SUNMatZero_SLUNRloc; ops->copy = SUNMatCopy_SLUNRloc; ops->scaleadd = SUNMatScaleAdd_SLUNRloc; ops->scaleaddi = SUNMatScaleAddI_SLUNRloc; ops->matvecsetup = SUNMatMatvecSetup_SLUNRloc; ops->matvec = SUNMatMatvec_SLUNRloc; ops->space = SUNMatSpace_SLUNRloc; /* Create content */ content = NULL; content = (SUNMatrixContent_SLUNRloc) malloc(sizeof(struct _SUNMatrixContent_SLUNRloc)); if (content == NULL) { free(ops); free(A); return(NULL); } /* Attach content to SuperMatrix */ content->A_super = A_super; content->own_data = SUNFALSE; content->grid = grid; content->row_to_proc = NULL; content->gsmv_comm = NULL; content->ACS_super = NULL; /* Attach content and ops to SUNMatrix */ A->content = content; A->ops = ops; return(A); } void 
SUNMatrix_SLUNRloc_Print(SUNMatrix A, FILE *fp) { STAN_SUNDIALS_FPRINTF(fp, "====== START SUNMatrix_SLUNRloc_Print %p ======\n", (void*) A); STAN_SUNDIALS_FPRINTF(fp, "A->content->A_super = %p\n", (void*) SM_SUPERMATRIX_SLUNRLOC(A)); /* Call SuperLU_DIST print routine */ file_dPrint_CompRowLoc_Matrix_dist(fp, SM_SUPERMATRIX_SLUNRLOC(A)); STAN_SUNDIALS_FPRINTF(fp, "======= END SUNMatrix_SLUNRloc_Print %p =======\n", (void*) A); } /* * ---------------------------------------------------------------------------- * Implementation of accessor functions * ---------------------------------------------------------------------------- */ SuperMatrix* SUNMatrix_SLUNRloc_SuperMatrix(SUNMatrix A) { return(SM_SUPERMATRIX_SLUNRLOC(A)); } gridinfo_t* SUNMatrix_SLUNRloc_ProcessGrid(SUNMatrix A) { return(SM_GRID_SLUNRLOC(A)); } booleantype SUNMatrix_SLUNRloc_OwnData(SUNMatrix A) { return(SM_OWNDATA_SLUNRLOC(A)); } /* * ---------------------------------------------------------------------------- * Implementation of matrix operations * ---------------------------------------------------------------------------- */ SUNMatrix_ID SUNMatGetID_SLUNRloc(SUNMatrix A) { return(SUNMATRIX_SLUNRLOC); } SUNMatrix SUNMatClone_SLUNRloc(SUNMatrix A) { SUNMatrix B; SuperMatrix *B_super; /* allocate new SuperMatrix */ B_super = NULL; B_super = malloc(sizeof(SuperMatrix)); if (B_super == NULL) return(NULL); /* call the SuperLU-DIST clone function */ dClone_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A), B_super); /* create the new SUNMatrix */ B = NULL; B = SUNMatrix_SLUNRloc(B_super, SM_GRID_SLUNRLOC(A)); if (B == NULL) { /* call SuperLU-DIST destroy function */ Destroy_CompRowLoc_Matrix_dist(B_super); free(B_super); return(NULL); } /* Allocated the SuperMatrix ourselves, so SUNMatrix now owns the data */ SM_OWNDATA_SLUNRLOC(B) = SUNTRUE; return(B); } void SUNMatDestroy_SLUNRloc(SUNMatrix A) { if (SM_OWNDATA_SLUNRLOC(A)) { /* call SuperLU-DIST destroy function */ 
Destroy_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A)); free(SM_SUPERMATRIX_SLUNRLOC(A)); } if (SM_COLSORTED_SLUNRLOC(A)) { /* if CS exists, then the Matvec has been initialized and we must finalize it by calling the SuperLU DIST pdgsmv_finalize routine to free up memory allocated */ pdgsmv_finalize(SM_COMMPATTERN_SLUNRLOC(A)); Destroy_CompRowLoc_Matrix_dist(SM_COLSORTED_SLUNRLOC(A)); free(SM_COLSORTED_SLUNRLOC(A)); SM_COLSORTED_SLUNRLOC(A) = NULL; } if (SM_ROWTOPROC_SLUNRLOC(A)) { free(SM_ROWTOPROC_SLUNRLOC(A)); SM_ROWTOPROC_SLUNRLOC(A) = NULL; } if (SM_COMMPATTERN_SLUNRLOC(A)) { free(SM_COMMPATTERN_SLUNRLOC(A)); SM_COMMPATTERN_SLUNRLOC(A) = NULL; } free(A->content); A->content = NULL; free(A->ops); A->ops = NULL; free(A); A = NULL; return; } int SUNMatZero_SLUNRloc(SUNMatrix A) { /* call SuperLU-DIST clone function */ dZero_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A)); return(SUNMAT_SUCCESS); } int SUNMatCopy_SLUNRloc(SUNMatrix A, SUNMatrix B) { /* call SuperLU-DIST copy function */ dCopy_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A), SM_SUPERMATRIX_SLUNRLOC(B)); return(SUNMAT_SUCCESS); } int SUNMatScaleAdd_SLUNRloc(realtype c, SUNMatrix A, SUNMatrix B) { /* check that B can be added into A */ if (!SMCompatible_SLUNRloc(A, B)) return(SUNMAT_ILL_INPUT); /* call SuperLU-DIST ScaleAdd function */ dScaleAdd_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A), SM_SUPERMATRIX_SLUNRLOC(B), c); return(SUNMAT_SUCCESS); } int SUNMatScaleAddI_SLUNRloc(realtype c, SUNMatrix A) { /* call SuperLU-DIST ScaleAddI function */ dScaleAddId_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A), c); return(SUNMAT_SUCCESS); } int SUNMatMatvec_SLUNRloc(SUNMatrix A, N_Vector x, N_Vector y) { SuperMatrix *ACS; realtype *xdata, *ydata; /* Extract the column-sorted A */ ACS = SM_COLSORTED_SLUNRLOC(A); /* The column-sorted matrix ACS and the communication pattern must be established by calling SUNMatMatvecSetup prior to calling SUNMatMatvec. 
*/ if (ACS == NULL || SM_ROWTOPROC_SLUNRLOC(A) == NULL || SM_COMMPATTERN_SLUNRLOC(A) == NULL) return(SUNMAT_MATVEC_SETUP_REQUIRED); xdata = N_VGetArrayPointer(x); ydata = N_VGetArrayPointer(y); if (xdata == NULL || ydata == NULL) return(SUNMAT_MEM_FAIL); /* Call SuperLU-DIST Matvec routine to perform the actual Matvec. */ pdgsmv(0, ACS, SM_GRID_SLUNRLOC(A), SM_COMMPATTERN_SLUNRLOC(A), xdata, ydata); return(SUNMAT_SUCCESS); } int SUNMatMatvecSetup_SLUNRloc(SUNMatrix A) { sunindextype *temp; sunindextype nprocs; sunindextype i, j; SuperMatrix *ACS = SM_COLSORTED_SLUNRLOC(A); /* If ACS is NULL, then this is the first setup call and things must be allocated */ if (ACS == NULL) { ACS = (SuperMatrix *) malloc(sizeof(SuperMatrix)); /* Clone and copy A to create ACS which will be A but with column-sorted column indices to [internal, external]. ACS is used with the Matvec routine. */ dClone_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A), ACS); dCopy_CompRowLoc_Matrix_dist(SM_SUPERMATRIX_SLUNRLOC(A), ACS); SM_ROWTOPROC_SLUNRLOC(A) = (sunindextype *) malloc(SM_GLOBALROWS_SLUNRLOC(A)*sizeof(sunindextype)); if (SM_ROWTOPROC_SLUNRLOC(A) == NULL) { Destroy_CompRowLoc_Matrix_dist(ACS); free(ACS); return(SUNMAT_MEM_FAIL); } SM_COMMPATTERN_SLUNRLOC(A) = (pdgsmv_comm_t *) malloc(sizeof(pdgsmv_comm_t)); if (SM_COMMPATTERN_SLUNRLOC(A) == NULL) { free(SM_ROWTOPROC_SLUNRLOC(A)); Destroy_CompRowLoc_Matrix_dist(ACS); free(ACS); return(SUNMAT_MEM_FAIL); } SM_COLSORTED_SLUNRLOC(A) = ACS; } else { /* if ACS has already been created, we can reuse it to save allocations, but we must finalize the last matvec to avoid leaking memory.*/ pdgsmv_finalize(SM_COMMPATTERN_SLUNRLOC(A)); } /* calculate the number of processes the matrix is spread across */ nprocs = SM_GRID_SLUNRLOC(A)->nprow*SM_GRID_SLUNRLOC(A)->npcol; /* establish a row number to process mapping */ temp = (sunindextype*) malloc((nprocs+1)*sizeof(sunindextype)); if (temp == NULL) { SUNMatDestroy(A); return(SUNMAT_MEM_FAIL); } 
MPI_Allgather(&SM_FSTROW_SLUNRLOC(A), 1, MPI_SUNINDEXTYPE, temp, 1, MPI_SUNINDEXTYPE, SM_GRID_SLUNRLOC(A)->comm); temp[nprocs] = SM_GLOBALROWS_SLUNRLOC(A); for (i=0; i #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* Private function prototypes */ static booleantype SMCompatible_Band(SUNMatrix A, SUNMatrix B); static booleantype SMCompatible2_Band(SUNMatrix A, N_Vector x, N_Vector y); static int SMScaleAddNew_Band(realtype c, SUNMatrix A, SUNMatrix B); /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new band matrix with default storage upper bandwidth */ SUNMatrix SUNBandMatrix(sunindextype N, sunindextype mu, sunindextype ml, SUNContext sunctx) { return (SUNBandMatrixStorage(N, mu, ml, mu+ml, sunctx)); } /* ---------------------------------------------------------------------------- * Function to create a new band matrix with specified storage upper bandwidth */ SUNMatrix SUNBandMatrixStorage(sunindextype N, sunindextype mu, sunindextype ml, sunindextype smu, SUNContext sunctx) { SUNMatrix A; SUNMatrixContent_Band content; sunindextype j, colSize; /* return with NULL matrix on illegal dimension input */ if ( (N <= 0) || (smu < 0) || (ml < 0) ) return(NULL); /* Create an empty matrix object */ A = NULL; A = SUNMatNewEmpty(sunctx); if (A == NULL) return(NULL); /* Attach operations */ A->ops->getid = SUNMatGetID_Band; A->ops->clone = SUNMatClone_Band; A->ops->destroy = SUNMatDestroy_Band; A->ops->zero = SUNMatZero_Band; A->ops->copy = SUNMatCopy_Band; A->ops->scaleadd = SUNMatScaleAdd_Band; A->ops->scaleaddi = SUNMatScaleAddI_Band; A->ops->matvec = SUNMatMatvec_Band; A->ops->space = SUNMatSpace_Band; /* Create content */ content = NULL; content = (SUNMatrixContent_Band) malloc(sizeof *content); if (content == NULL) { 
SUNMatDestroy(A); return(NULL); } /* Attach content */ A->content = content; /* Fill content */ colSize = smu + ml + 1; content->M = N; content->N = N; content->mu = mu; content->ml = ml; content->s_mu = smu; content->ldim = colSize; content->ldata = N * colSize; content->data = NULL; content->cols = NULL; /* Allocate content */ content->data = (realtype *) calloc(N * colSize, sizeof(realtype)); if (content->data == NULL) { SUNMatDestroy(A); return(NULL); } content->cols = (realtype **) malloc(N * sizeof(realtype *)); if (content->cols == NULL) { SUNMatDestroy(A); return(NULL); } for (j=0; jcols[j] = content->data + j * colSize; return(A); } /* ---------------------------------------------------------------------------- * Function to print the band matrix */ void SUNBandMatrix_Print(SUNMatrix A, FILE* outfile) { sunindextype i, j, start, finish; /* should not be called unless A is a band matrix; otherwise return immediately */ if (SUNMatGetID(A) != SUNMATRIX_BAND) return; /* perform operation */ STAN_SUNDIALS_FPRINTF(outfile,"\n"); for (i=0; isunctx); return(B); } void SUNMatDestroy_Band(SUNMatrix A) { if (A == NULL) return; /* free content */ if (A->content != NULL) { /* free data array */ if (SM_DATA_B(A)) { free(SM_DATA_B(A)); SM_DATA_B(A) = NULL; } /* free column pointers */ if (SM_COLS_B(A)) { free(SM_COLS_B(A)); SM_COLS_B(A) = NULL; } /* free content struct */ free(A->content); A->content = NULL; } /* free ops and matrix */ if (A->ops) { free(A->ops); A->ops = NULL; } free(A); A = NULL; return; } int SUNMatZero_Band(SUNMatrix A) { sunindextype i; realtype *Adata; /* Verify that A is a band matrix */ if (SUNMatGetID(A) != SUNMATRIX_BAND) return SUNMAT_ILL_INPUT; /* Perform operation */ Adata = SM_DATA_B(A); for (i=0; i SM_UBAND_B(B)) || (SM_LBAND_B(A) > SM_LBAND_B(B)) ) { ml = SUNMAX(SM_LBAND_B(B),SM_LBAND_B(A)); mu = SUNMAX(SM_UBAND_B(B),SM_UBAND_B(A)); smu = SUNMAX(SM_SUBAND_B(B),SM_SUBAND_B(A)); colSize = smu + ml + 1; SM_CONTENT_B(B)->mu = mu; 
SM_CONTENT_B(B)->ml = ml; SM_CONTENT_B(B)->s_mu = smu; SM_CONTENT_B(B)->ldim = colSize; SM_CONTENT_B(B)->ldata = SM_COLUMNS_B(B) * colSize; SM_CONTENT_B(B)->data = (realtype *) realloc(SM_CONTENT_B(B)->data, SM_COLUMNS_B(B) * colSize*sizeof(realtype)); for (j=0; jcols[j] = SM_CONTENT_B(B)->data + j * colSize; } /* Perform operation */ if (SUNMatZero_Band(B) != SUNMAT_SUCCESS) return SUNMAT_OPERATION_FAIL; for (j=0; j SM_UBAND_B(A)) || (SM_LBAND_B(B) > SM_LBAND_B(A)) ) { return SMScaleAddNew_Band(c,A,B); } /* Otherwise, perform operation in-place */ for (j=0; jsunctx); /* scale/add c*A into new matrix */ for (j=0; jcontent); A->content = NULL; A->content = C->content; C->content = NULL; SUNMatDestroy_Band(C); return SUNMAT_SUCCESS; } StanHeaders/src/sunmatrix/band/fmod/0000755000176200001440000000000014511135055017125 5ustar liggesusersStanHeaders/src/sunmatrix/band/fmod/fsunmatrix_band_mod.c0000644000176200001440000003143214645137106023326 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define 
SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_matrix.h" #include "sunmatrix/sunmatrix_band.h" SWIGEXPORT SUNMatrix _wrap_FSUNBandMatrix(int64_t const *farg1, int64_t const *farg2, int64_t const *farg3, void *farg4) { SUNMatrix fresult ; sunindextype arg1 ; sunindextype arg2 ; sunindextype arg3 ; SUNContext arg4 = (SUNContext) 0 ; SUNMatrix result; arg1 = (sunindextype)(*farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (SUNContext)(farg4); result = (SUNMatrix)SUNBandMatrix(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT SUNMatrix _wrap_FSUNBandMatrixStorage(int64_t const *farg1, int64_t const *farg2, int64_t const *farg3, int64_t const *farg4, void *farg5) { SUNMatrix fresult ; sunindextype arg1 ; sunindextype arg2 ; sunindextype arg3 ; sunindextype arg4 ; 
SUNContext arg5 = (SUNContext) 0 ; SUNMatrix result; arg1 = (sunindextype)(*farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (SUNContext)(farg5); result = (SUNMatrix)SUNBandMatrixStorage(arg1,arg2,arg3,arg4,arg5); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNBandMatrix_Print(SUNMatrix farg1, void *farg2) { SUNMatrix arg1 = (SUNMatrix) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (SUNMatrix)(farg1); arg2 = (FILE *)(farg2); SUNBandMatrix_Print(arg1,arg2); } SWIGEXPORT int64_t _wrap_FSUNBandMatrix_Rows(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNBandMatrix_Rows(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNBandMatrix_Columns(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNBandMatrix_Columns(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNBandMatrix_LowerBandwidth(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNBandMatrix_LowerBandwidth(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNBandMatrix_UpperBandwidth(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNBandMatrix_UpperBandwidth(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNBandMatrix_StoredUpperBandwidth(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = (SUNMatrix)(farg1); result = SUNBandMatrix_StoredUpperBandwidth(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNBandMatrix_LDim(SUNMatrix farg1) { int64_t fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype result; arg1 = 
(SUNMatrix)(farg1); result = SUNBandMatrix_LDim(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT double * _wrap_FSUNBandMatrix_Data(SUNMatrix farg1) { double * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; realtype *result = 0 ; arg1 = (SUNMatrix)(farg1); result = (realtype *)SUNBandMatrix_Data(arg1); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FSUNBandMatrix_Cols(SUNMatrix farg1) { void * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; realtype **result = 0 ; arg1 = (SUNMatrix)(farg1); result = (realtype **)SUNBandMatrix_Cols(arg1); fresult = result; return fresult; } SWIGEXPORT double * _wrap_FSUNBandMatrix_Column(SUNMatrix farg1, int64_t const *farg2) { double * fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; sunindextype arg2 ; realtype *result = 0 ; arg1 = (SUNMatrix)(farg1); arg2 = (sunindextype)(*farg2); result = (realtype *)SUNBandMatrix_Column(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNMatGetID_Band(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix_ID result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix_ID)SUNMatGetID_Band(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT SUNMatrix _wrap_FSUNMatClone_Band(SUNMatrix farg1) { SUNMatrix fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix)SUNMatClone_Band(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNMatDestroy_Band(SUNMatrix farg1) { SUNMatrix arg1 = (SUNMatrix) 0 ; arg1 = (SUNMatrix)(farg1); SUNMatDestroy_Band(arg1); } SWIGEXPORT int _wrap_FSUNMatZero_Band(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); result = (int)SUNMatZero_Band(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatCopy_Band(SUNMatrix farg1, SUNMatrix farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = 
(SUNMatrix)(farg2); result = (int)SUNMatCopy_Band(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAdd_Band(double const *farg1, SUNMatrix farg2, SUNMatrix farg3) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; SUNMatrix arg3 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); arg3 = (SUNMatrix)(farg3); result = (int)SUNMatScaleAdd_Band(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAddI_Band(double const *farg1, SUNMatrix farg2) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatScaleAddI_Band(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatMatvec_Band(SUNMatrix farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNMatMatvec_Band(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatSpace_Band(SUNMatrix farg1, long *farg2, long *farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNMatSpace_Band(arg1,arg2,arg3); fresult = (int)(result); return fresult; } StanHeaders/src/sunmatrix/band/fmod/fsunmatrix_band_mod.f900000644000176200001440000003464114645137106023507 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! 
--------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunmatrix_band_mod use, intrinsic :: ISO_C_BINDING use fsundials_matrix_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FSUNBandMatrix public :: FSUNBandMatrixStorage public :: FSUNBandMatrix_Print public :: FSUNBandMatrix_Rows public :: FSUNBandMatrix_Columns public :: FSUNBandMatrix_LowerBandwidth public :: FSUNBandMatrix_UpperBandwidth public :: FSUNBandMatrix_StoredUpperBandwidth public :: FSUNBandMatrix_LDim public :: FSUNBandMatrix_Data public :: FSUNBandMatrix_Cols public :: FSUNBandMatrix_Column public :: FSUNMatGetID_Band public :: FSUNMatClone_Band public :: FSUNMatDestroy_Band public :: FSUNMatZero_Band public :: FSUNMatCopy_Band public :: FSUNMatScaleAdd_Band public :: FSUNMatScaleAddI_Band public :: FSUNMatMatvec_Band public :: FSUNMatSpace_Band ! 
WRAPPER DECLARATIONS interface function swigc_FSUNBandMatrix(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNBandMatrix") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FSUNBandMatrixStorage(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNBandMatrixStorage") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T), intent(in) :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR) :: fresult end function subroutine swigc_FSUNBandMatrix_Print(farg1, farg2) & bind(C, name="_wrap_FSUNBandMatrix_Print") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FSUNBandMatrix_Rows(farg1) & bind(C, name="_wrap_FSUNBandMatrix_Rows") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNBandMatrix_Columns(farg1) & bind(C, name="_wrap_FSUNBandMatrix_Columns") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNBandMatrix_LowerBandwidth(farg1) & bind(C, name="_wrap_FSUNBandMatrix_LowerBandwidth") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNBandMatrix_UpperBandwidth(farg1) & bind(C, name="_wrap_FSUNBandMatrix_UpperBandwidth") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNBandMatrix_StoredUpperBandwidth(farg1) & bind(C, name="_wrap_FSUNBandMatrix_StoredUpperBandwidth") & result(fresult) use, intrinsic :: ISO_C_BINDING 
type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNBandMatrix_LDim(farg1) & bind(C, name="_wrap_FSUNBandMatrix_LDim") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNBandMatrix_Data(farg1) & bind(C, name="_wrap_FSUNBandMatrix_Data") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNBandMatrix_Cols(farg1) & bind(C, name="_wrap_FSUNBandMatrix_Cols") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNBandMatrix_Column(farg1, farg2) & bind(C, name="_wrap_FSUNBandMatrix_Column") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 type(C_PTR) :: fresult end function function swigc_FSUNMatGetID_Band(farg1) & bind(C, name="_wrap_FSUNMatGetID_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNMatClone_Band(farg1) & bind(C, name="_wrap_FSUNMatClone_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FSUNMatDestroy_Band(farg1) & bind(C, name="_wrap_FSUNMatDestroy_Band") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FSUNMatZero_Band(farg1) & bind(C, name="_wrap_FSUNMatZero_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNMatCopy_Band(farg1, farg2) & bind(C, name="_wrap_FSUNMatCopy_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNMatScaleAdd_Band(farg1, farg2, farg3) & bind(C, 
name="_wrap_FSUNMatScaleAdd_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNMatScaleAddI_Band(farg1, farg2) & bind(C, name="_wrap_FSUNMatScaleAddI_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNMatMatvec_Band(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatMatvec_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNMatSpace_Band(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatSpace_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNBandMatrix(n, mu, ml, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result integer(C_INT64_T), intent(in) :: n integer(C_INT64_T), intent(in) :: mu integer(C_INT64_T), intent(in) :: ml type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 type(C_PTR) :: farg4 farg1 = n farg2 = mu farg3 = ml farg4 = sunctx fresult = swigc_FSUNBandMatrix(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FSUNBandMatrixStorage(n, mu, ml, smu, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result integer(C_INT64_T), intent(in) :: n integer(C_INT64_T), intent(in) :: mu integer(C_INT64_T), intent(in) :: ml integer(C_INT64_T), intent(in) :: smu type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT64_T) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 integer(C_INT64_T) :: farg4 type(C_PTR) :: farg5 farg1 = n farg2 = mu farg3 = ml farg4 = smu farg5 = sunctx fresult = swigc_FSUNBandMatrixStorage(farg1, farg2, farg3, farg4, farg5) call c_f_pointer(fresult, swig_result) end function subroutine FSUNBandMatrix_Print(a, outfile) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = outfile call swigc_FSUNBandMatrix_Print(farg1, farg2) end subroutine function FSUNBandMatrix_Rows(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_Rows(farg1) swig_result = fresult end function function FSUNBandMatrix_Columns(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a 
integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_Columns(farg1) swig_result = fresult end function function FSUNBandMatrix_LowerBandwidth(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_LowerBandwidth(farg1) swig_result = fresult end function function FSUNBandMatrix_UpperBandwidth(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_UpperBandwidth(farg1) swig_result = fresult end function function FSUNBandMatrix_StoredUpperBandwidth(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_StoredUpperBandwidth(farg1) swig_result = fresult end function function FSUNBandMatrix_LDim(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_LDim(farg1) swig_result = fresult end function function FSUNBandMatrix_Data(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_Data(farg1) call c_f_pointer(fresult, swig_result, [1]) end function function FSUNBandMatrix_Cols(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult 
type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNBandMatrix_Cols(farg1) call c_f_pointer(fresult, swig_result) end function function FSUNBandMatrix_Column(a, j) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T), intent(in) :: j type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT64_T) :: farg2 farg1 = c_loc(a) farg2 = j fresult = swigc_FSUNBandMatrix_Column(farg1, farg2) call c_f_pointer(fresult, swig_result, [1]) end function function FSUNMatGetID_Band(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNMatrix_ID) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatGetID_Band(farg1) swig_result = fresult end function function FSUNMatClone_Band(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatClone_Band(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FSUNMatDestroy_Band(a) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: farg1 farg1 = c_loc(a) call swigc_FSUNMatDestroy_Band(farg1) end subroutine function FSUNMatZero_Band(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatZero_Band(farg1) swig_result = fresult end function function FSUNMatCopy_Band(a, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(SUNMatrix), target, intent(inout) :: b integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = c_loc(b) 
fresult = swigc_FSUNMatCopy_Band(farg1, farg2) swig_result = fresult end function function FSUNMatScaleAdd_Band(c, a, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result real(C_DOUBLE), intent(in) :: c type(SUNMatrix), target, intent(inout) :: a type(SUNMatrix), target, intent(inout) :: b integer(C_INT) :: fresult real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(a) farg3 = c_loc(b) fresult = swigc_FSUNMatScaleAdd_Band(farg1, farg2, farg3) swig_result = fresult end function function FSUNMatScaleAddI_Band(c, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result real(C_DOUBLE), intent(in) :: c type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(a) fresult = swigc_FSUNMatScaleAddI_Band(farg1, farg2) swig_result = fresult end function function FSUNMatMatvec_Band(a, x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(a) farg2 = c_loc(x) farg3 = c_loc(y) fresult = swigc_FSUNMatMatvec_Band(farg1, farg2, farg3) swig_result = fresult end function function FSUNMatSpace_Band(a, lenrw, leniw) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_LONG), dimension(*), target, intent(inout) :: lenrw integer(C_LONG), dimension(*), target, intent(inout) :: leniw integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(a) farg2 = c_loc(lenrw(1)) farg3 = c_loc(leniw(1)) fresult = swigc_FSUNMatSpace_Band(farg1, farg2, farg3) swig_result = fresult end function end module 
StanHeaders/src/sunmatrix/cusparse/0000755000176200001440000000000014511135055017121 5ustar liggesusersStanHeaders/src/sunmatrix/cusparse/cusparse_kernels.cuh0000644000176200001440000001227214645137106023205 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file is for the cuSPARSE implementation of the * SUNMATRIX module. * ----------------------------------------------------------------- */ #ifndef _SUNCUSPARSE_MATRIX_KERNELS_CUH_ #define _SUNCUSPARSE_MATRIX_KERNELS_CUH_ #include #include namespace sundials { namespace sunmatrix_cusparse { template __global__ void scaleAddIKernelCSR(I m, T c, T* A, const I* rowptr, const I* colind) { // REQUIRES THE DIAGONAL TO BE PRESENT! // Each thread loops over one row of the matrix so memory accesses by a thread are stride-1. // If there aren't enough threads to cover all rows, then some threads will be reused for // more than one row. for (I row = blockIdx.x*blockDim.x + threadIdx.x; row < m; row += blockDim.x * gridDim.x) { I tmp = rowptr[row]; I rownnz = rowptr[row+1] - tmp; I idx = tmp; for (I j = 0; j < rownnz; j++) { if (colind[idx+j] == row) A[idx+j] = c*A[idx+j] + 1.0; else A[idx+j] = c*A[idx+j]; } } } template __global__ void scaleAddIKernelBCSR(I m, I nblocks, I blocknnz, T c, T* A, const I* rowptr, const I* colind) { // REQUIRES THE DIAGONAL TO BE PRESENT! // Ideally each thread block will be in charge of one block of the matrix. 
for (I block = blockIdx.x; block < nblocks; block += gridDim.x) { // Each thread loops over one row of the matrix so memory accesses by a thread are stride-1. // If there aren't enough threads to cover all rows, then some threads will be reused for // more than one row. for (I row = threadIdx.x; row < m; row += blockDim.x) { I tmp = rowptr[row]; I rownnz = rowptr[row+1] - tmp; I idxl = tmp; I idxg = block*blocknnz + tmp; for (I j = 0; j < rownnz; j++) { if (colind[idxl+j] == row) A[idxg+j] = c*A[idxg+j] + 1.0; else A[idxg+j] = c*A[idxg+j]; } } } } template __global__ void scaleAddKernelCSR(I nnz, T c, T* A, const T* B) { // REQUIRES A AND B TO HAVE THE SAME SPARSITY PATTERN for (I i = blockIdx.x * blockDim.x + threadIdx.x; i < nnz; i += blockDim.x * gridDim.x) { A[i] = c*A[i] + B[i]; } } template __global__ void matvecBCSR(I m, I nblocks, I blocknnz, const T* A, const I* rowptr, const I* colind, const T* x, T* y) { // Zero out result vector for (I i = blockIdx.x * blockDim.x + threadIdx.x; i < nblocks*blocknnz; i += blockDim.x * gridDim.x) { y[i] = 0.0; } __syncthreads(); // Ideally each thread block will be in charge of one block of the matrix. for (I block = blockIdx.x; block < nblocks; block += gridDim.x) { // Each thread loops over one row of the matrix so memory accesses by a thread are stride-1. // If there aren't enough threads to cover all rows, then some threads will be reused for // more than one row. 
for (I row = threadIdx.x; row < m; row += blockDim.x) { I tmp = rowptr[row]; I rownnz = rowptr[row+1] - tmp; // number of nnz in this row I idxl = tmp; // local (to this block) starting nonzero index I idxg = block*blocknnz + tmp; // global (overall matrix) starting nonzero index I rowg = block*m+row; // global (overall matrix) row I colg = block*m; // global (overall matrix) starting column for (I j = 0; j < rownnz; j++) { y[rowg] += A[idxg+j] * x[ colg+colind[idxl+j] ]; } } } } // kernels for debugging #ifdef SUNDIALS_DEBUG template __global__ void print_kernel(I m, I nnz, I blocknnz, T* A, const I* rowptr, const I* colind) { for (I i = blockIdx.x * blockDim.x + threadIdx.x; i < nnz; i += blockDim.x * gridDim.x) { printf("A[%d] = %f\n", i, A[i]); } for (I i = blockIdx.x * blockDim.x + threadIdx.x; i < m+1; i += blockDim.x * gridDim.x) { printf("rowptr[%d] = %d\n", i, rowptr[i]); } for (I i = blockIdx.x * blockDim.x + threadIdx.x; i < blocknnz; i += blockDim.x * gridDim.x) { printf("colind[%d] = %d\n", i, colind[i]); } } #endif } // namespace sunmatrix_cusparse } // namespace sundials #endifStanHeaders/src/sunmatrix/cusparse/sunmatrix_cusparse.cu0000644000176200001440000012301014645137106023415 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file is for the cuSPARSE implementation of the * SUNMATRIX module. 
 * ----------------------------------------------------------------- */

/* NOTE(review): the angle-bracket include targets below were lost when
 * this file was extracted (presumably <cuda_runtime.h>, <cusparse.h>,
 * the SUNMatrix/SUNMemory headers) -- restore against the upstream
 * SUNDIALS sources before compiling. */
#include
#include
#include
#include
#include "sundials_cuda.h"
#include "sundials_debug.h"
#include "cusparse_kernels.cuh"

/* Use the namespace for the kernels */
using namespace sundials::cuda;
using namespace sundials::sunmatrix_cusparse;

/* Constants */
#define ZERO RCONST(0.0)
#define ONE  RCONST(1.0)

/* Private function prototypes */
static booleantype SMCompatible_cuSparse(SUNMatrix, SUNMatrix);
static SUNMatrix SUNMatrix_cuSparse_NewEmpty(SUNContext sunctx);
#if CUDART_VERSION >= 11000
static cusparseStatus_t CreateSpMatDescr(SUNMatrix, cusparseSpMatDescr_t*);
#endif

/* Macros for handling the different function names based on precision */
#if defined(SUNDIALS_DOUBLE_PRECISION)
#define cusparseXcsrmv cusparseDcsrmv
#define CUDA_R_XF CUDA_R_64F
#elif defined(SUNDIALS_SINGLE_PRECISION)
#define cusparseXcsrmv cusparseScsrmv
#define CUDA_R_XF CUDA_R_32F
#endif

/* Content accessor macros: each expands to a field of the
   SUNMatrix_Content_cuSparse structure hanging off A->content.
   The ...p variants return raw typed pointers out of the SUNMemory
   wrappers. */
#define SMCU_CONTENT(A)     ( (SUNMatrix_Content_cuSparse)(A->content) )
#define SMCU_ROWS(A)        ( SMCU_CONTENT(A)->M )
#define SMCU_COLUMNS(A)     ( SMCU_CONTENT(A)->N )
#define SMCU_NNZ(A)         ( SMCU_CONTENT(A)->NNZ )
#define SMCU_NBLOCKS(A)     ( SMCU_CONTENT(A)->nblocks )
#define SMCU_BLOCKROWS(A)   ( SMCU_CONTENT(A)->blockrows )
#define SMCU_BLOCKCOLS(A)   ( SMCU_CONTENT(A)->blockcols )
#define SMCU_BLOCKNNZ(A)    ( SMCU_CONTENT(A)->blocknnz )
#define SMCU_NP(A)          ( SMCU_CONTENT(A)->NP )
#define SMCU_SPARSETYPE(A)  ( SMCU_CONTENT(A)->sparse_type )
#define SMCU_OWNMATD(A)     ( SMCU_CONTENT(A)->own_matd )
#define SMCU_DATA(A)        ( SMCU_CONTENT(A)->data )
#define SMCU_DATAp(A)       ( (realtype*)SMCU_CONTENT(A)->data->ptr )
#define SMCU_INDEXVALS(A)   ( SMCU_CONTENT(A)->colind )
#define SMCU_INDEXPTRS(A)   ( SMCU_CONTENT(A)->rowptrs )
#define SMCU_INDEXVALSp(A)  ( (int*) SMCU_CONTENT(A)->colind->ptr )
#define SMCU_INDEXPTRSp(A)  ( (int*) SMCU_CONTENT(A)->rowptrs->ptr )
#define SMCU_MEMHELP(A)     ( SMCU_CONTENT(A)->mem_helper )
#define SMCU_MATDESCR(A)    ( SMCU_CONTENT(A)->mat_descr )
#define SMCU_CUSPHANDLE(A)  ( SMCU_CONTENT(A)->cusp_handle )
#define SMCU_FIXEDPATTERN(A)( SMCU_CONTENT(A)->fixed_pattern )
#define SMCU_EXECPOLICY(A)  ( SMCU_CONTENT(A)->exec_policy )

/* ------------------------------------------------------------------
 * Default execution policy definition.
 *
 * This policy tries to help us leverage the structure of the matrix.
 * It will choose block sizes which are a multiple of the warp size,
 * and it will choose a grid size to such that all work elements are
 * covered.
 * ------------------------------------------------------------------ */

class SUNCuSparseMatrixExecPolicy : public ExecPolicy
{
public:
  /* Bind the policy to a CUDA stream (default stream 0). */
  SUNCuSparseMatrixExecPolicy(const cudaStream_t stream = 0)
    : ExecPolicy(stream)
  {}

  /* Copy constructor: shares the same stream as ex. */
  SUNCuSparseMatrixExecPolicy(const SUNCuSparseMatrixExecPolicy& ex)
    : ExecPolicy(ex.stream_)
  {}

  /* Smallest grid that covers numWorkElements at the given blockDim.
     NOTE(review): divides by blockDim without checking for 0 -- callers
     in this file always pass the result of blockSize(), which is > 0. */
  virtual size_t gridSize(size_t numWorkElements, size_t blockDim = 0) const
  {
    return(numWorkElements + blockDim - 1)/blockDim;
  }

  /* Round numWorkElements up to a multiple of WARP_SIZE, capped at
     MAX_BLOCK_SIZE. The gridDim parameter is accepted but unused. */
  virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const
  {
    return(max_block_size(WARP_SIZE*(numWorkElements + WARP_SIZE - 1)/WARP_SIZE));
  }

  virtual const cudaStream_t* stream() const
  {
    return(&stream_);
  }

  /* NOTE(review): static_cast lost its template argument in extraction
     (presumably static_cast<ExecPolicy*>) -- confirm against upstream. */
  virtual ExecPolicy* clone() const
  {
    return(static_cast(new SUNCuSparseMatrixExecPolicy(*this)));
  }

  /* Clamp val to at most MAX_BLOCK_SIZE. */
  static size_t max_block_size(int val)
  {
    return((val > MAX_BLOCK_SIZE) ? MAX_BLOCK_SIZE : val );
  }
};

/* Shared default policy; cloned (with a specific stream) into every
   matrix created below. */
SUNCuSparseMatrixExecPolicy DEFAULT_EXEC_POLICY;

/* ------------------------------------------------------------------
 * Constructors.
 * ------------------------------------------------------------------ */

/* Creates an M x N CSR matrix with room for NNZ nonzeros. The matrix
 * owns its device storage and its cusparseMatDescr_t. Returns NULL on
 * illegal input or any allocation/cuSPARSE failure; every failure path
 * releases whatever was already acquired. */
SUNMatrix SUNMatrix_cuSparse_NewCSR(int M, int N, int NNZ, cusparseHandle_t cusp, SUNContext sunctx)
{
  SUNMemory d_colind, d_rowptr, d_values;
  int alloc_fail = 0;

  /* return with NULL matrix on illegal input */
  if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: illegal value(s) for M, N, or NNZ\n");
    return(NULL);
  }

  SUNMatrix A = SUNMatrix_cuSparse_NewEmpty(sunctx);
  if (A == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
    return(NULL);
  }

  SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda(sunctx);
  if (SMCU_MEMHELP(A) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
    SUNMatDestroy(A);
    return(NULL);
  }

  /* Allocate device memory for the matrix */
  alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_colind, sizeof(int)*NNZ, SUNMEMTYPE_DEVICE, nullptr);
  alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_rowptr, sizeof(int)*(M+1), SUNMEMTYPE_DEVICE, nullptr);
  alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_values, sizeof(realtype)*NNZ, SUNMEMTYPE_DEVICE, nullptr);
  if (alloc_fail)
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    SUNMatDestroy(A);
    return(NULL);
  }

  /* Choose sensible defaults */
  cusparseStatus_t cusparse_status = CUSPARSE_STATUS_SUCCESS;
  cusparseMatDescr_t mat_descr;
  cusparse_status = cusparseCreateMatDescr(&mat_descr);
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    SUNMatDestroy(A);
    return(NULL);
  }

  cusparse_status = cusparseSetMatType(mat_descr, CUSPARSE_MATRIX_TYPE_GENERAL);
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    cusparseDestroyMatDescr(mat_descr);
    SUNMatDestroy(A);
    return(NULL);
  }

  cusparse_status = cusparseSetMatIndexBase(mat_descr, CUSPARSE_INDEX_BASE_ZERO);
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    cusparseDestroyMatDescr(mat_descr);
    SUNMatDestroy(A);
    return(NULL);
  }

  /* The matrix inherits the stream of the provided cuSPARSE handle. */
  cudaStream_t stream;
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparseGetStream(cusp, &stream)))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    cusparseDestroyMatDescr(mat_descr);
    SUNMatDestroy(A);
    return(NULL);
  }

  /* Fill the content: a plain CSR matrix is stored as a single block
     covering the whole matrix. */
  SMCU_CONTENT(A)->M = M;
  SMCU_CONTENT(A)->N = N;
  SMCU_CONTENT(A)->NNZ = NNZ;
  SMCU_CONTENT(A)->nblocks = 1;
  SMCU_CONTENT(A)->blockrows = M;
  SMCU_CONTENT(A)->blockcols = N;
  SMCU_CONTENT(A)->blocknnz = NNZ;
  SMCU_CONTENT(A)->own_matd = SUNTRUE;
  SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
  SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
  SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_CSR;
  SMCU_CONTENT(A)->colind = d_colind;
  SMCU_CONTENT(A)->rowptrs = d_rowptr;
  SMCU_CONTENT(A)->data = d_values;
  SMCU_CONTENT(A)->mat_descr = mat_descr;
  SMCU_CONTENT(A)->cusp_handle = cusp;
  SMCU_CONTENT(A)->exec_policy = DEFAULT_EXEC_POLICY.clone_new_stream(stream);

#if CUDART_VERSION >= 11000
  /* CUDA 11+ uses the generic SpMV API; create its matrix descriptor now,
     the dense-vector descriptors and work buffer are created lazily in
     SUNMatMatvecSetup_cuSparse. */
  cusparseSpMatDescr_t spmat_descr;
  if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
  {
    SUNMatDestroy(A);
    return(NULL);
  }
  SMCU_CONTENT(A)->spmat_descr = spmat_descr;
  SMCU_CONTENT(A)->dBufferMem = NULL;
  SMCU_CONTENT(A)->bufferSize = 0;
  SMCU_CONTENT(A)->vecX = NULL;
  SMCU_CONTENT(A)->vecY = NULL;
#endif

  return A;
}

/* Wraps caller-owned device arrays (rowptrs, colind, data) and a
 * caller-owned cusparseMatDescr_t as a CSR SUNMatrix. The matrix does
 * NOT take ownership of mat_descr (own_matd = SUNFALSE). The descriptor
 * must use zero-based indexing. Returns NULL on illegal input or
 * failure. */
SUNMatrix SUNMatrix_cuSparse_MakeCSR(cusparseMatDescr_t mat_descr, int M, int N, int NNZ, int *rowptrs , int *colind , realtype *data, cusparseHandle_t cusp, SUNContext sunctx)
{
  /* return with NULL matrix on illegal input */
  if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: illegal value(s) for M, N, or NNZ\n");
    return(NULL);
  }

  if ( (rowptrs == NULL) || (colind == NULL) || (data == NULL) )
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: rowptrs, colind, or data is NULL\n");
    return(NULL);
  }

  if (cusparseGetMatIndexBase(mat_descr) != CUSPARSE_INDEX_BASE_ZERO)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: the cusparseMatDescr_t must have index base CUSPARSE_INDEX_BASE_ZERO\n");
    return(NULL);
  }

  SUNMatrix A = SUNMatrix_cuSparse_NewEmpty(sunctx);
  if (A == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_MakeCSR_cuSparse: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
    return(NULL);
  }

  SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda(sunctx);
  if (SMCU_MEMHELP(A) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
    SUNMatDestroy(A);
    return(NULL);
  }

  cudaStream_t stream;
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparseGetStream(cusp, &stream)))
  {
    SUNMatDestroy(A);
    return(NULL);
  }

  /* Fill content: wrap (do not copy) the caller's device pointers. */
  SMCU_CONTENT(A)->M = M;
  SMCU_CONTENT(A)->N = N;
  SMCU_CONTENT(A)->NNZ = NNZ;
  SMCU_CONTENT(A)->nblocks = 1;
  SMCU_CONTENT(A)->blockrows = M;
  SMCU_CONTENT(A)->blockcols = N;
  SMCU_CONTENT(A)->blocknnz = NNZ;
  SMCU_CONTENT(A)->own_matd = SUNFALSE;
  SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
  SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
  SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_CSR;
  SMCU_CONTENT(A)->colind = SUNMemoryHelper_Wrap(colind, SUNMEMTYPE_DEVICE);
  SMCU_CONTENT(A)->rowptrs = SUNMemoryHelper_Wrap(rowptrs, SUNMEMTYPE_DEVICE);
  SMCU_CONTENT(A)->data = SUNMemoryHelper_Wrap(data, SUNMEMTYPE_DEVICE);
  SMCU_CONTENT(A)->mat_descr = mat_descr;
  SMCU_CONTENT(A)->cusp_handle = cusp;
  SMCU_CONTENT(A)->exec_policy = DEFAULT_EXEC_POLICY.clone_new_stream(stream);

  if (SMCU_CONTENT(A)->colind == NULL ||
      SMCU_CONTENT(A)->rowptrs == NULL ||
      SMCU_CONTENT(A)->data == NULL)
  {
    SUNMatDestroy(A);
    return(NULL);
  }

#if CUDART_VERSION >= 11000
  cusparseSpMatDescr_t spmat_descr;
  if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
  {
    SUNMatDestroy(A);
    return(NULL);
  }
  SMCU_CONTENT(A)->spmat_descr = spmat_descr;
  SMCU_CONTENT(A)->dBufferMem = NULL;
  SMCU_CONTENT(A)->bufferSize = 0;
  SMCU_CONTENT(A)->vecX = NULL;
  SMCU_CONTENT(A)->vecY = NULL;
#endif

  return(A);
}

/* Creates a block-CSR matrix: nblocks square blocks of size
 * blockrows x blockrows, each with the SAME sparsity pattern
 * (one shared rowptr/colind of blocknnz entries) but separate data
 * (blocknnz*nblocks values). Returns NULL on illegal input or
 * failure. */
SUNMatrix SUNMatrix_cuSparse_NewBlockCSR(int nblocks, int blockrows, int blockcols, int blocknnz, cusparseHandle_t cusp, SUNContext sunctx)
{
  SUNMemory d_colind, d_rowptr, d_values;
  int M, N, NNZ;
  int alloc_fail = 0;

  /* Return with NULL matrix on illegal input */
  if (blockrows != blockcols)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: matrix must be square for the BCSR format\n");
    return(NULL);
  }

  M = nblocks * blockrows;
  N = M;
  NNZ = nblocks * blocknnz;

  /* Return with NULL matrix on illegal input */
  if ( (M <= 0) || (N <= 0) || (NNZ < 0) )
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: illegal value(s) for M, N, or NNZ\n");
    return(NULL);
  }

  /* Allocate the SUNMatrix object */
  SUNMatrix A = SUNMatrix_cuSparse_NewEmpty(sunctx);
  if (A == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewBlockCSR: SUNMatrix_cuSparse_NewEmpty returned NULL\n");
    return(NULL);
  }

  SMCU_MEMHELP(A) = SUNMemoryHelper_Cuda(sunctx);
  if (SMCU_MEMHELP(A) == NULL)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_NewCSR_cuSparse: SUNMemoryHelper_Cuda returned NULL\n");
    SUNMatDestroy(A);
    return(NULL);
  }

  /* Allocate device memory for the matrix: the pattern arrays are sized
     for ONE block, only the values array is sized for all blocks. */
  alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_colind, sizeof(int)*blocknnz, SUNMEMTYPE_DEVICE, nullptr);
  alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_rowptr, sizeof(int)*(blockrows + 1), SUNMEMTYPE_DEVICE, nullptr);
  alloc_fail += SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &d_values, sizeof(realtype)*blocknnz*nblocks, SUNMEMTYPE_DEVICE, nullptr);
  if (alloc_fail)
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    SUNMatDestroy(A);
    return(NULL);
  }

  /* Choose sensible defaults */
  cusparseStatus_t cusparse_status = CUSPARSE_STATUS_SUCCESS;
  cusparseMatDescr_t mat_descr;
  cusparse_status = cusparseCreateMatDescr(&mat_descr);
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    SUNMatDestroy(A);
    return(NULL);
  }

  cusparse_status = cusparseSetMatType(mat_descr, CUSPARSE_MATRIX_TYPE_GENERAL);
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    cusparseDestroyMatDescr(mat_descr);
    SUNMatDestroy(A);
    return(NULL);
  }

  cusparse_status = cusparseSetMatIndexBase(mat_descr, CUSPARSE_INDEX_BASE_ZERO);
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparse_status))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    cusparseDestroyMatDescr(mat_descr);
    SUNMatDestroy(A);
    return(NULL);
  }

  cudaStream_t stream;
  if (!SUNDIALS_CUSPARSE_VERIFY(cusparseGetStream(cusp, &stream)))
  {
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_colind, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_rowptr, nullptr);
    SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), d_values, nullptr);
    cusparseDestroyMatDescr(mat_descr);
    SUNMatDestroy(A);
    return(NULL);
  }

  /* Fill the content
   */
  SMCU_CONTENT(A)->M = M;
  SMCU_CONTENT(A)->N = N;
  SMCU_CONTENT(A)->NNZ = NNZ;
  SMCU_CONTENT(A)->nblocks = nblocks;
  SMCU_CONTENT(A)->blockrows = blockrows;
  SMCU_CONTENT(A)->blockcols = blockrows;
  SMCU_CONTENT(A)->blocknnz = blocknnz;
  SMCU_CONTENT(A)->own_matd = SUNTRUE;
  SMCU_CONTENT(A)->matvec_issetup = SUNFALSE;
  SMCU_CONTENT(A)->cusp_handle = cusp;
  SMCU_CONTENT(A)->fixed_pattern = SUNFALSE;
  SMCU_CONTENT(A)->sparse_type = SUNMAT_CUSPARSE_BCSR;
  SMCU_CONTENT(A)->colind = d_colind;
  SMCU_CONTENT(A)->rowptrs = d_rowptr;
  SMCU_CONTENT(A)->data = d_values;
  SMCU_CONTENT(A)->mat_descr = mat_descr;
  SMCU_CONTENT(A)->exec_policy = DEFAULT_EXEC_POLICY.clone_new_stream(stream);

#if CUDART_VERSION >= 11000
  cusparseSpMatDescr_t spmat_descr;
  if (!SUNDIALS_CUSPARSE_VERIFY(CreateSpMatDescr(A, &spmat_descr)))
  {
    SUNMatDestroy(A);
    return(NULL);
  }
  SMCU_CONTENT(A)->spmat_descr = spmat_descr;
  SMCU_CONTENT(A)->dBufferMem = NULL;
  SMCU_CONTENT(A)->bufferSize = 0;
  SMCU_CONTENT(A)->vecX = NULL;
  SMCU_CONTENT(A)->vecY = NULL;
#endif

  return(A);
}

/* ------------------------------------------------------------------
 * Implementation specific routines.
* ------------------------------------------------------------------ */ int SUNMatrix_cuSparse_SparseType(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_SPARSETYPE(A)); else return(SUNMAT_ILL_INPUT); } int SUNMatrix_cuSparse_Rows(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_ROWS(A)); else return(SUNMAT_ILL_INPUT); } int SUNMatrix_cuSparse_Columns(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_COLUMNS(A)); else return(SUNMAT_ILL_INPUT); } int SUNMatrix_cuSparse_NNZ(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_NNZ(A)); else return(SUNMAT_ILL_INPUT); } int* SUNMatrix_cuSparse_IndexPointers(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_INDEXPTRSp(A)); else return(NULL); } int* SUNMatrix_cuSparse_IndexValues(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_INDEXVALSp(A)); else return(NULL); } realtype* SUNMatrix_cuSparse_Data(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_DATAp(A)); else return(NULL); } int SUNMatrix_cuSparse_NumBlocks(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_NBLOCKS(A)); else return(SUNMAT_ILL_INPUT); } int SUNMatrix_cuSparse_BlockRows(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_BLOCKROWS(A)); else return(SUNMAT_ILL_INPUT); } int SUNMatrix_cuSparse_BlockColumns(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_BLOCKCOLS(A)); else return(SUNMAT_ILL_INPUT); } int SUNMatrix_cuSparse_BlockNNZ(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_BLOCKNNZ(A)); else return(SUNMAT_ILL_INPUT); } realtype* SUNMatrix_cuSparse_BlockData(SUNMatrix A, int blockidx) { realtype *matdata; int offset; if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE) return(NULL); if (blockidx >= SMCU_NBLOCKS(A)) return(NULL); matdata = SMCU_DATAp(A); offset = SMCU_BLOCKNNZ(A)*blockidx; return(&matdata[offset]); } cusparseMatDescr_t 
SUNMatrix_cuSparse_MatDescr(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_CUSPARSE) return(SMCU_MATDESCR(A)); else return(NULL); } int SUNMatrix_cuSparse_SetFixedPattern(SUNMatrix A, booleantype yesno) { if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE) return(SUNMAT_ILL_INPUT); SMCU_FIXEDPATTERN(A) = yesno; return(SUNMAT_SUCCESS); } int SUNMatrix_cuSparse_SetKernelExecPolicy(SUNMatrix A, SUNCudaExecPolicy* exec_policy) { if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE) return(SUNMAT_ILL_INPUT); /* Reset to the default policy if the new one is NULL */ delete SMCU_EXECPOLICY(A); if (exec_policy) SMCU_EXECPOLICY(A) = exec_policy->clone(); else SMCU_EXECPOLICY(A) = DEFAULT_EXEC_POLICY.clone_new_stream(*SMCU_EXECPOLICY(A)->stream()); return(SUNMAT_SUCCESS); } int SUNMatrix_cuSparse_CopyToDevice(SUNMatrix dA, realtype* h_data, int* h_idxptrs, int* h_idxvals) { int retval; SUNMemory _h_data, _h_idxptrs, _h_idxvals; const cudaStream_t* stream; int nidxvals, nidxptrs; if (SUNMatGetID(dA) != SUNMATRIX_CUSPARSE) return(SUNMAT_ILL_INPUT); stream = SMCU_EXECPOLICY(dA)->stream(); if (h_data != NULL) { _h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST); retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA), SMCU_DATA(dA), _h_data, SMCU_NNZ(dA)*sizeof(realtype), (void*) stream); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_data, nullptr); if (retval != 0) return(SUNMAT_OPERATION_FAIL); } switch(SMCU_SPARSETYPE(dA)) { case SUNMAT_CUSPARSE_CSR: nidxptrs = SMCU_ROWS(dA)+1; nidxvals = SMCU_NNZ(dA); break; case SUNMAT_CUSPARSE_BCSR: nidxptrs = SMCU_BLOCKROWS(dA)+1; nidxvals = SMCU_BLOCKNNZ(dA); break; default: SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_CopyToDevice: unrecognized sparse type\n"); return(SUNMAT_ILL_INPUT); } if (h_idxptrs != NULL) { _h_idxptrs = SUNMemoryHelper_Wrap(h_idxptrs, SUNMEMTYPE_HOST); retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA), SMCU_INDEXPTRS(dA), _h_idxptrs, nidxptrs*sizeof(int), (void*) stream); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxptrs, 
nullptr); if (retval != 0) return(SUNMAT_OPERATION_FAIL); } if (h_idxvals != NULL) { _h_idxvals = SUNMemoryHelper_Wrap(h_idxvals, SUNMEMTYPE_HOST); retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA), SMCU_INDEXVALS(dA), _h_idxvals, nidxvals*sizeof(int), (void*) stream); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxvals, nullptr); if (retval != 0) return(SUNMAT_OPERATION_FAIL); } return(SUNMAT_SUCCESS); } int SUNMatrix_cuSparse_CopyFromDevice(SUNMatrix dA, realtype* h_data, int* h_idxptrs, int* h_idxvals) { int retval; SUNMemory _h_data, _h_idxptrs, _h_idxvals; const cudaStream_t* stream; int nidxvals, nidxptrs; if (SUNMatGetID(dA) != SUNMATRIX_CUSPARSE) return(SUNMAT_ILL_INPUT); stream = SMCU_EXECPOLICY(dA)->stream(); if (h_data != NULL) { _h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST); retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA), _h_data, SMCU_DATA(dA), SMCU_NNZ(dA)*sizeof(realtype), (void*) stream); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_data, nullptr); if (retval != 0) return(SUNMAT_OPERATION_FAIL); } switch(SMCU_SPARSETYPE(dA)) { case SUNMAT_CUSPARSE_CSR: nidxptrs = SMCU_ROWS(dA)+1; nidxvals = SMCU_NNZ(dA); case SUNMAT_CUSPARSE_BCSR: nidxptrs = SMCU_BLOCKROWS(dA)+1; nidxvals = SMCU_BLOCKNNZ(dA); } if (h_idxptrs != NULL) { _h_idxptrs = SUNMemoryHelper_Wrap(h_idxptrs, SUNMEMTYPE_HOST); retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA), _h_idxptrs, SMCU_INDEXPTRS(dA), nidxptrs*sizeof(int), (void*) stream); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxptrs, nullptr); if (retval != 0) return(SUNMAT_OPERATION_FAIL); } if (h_idxvals != NULL) { _h_idxvals = SUNMemoryHelper_Wrap(h_idxvals, SUNMEMTYPE_HOST); retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(dA), _h_idxvals, SMCU_INDEXVALS(dA), nidxvals*sizeof(int), (void*) stream); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(dA), _h_idxvals, nullptr); if (retval != 0) return(SUNMAT_OPERATION_FAIL); } return(SUNMAT_SUCCESS); } /* * ----------------------------------------------------------------- * 
implementation of matrix operations * ----------------------------------------------------------------- */ SUNMatrix_ID SUNMatGetID_cuSparse(SUNMatrix A) { return(SUNMATRIX_CUSPARSE); } /* Returns a new matrix allocated to have the same structure as A, but it does not copy any nonzeros, column vals, or row pointers. */ SUNMatrix SUNMatClone_cuSparse(SUNMatrix A) { SUNMatrix B; switch (SMCU_SPARSETYPE(A)) { case SUNMAT_CUSPARSE_CSR: B = SUNMatrix_cuSparse_NewCSR(SMCU_ROWS(A), SMCU_COLUMNS(A), SMCU_NNZ(A), SMCU_CUSPHANDLE(A), A->sunctx); break; case SUNMAT_CUSPARSE_BCSR: B = SUNMatrix_cuSparse_NewBlockCSR(SMCU_NBLOCKS(A), SMCU_BLOCKROWS(A), SMCU_BLOCKCOLS(A), SMCU_BLOCKNNZ(A), SMCU_CUSPHANDLE(A), A->sunctx); break; default: SUNDIALS_DEBUG_PRINT("ERROR in SUNMatClone_cuSparse: sparse type not recognized\n"); B = NULL; } SMCU_FIXEDPATTERN(B) = SMCU_FIXEDPATTERN(A); delete SMCU_EXECPOLICY(B); SMCU_EXECPOLICY(B) = SMCU_EXECPOLICY(A)->clone(); return(B); } /* Deallocates the SUNMatrix object and all data it owns */ void SUNMatDestroy_cuSparse(SUNMatrix A) { if (A == NULL) return; /* free content */ if (A->content != NULL) { if (SMCU_MEMHELP(A)) { SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_DATA(A), nullptr); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_INDEXPTRS(A), nullptr); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_INDEXVALS(A), nullptr); } else { SUNDIALS_DEBUG_PRINT("WARNING in SUNMatDestroy_cuSparse: mem_helper was NULL when trying to dealloc data, this could result in a memory leak\n"); } if (SMCU_OWNMATD(A)) { /* free cusparseMatDescr_t */ SUNDIALS_CUSPARSE_VERIFY( cusparseDestroyMatDescr(SMCU_MATDESCR(A)) ); } #if CUDART_VERSION >= 11000 SUNDIALS_CUSPARSE_VERIFY( cusparseDestroyDnVec(SMCU_CONTENT(A)->vecX) ); SUNDIALS_CUSPARSE_VERIFY( cusparseDestroyDnVec(SMCU_CONTENT(A)->vecY) ); SUNDIALS_CUSPARSE_VERIFY( cusparseDestroySpMat(SMCU_CONTENT(A)->spmat_descr) ); SUNMemoryHelper_Dealloc(SMCU_MEMHELP(A), SMCU_CONTENT(A)->dBufferMem, nullptr); #endif if 
(SMCU_EXECPOLICY(A))
    {
      delete SMCU_EXECPOLICY(A);
      SMCU_EXECPOLICY(A) = NULL;
    }

    SUNMemoryHelper_Destroy(SMCU_MEMHELP(A));

    /* free content struct */
    free(A->content);
    A->content = NULL;
  }

  /* free ops and matrix */
  if (A->ops)
  {
    free(A->ops);
    A->ops = NULL;
  }
  free(A);
  A = NULL;

  return;
}

/* Performs A_ij = 0 */
int SUNMatZero_cuSparse(SUNMatrix A)
{
  cudaError_t cuerr;
  cudaStream_t stream;

  stream = *SMCU_EXECPOLICY(A)->stream();

  /* set all data to zero */
  cuerr = cudaMemsetAsync(SMCU_DATAp(A), 0, SMCU_NNZ(A)*sizeof(realtype), stream);
  if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);

  /* set all rowptrs to zero unless the sparsity pattern is fixed
     (BLOCKROWS/BLOCKNNZ equal the full sizes for plain CSR, since that
     format is stored as a single block) */
  if (!SMCU_FIXEDPATTERN(A))
  {
    cuerr = cudaMemsetAsync(SMCU_INDEXPTRSp(A), 0, (SMCU_BLOCKROWS(A)+1)*sizeof(int), stream);
    if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);

    /* set all colind to zero */
    cuerr = cudaMemsetAsync(SMCU_INDEXVALSp(A), 0, SMCU_BLOCKNNZ(A)*sizeof(int), stream);
    if (!SUNDIALS_CUDA_VERIFY(cuerr)) return(SUNMAT_OPERATION_FAIL);
  }

  return(SUNMAT_SUCCESS);
}

/* Copies the nonzeros, column vals, and row pointers into dst */
int SUNMatCopy_cuSparse(SUNMatrix src, SUNMatrix dst)
{
  int retval;
  const cudaStream_t* stream;

  /* Verify that src and dst are compatible */
  if (!SMCompatible_cuSparse(src, dst))
    return(SUNMAT_ILL_INPUT);

  stream = SMCU_EXECPOLICY(src)->stream();

  /* Ensure that dst is allocated with at least as much memory as we have
     nonzeros in src */
  if (SMCU_NNZ(dst) < SMCU_NNZ(src))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatCopy_cuSparse: the destination matrix has less nonzeros than the source\n");
    return(SUNMAT_ILL_INPUT);
  }

  /* Zero out dst so that copy works correctly */
  if (SUNMatZero_cuSparse(dst) != SUNMAT_SUCCESS)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatCopy_cuSparse: SUNMatZero_cuSparse failed\n");
    return(SUNMAT_OPERATION_FAIL);
  }

  /* Copy the data over */
  retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src), SMCU_DATA(dst), SMCU_DATA(src),
                                     SMCU_NNZ(src)*sizeof(realtype), (void*) stream);
  if (retval) return(SUNMAT_OPERATION_FAIL);

  /* Copy the row pointers over */
  retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src), SMCU_INDEXPTRS(dst), SMCU_INDEXPTRS(src),
                                     (SMCU_BLOCKROWS(src)+1)*sizeof(int), (void*) stream);
  if (retval) return(SUNMAT_OPERATION_FAIL);

  /* Copy the column indices over */
  retval = SUNMemoryHelper_CopyAsync(SMCU_MEMHELP(src), SMCU_INDEXVALS(dst), SMCU_INDEXVALS(src),
                                     SMCU_BLOCKNNZ(src)*sizeof(int), (void*) stream);
  if (retval) return(SUNMAT_OPERATION_FAIL);

  return(SUNMAT_SUCCESS);
}

/* Performs A = cA + I. Requires the diagonal to be allocated already. */
int SUNMatScaleAddI_cuSparse(realtype c, SUNMatrix A)
{
  unsigned threadsPerBlock, gridSize;
  cudaStream_t stream = *SMCU_EXECPOLICY(A)->stream();

  switch (SMCU_SPARSETYPE(A))
  {
    case SUNMAT_CUSPARSE_CSR:
      /* Choose the grid size to be the number of rows in the matrix, and
         then choose threadsPerBlock to be a multiple of the warp size that
         results in enough threads to have one per 2 columns. */
      threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
      gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
      /* NOTE(review): the kernel launch configuration between the angle
         brackets (gridSize, threadsPerBlock, shared mem, stream) was lost
         in extraction -- restore against upstream. */
      scaleAddIKernelCSR
        <<>>(SMCU_ROWS(A), c, SMCU_DATAp(A), SMCU_INDEXPTRSp(A), SMCU_INDEXVALSp(A));
      break;
    case SUNMAT_CUSPARSE_BCSR:
      /* Choose the grid size to be the number of blocks in the matrix,
         and then choose threadsPerBlock to be a multiple of the warp size
         that results in enough threads to have one per row of the block.
*/
      threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_BLOCKROWS(A));
      gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_NBLOCKS(A)*SMCU_BLOCKROWS(A), threadsPerBlock);
      /* NOTE(review): kernel launch configuration lost in extraction. */
      scaleAddIKernelBCSR
        <<>>(SMCU_BLOCKROWS(A), SMCU_NBLOCKS(A), SMCU_BLOCKNNZ(A), c, SMCU_DATAp(A), SMCU_INDEXPTRSp(A), SMCU_INDEXVALSp(A));
      break;
    default:
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAddI_cuSparse: sparse type not recognized\n");
      return(SUNMAT_ILL_INPUT);
  }

#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
  cudaDeviceSynchronize();
  if (!SUNDIALS_CUDA_VERIFY(cudaGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif

  return(SUNMAT_SUCCESS);
}

/* Performs A = cA + B */
int SUNMatScaleAdd_cuSparse(realtype c, SUNMatrix A, SUNMatrix B)
{
  cudaStream_t stream;
  unsigned threadsPerBlock, gridSize;

  if (!SMCompatible_cuSparse(A, B))
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAdd_cuSparse: SUNMatScaleAdd_cuSparse failed\n");
    return(SUNMAT_ILL_INPUT);
  }

  stream = *SMCU_EXECPOLICY(A)->stream();

  switch (SMCU_SPARSETYPE(A))
  {
    case SUNMAT_CUSPARSE_CSR:
      /* Choose the grid size to be the number of rows in the matrix, and
         then choose threadsPerBlock to be a multiple of the warp size that
         results in enough threads to have one per 2 columns. */
      threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
      gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
      /* NOTE(review): kernel launch configuration lost in extraction. */
      scaleAddKernelCSR
        <<>>(SMCU_NNZ(A), c, SMCU_DATAp(A), SMCU_DATAp(B));
      break;
    case SUNMAT_CUSPARSE_BCSR:
      /* Choose the grid size to be the number of blocks in the matrix,
         and then choose threadsPerBlock to be a multiple of the warp size
         that results in enough threads to have one per row of the block. */
      threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_BLOCKROWS(A));
      gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_NBLOCKS(A)*SMCU_BLOCKROWS(A), threadsPerBlock);
      /* the elementwise CSR kernel is reused here: both cases scale and
         add the flat data arrays over all SMCU_NNZ(A) values */
      scaleAddKernelCSR
        <<>>(SMCU_NNZ(A), c, SMCU_DATAp(A), SMCU_DATAp(B));
      break;
    default:
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMatScaleAdd_cuSparse: sparse type not recognized\n");
      return(SUNMAT_ILL_INPUT);
  }

#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
  cudaDeviceSynchronize();
  if (!SUNDIALS_CUDA_VERIFY(cudaGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif

  return(SUNMAT_SUCCESS);
}

/* Setup buffers needed for Matvec: on CUDA 11+ this lazily creates the
   generic-API dense-vector descriptors and the SpMV work buffer; on
   older CUDA it only marks setup as done. */
int SUNMatMatvecSetup_cuSparse(SUNMatrix A)
{
#if CUDART_VERSION >= 11000
  /* placeholder is substituted with the real device pointers via
     cusparseDnVecSetValues at matvec time */
  realtype placeholder[1];
  const realtype one = ONE;

  /* Check if setup has already been done */
  if (!(SMCU_CONTENT(A)->matvec_issetup))
  {
    SUNDIALS_CUSPARSE_VERIFY( cusparseCreateDnVec(&SMCU_CONTENT(A)->vecX, SMCU_COLUMNS(A),
                                                  placeholder, CUDA_R_XF) );
    SUNDIALS_CUSPARSE_VERIFY( cusparseCreateDnVec(&SMCU_CONTENT(A)->vecY, SMCU_ROWS(A),
                                                  placeholder, CUDA_R_XF) );
    SUNDIALS_CUSPARSE_VERIFY( cusparseSpMV_bufferSize(SMCU_CUSPHANDLE(A),
                                                      CUSPARSE_OPERATION_NON_TRANSPOSE,
                                                      &one, SMCU_CONTENT(A)->spmat_descr,
                                                      SMCU_CONTENT(A)->vecX, &one,
                                                      SMCU_CONTENT(A)->vecY, CUDA_R_XF,
                                                      CUSPARSE_MV_ALG_DEFAULT,
                                                      &SMCU_CONTENT(A)->bufferSize) );
    if ( SUNMemoryHelper_Alloc(SMCU_MEMHELP(A), &SMCU_CONTENT(A)->dBufferMem,
                               SMCU_CONTENT(A)->bufferSize, SUNMEMTYPE_DEVICE, nullptr) )
      return(SUNMAT_OPERATION_FAIL);
  }
#endif
  SMCU_CONTENT(A)->matvec_issetup = SUNTRUE;
  return(SUNMAT_SUCCESS);
}

/* Perform y = Ax */
int SUNMatMatvec_cuSparse(SUNMatrix A, N_Vector x, N_Vector y)
{
  /* Verify that the dimensions of A, x, and y agree */
  if ( (SMCU_COLUMNS(A) != N_VGetLength(x)) ||
       (SMCU_ROWS(A) != N_VGetLength(y)) )
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatMatvec_cuSparse: dimensions do not agree\n");
    return(SUNMAT_ILL_INPUT);
  }

  realtype *d_xdata = N_VGetDeviceArrayPointer(x);
  realtype *d_ydata = N_VGetDeviceArrayPointer(y);

  if (SMCU_SPARSETYPE(A) == SUNMAT_CUSPARSE_CSR)
  {
    const realtype one = ONE;

    /* Zero result vector (SpMV computes y = A*x + 1*y below) */
    N_VConst(ZERO, y);

#if CUDART_VERSION >= 11000
    {
      /* Setup matvec if it has not been done yet */
      if (!SMCU_CONTENT(A)->matvec_issetup && SUNMatMatvecSetup_cuSparse(A))
      {
        return(SUNMAT_OPERATION_FAIL);
      }

      SUNDIALS_CUSPARSE_VERIFY( cusparseDnVecSetValues(SMCU_CONTENT(A)->vecX, d_xdata) );
      SUNDIALS_CUSPARSE_VERIFY( cusparseDnVecSetValues(SMCU_CONTENT(A)->vecY, d_ydata) );
      SUNDIALS_CUSPARSE_VERIFY( cusparseSpMV(SMCU_CUSPHANDLE(A),
                                             CUSPARSE_OPERATION_NON_TRANSPOSE,
                                             &one, SMCU_CONTENT(A)->spmat_descr,
                                             SMCU_CONTENT(A)->vecX, &one,
                                             SMCU_CONTENT(A)->vecY, CUDA_R_XF,
                                             CUSPARSE_MV_ALG_DEFAULT,
                                             SMCU_CONTENT(A)->dBufferMem->ptr) );
    }
#else
    SUNDIALS_CUSPARSE_VERIFY( cusparseXcsrmv(SMCU_CUSPHANDLE(A),
                                             CUSPARSE_OPERATION_NON_TRANSPOSE,
                                             SMCU_ROWS(A), SMCU_COLUMNS(A),
                                             SMCU_NNZ(A), &one, SMCU_MATDESCR(A),
                                             SMCU_DATAp(A), SMCU_INDEXPTRSp(A),
                                             SMCU_INDEXVALSp(A), d_xdata, &one, d_ydata) );
#endif
  }
  else if (SMCU_SPARSETYPE(A) == SUNMAT_CUSPARSE_BCSR)
  {
    cudaStream_t stream;
    unsigned gridSize, threadsPerBlock;

    stream = *SMCU_EXECPOLICY(A)->stream();

    /* Choose the grid size to be the number of blocks in the matrix,
       and then choose threadsPerBlock to be a multiple of the warp size
       that results in enough threads to have one per row of the block.
       NOTE(review): the code below sizes the launch from
       SMCU_COLUMNS(A)/2 (the CSR heuristic), which contradicts this
       comment; the ScaleAdd/ScaleAddI BCSR cases use
       blockSize(SMCU_BLOCKROWS(A)) -- confirm against upstream. */
    threadsPerBlock = SMCU_EXECPOLICY(A)->blockSize(SMCU_COLUMNS(A)/2);
    gridSize = SMCU_EXECPOLICY(A)->gridSize(SMCU_ROWS(A)*SMCU_COLUMNS(A)/2, threadsPerBlock);
    /* NOTE(review): kernel launch configuration lost in extraction. */
    matvecBCSR
      <<>>(SMCU_BLOCKROWS(A), SMCU_NBLOCKS(A), SMCU_BLOCKNNZ(A), SMCU_DATAp(A), SMCU_INDEXPTRSp(A), SMCU_INDEXVALSp(A), d_xdata, d_ydata);
  }
  else
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMatMatvec_cuSparse: sparse type not recognized\n");
    return(SUNMAT_ILL_INPUT);
  }

#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
  cudaDeviceSynchronize();
  if (!SUNDIALS_CUDA_VERIFY(cudaGetLastError())) return(SUNMAT_OPERATION_FAIL);
#endif

  return(SUNMAT_SUCCESS);
}

/*
 * =================================================================
 * private functions
 * =================================================================
 */

/* -----------------------------------------------------------------
 * Function to check compatibility of two sparse SUNMatrix objects.
 * Both must be SUNMATRIX_CUSPARSE with the same shape and sparse type
 * (nonzero counts are NOT compared here; SUNMatCopy checks those). */
static booleantype SMCompatible_cuSparse(SUNMatrix A, SUNMatrix B)
{
  /* both matrices must be sparse */
  if ( (SUNMatGetID(A) != SUNMATRIX_CUSPARSE) ||
       (SUNMatGetID(B) != SUNMATRIX_CUSPARSE) )
    return(SUNFALSE);

  /* both matrices must have the same shape and sparsity type */
  if (SMCU_ROWS(A) != SMCU_ROWS(B))
    return(SUNFALSE);
  if (SMCU_COLUMNS(A) != SMCU_COLUMNS(B))
    return(SUNFALSE);
  if (SMCU_SPARSETYPE(A) != SMCU_SPARSETYPE(B))
    return(SUNFALSE);

  return(SUNTRUE);
}

/* -----------------------------------------------------------------
 * Function to create empty SUNMatrix with ops attached and
 * the content structure allocated.
*/ SUNMatrix SUNMatrix_cuSparse_NewEmpty(SUNContext sunctx) { /* Create an empty matrix object */ SUNMatrix A = NULL; A = SUNMatNewEmpty(sunctx); if (A == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewEmpty: SUNMatNewEmpty failed\n"); return(NULL); } /* Attach operations */ A->ops->getid = SUNMatGetID_cuSparse; A->ops->clone = SUNMatClone_cuSparse; A->ops->destroy = SUNMatDestroy_cuSparse; A->ops->zero = SUNMatZero_cuSparse; A->ops->copy = SUNMatCopy_cuSparse; A->ops->scaleadd = SUNMatScaleAdd_cuSparse; A->ops->scaleaddi = SUNMatScaleAddI_cuSparse; A->ops->matvecsetup = SUNMatMatvecSetup_cuSparse; A->ops->matvec = SUNMatMatvec_cuSparse; /* Create content */ SUNMatrix_Content_cuSparse content = NULL; content = (SUNMatrix_Content_cuSparse) malloc(sizeof *content); if (content == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMatrix_cuSparse_NewEmpty: failed to malloc content\n"); SUNMatDestroy(A); return(NULL); } /* Attach content */ A->content = content; content->mem_helper = NULL; return(A); } #if CUDART_VERSION >= 11000 cusparseStatus_t CreateSpMatDescr(SUNMatrix A, cusparseSpMatDescr_t *spmat_descr) { /* CUDA 11 introduced the "Generic API" and removed the cusparseXcsrmv that works on the old cusparseMatDescr_t and raw data arrays. However, cuSolverSp stuff requires the cusparseMatDescr_t still. So, we have to create this cusparseSpMatDescr_t *and* the cusparseMatDescr_t. */ return(cusparseCreateCsr(spmat_descr, SMCU_ROWS(A), SMCU_COLUMNS(A), SMCU_NNZ(A), SMCU_INDEXPTRSp(A), SMCU_INDEXVALSp(A), SMCU_DATAp(A), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_R_XF)); } #endif StanHeaders/src/sunmatrix/magmadense/0000755000176200001440000000000014511135055017375 5ustar liggesusersStanHeaders/src/sunmatrix/magmadense/sunmatrix_magmadense.cpp0000644000176200001440000004621214645137106024330 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. 
Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the dense implementation of * the SUNMATRIX package based on MAGMA. * -----------------------------------------------------------------*/ #include #include #if defined(SUNDIALS_MAGMA_BACKENDS_CUDA) #include "sundials_cuda.h" #include "dense_cuda_kernels.cuh" using namespace sundials::sunmatrix_gpudense::cuda; #define SUNDIALS_HIP_OR_CUDA(a,b) b #elif defined(SUNDIALS_MAGMA_BACKENDS_HIP) #include "sundials_hip.h" #include "dense_hip_kernels.hip.hpp" using namespace sundials::sunmatrix_gpudense::hip; #define SUNDIALS_HIP_OR_CUDA(a,b) a #endif /* Content accessor macro */ #define SMLD_CONTENT(A) ( (SUNMatrixContent_MagmaDense) (A->content) ) /* Constants */ #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* Macros for magma operations based on precision */ #if defined(SUNDIALS_DOUBLE_PRECISION) #define xgemv(q,...) magma_dgemv(__VA_ARGS__, q) #define xgemv_batched(q,...) magmablas_dgemv_batched(__VA_ARGS__, q) #define magma_xset_pointer(q,...) magma_dset_pointer(__VA_ARGS__, q) #define xprint(q,...) magma_dprint_gpu(__VA_ARGS__, q) #elif defined(SUNDIALS_SINGLE_PRECISION) #define xgemv(q,...) magma_sgemv(__VA_ARGS__, q) #define xgemv_batched(q,...) magmablas_sgemv_batched(__VA_ARGS__, q) #define magma_xset_pointer(q,...) magma_sset_pointer(__VA_ARGS__, q) #define xprint(q,...) 
magma_sprint_gpu(__VA_ARGS__, q) #else #error unsupported precision #endif /* Private function prototypes */ static booleantype SMCompatible_MagmaDense(SUNMatrix A, SUNMatrix B); static booleantype SMCompatible2_MagmaDense(SUNMatrix A, N_Vector x, N_Vector y); /* * ---------------------------------------------------------------------------- * Implementation specific routines * ---------------------------------------------------------------------------- */ /* * Constructor functions */ SUNMatrix SUNMatrix_MagmaDense(sunindextype M, sunindextype N, SUNMemoryType memtype, SUNMemoryHelper memhelper, void* queue, SUNContext sunctx) { return(SUNMatrix_MagmaDenseBlock(1, M, N, memtype, memhelper, queue, sunctx)); } SUNMatrix SUNMatrix_MagmaDenseBlock(sunindextype nblocks, sunindextype M, sunindextype N, SUNMemoryType memtype, SUNMemoryHelper memhelper, void* queue, SUNContext sunctx) { SUNMatrix Amat; SUNMatrixContent_MagmaDense A; int retval; /* Return with NULL matrix on illegal dimension input */ if ( (M <= 0) || (N <= 0) || (nblocks <= 0)) return(NULL); /* Check for valid memory type options */ if ((memtype != SUNMEMTYPE_UVM) && (memtype != SUNMEMTYPE_DEVICE)) return(NULL); /* Check for valid memory helper */ if (memhelper == NULL) return(NULL); /* First thing we do is initialize magma */ retval = magma_init(); if (retval != MAGMA_SUCCESS) return(NULL); /* Create an empty matrix object */ Amat = NULL; Amat = SUNMatNewEmpty(sunctx); if (Amat == NULL) return(NULL); /* Attach operations */ Amat->ops->getid = SUNMatGetID_MagmaDense; Amat->ops->clone = SUNMatClone_MagmaDense; Amat->ops->destroy = SUNMatDestroy_MagmaDense; Amat->ops->zero = SUNMatZero_MagmaDense; Amat->ops->copy = SUNMatCopy_MagmaDense; Amat->ops->scaleadd = SUNMatScaleAdd_MagmaDense; Amat->ops->scaleaddi = SUNMatScaleAddI_MagmaDense; Amat->ops->matvecsetup = SUNMatMatvecSetup_MagmaDense; Amat->ops->matvec = SUNMatMatvec_MagmaDense; Amat->ops->space = SUNMatSpace_MagmaDense; /* Create content */ A = NULL; A 
= (SUNMatrixContent_MagmaDense) malloc(sizeof(*A)); if (A == NULL) { SUNMatDestroy(Amat); return(NULL); } /* Attach content */ Amat->content = A; /* Fill content */ A->M = M; A->N = N; A->nblocks = nblocks; A->ldata = M*N*nblocks; A->data = NULL; A->blocks = NULL; A->xblocks = NULL; A->yblocks = NULL; A->memhelp = memhelper; A->q = NULL; magma_getdevice(&A->device_id); SUNDIALS_HIP_OR_CUDA( magma_queue_create_from_hip(A->device_id, (hipStream_t) queue, NULL, NULL, &A->q);, magma_queue_create_from_cuda(A->device_id, (cudaStream_t) queue, NULL, NULL, &A->q); ) /* Allocate data */ retval = SUNMemoryHelper_Alloc(A->memhelp, &A->data, sizeof(realtype) * A->ldata, memtype, nullptr); if (retval) { SUNMatDestroy(Amat); return(NULL); } if (A->nblocks > 1) { /* Allocate array of pointers to block data */ retval = SUNMemoryHelper_Alloc(A->memhelp, &A->blocks, sizeof(realtype*) * A->nblocks, memtype, nullptr); if (retval) { SUNMatDestroy(Amat); return(NULL); } /* Initialize array of pointers to block data */ magma_xset_pointer(A->q, (realtype**)A->blocks->ptr, (realtype*)A->data->ptr, A->M, 0, 0, A->M*A->N, A->nblocks); } return(Amat); } /* * Accessor functions */ sunindextype SUNMatrix_MagmaDense_Rows(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return(A->M * A->nblocks); else return(SUNMAT_ILL_INPUT); } sunindextype SUNMatrix_MagmaDense_Columns(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return(A->N * A->nblocks); else return(SUNMAT_ILL_INPUT); } sunindextype SUNMatrix_MagmaDense_BlockRows(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return(A->M); else return(SUNMAT_ILL_INPUT); } sunindextype SUNMatrix_MagmaDense_BlockColumns(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return(A->N); else 
return(SUNMAT_ILL_INPUT); } sunindextype SUNMatrix_MagmaDense_NumBlocks(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return(A->nblocks); else return(SUNMAT_ILL_INPUT); } sunindextype SUNMatrix_MagmaDense_LData(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return(A->ldata); else return(SUNMAT_ILL_INPUT); } realtype* SUNMatrix_MagmaDense_Data(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return((realtype*) A->data->ptr); else return(NULL); } realtype** SUNMatrix_MagmaDense_BlockData(SUNMatrix Amat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (SUNMatGetID(Amat) == SUNMATRIX_MAGMADENSE) return((realtype**) A->blocks->ptr); else return(NULL); } extern realtype* SUNMatrix_MagmaDense_Block(SUNMatrix Amat, sunindextype k); extern realtype* SUNMatrix_MagmaDense_Column(SUNMatrix Amat, sunindextype j); extern realtype* SUNMatrix_MagmaDense_BlockColumn(SUNMatrix Amat, sunindextype k, sunindextype j); /* * Utility functions */ void SUNMatrix_MagmaDense_Print(SUNMatrix Amat) { if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return; SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); for (sunindextype k = 0; k < A->nblocks; k++) xprint(A->q, A->M, A->N, SUNMatrix_MagmaDense_Block(Amat,k), A->M); } int SUNMatrix_MagmaDense_CopyToDevice(SUNMatrix Amat, realtype* h_data) { if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); int retval = 0; SUNMemory _h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST); SUNDIALS_HIP_OR_CUDA( hipStream_t stream = magma_queue_get_hip_stream(A->q);, cudaStream_t stream = magma_queue_get_cuda_stream(A->q); ) retval = SUNMemoryHelper_CopyAsync(A->memhelp, A->data, _h_data, sizeof(realtype) * A->ldata, (void*) &stream); magma_queue_sync(A->q); /* sync with 
respect to host, but only this stream */ SUNMemoryHelper_Dealloc(A->memhelp, _h_data, nullptr); return(retval == 0 ? SUNMAT_SUCCESS : SUNMAT_MEM_FAIL); } int SUNMatrix_MagmaDense_CopyFromDevice(SUNMatrix Amat, realtype* h_data) { if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); int retval = 0; SUNMemory _h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST); SUNDIALS_HIP_OR_CUDA( hipStream_t stream = magma_queue_get_hip_stream(A->q);, cudaStream_t stream = magma_queue_get_cuda_stream(A->q); ) retval = SUNMemoryHelper_CopyAsync(A->memhelp, _h_data, A->data, sizeof(realtype) * A->ldata, (void*) &stream); magma_queue_sync(A->q); /* sync with respect to host, but only this stream */ SUNMemoryHelper_Dealloc(A->memhelp, _h_data, nullptr); return(retval == 0 ? SUNMAT_SUCCESS : SUNMAT_MEM_FAIL); } /* * ----------------------------------------------------------------- * Implementation of generic SUNMatrix operations. * ----------------------------------------------------------------- */ SUNMatrix SUNMatClone_MagmaDense(SUNMatrix Amat) { if (Amat == NULL) return(NULL); if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(NULL); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); SUNMatrix B = NULL; SUNDIALS_HIP_OR_CUDA( hipStream_t stream = magma_queue_get_hip_stream(A->q);, cudaStream_t stream = magma_queue_get_cuda_stream(A->q); ) if (A->nblocks > 1) B = SUNMatrix_MagmaDenseBlock(A->nblocks, A->M, A->N, A->data->type, A->memhelp, stream, Amat->sunctx); else B = SUNMatrix_MagmaDense(A->M, A->N, A->data->type, A->memhelp, stream, Amat->sunctx); return(B); } void SUNMatDestroy_MagmaDense(SUNMatrix Amat) { if (Amat == NULL) return; if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return; SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); /* Sync before destroying */ magma_queue_sync(A->q); /* Free content */ if (A) { /* Free data array(s) */ if (A->data) SUNMemoryHelper_Dealloc(A->memhelp, A->data, 
nullptr); if (A->blocks) SUNMemoryHelper_Dealloc(A->memhelp, A->blocks, nullptr); if (A->xblocks) SUNMemoryHelper_Dealloc(A->memhelp, A->xblocks, nullptr); if (A->yblocks) SUNMemoryHelper_Dealloc(A->memhelp, A->yblocks, nullptr); magma_queue_destroy(A->q); /* Free content struct */ free(A); Amat->content = NULL; } /* Free ops */ if (Amat->ops) { free(Amat->ops); Amat->ops = NULL; } /* Free matrix */ free(Amat); Amat = NULL; /* Call magma_finalize, but note that magma_finalize does nothing until it has been called the same number of times as magma_init */ magma_finalize(); return; } int SUNMatZero_MagmaDense(SUNMatrix Amat) { if (Amat == NULL) return(SUNMAT_ILL_INPUT); if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); /* Zero out matrix */ SUNDIALS_LAUNCH_KERNEL(SUNDIALS_KERNEL_NAME(zeroKernel), dim3(std::min(A->nblocks,INT_MAX),1,1), SUNDIALS_HIP_OR_CUDA( dim3(1,16,16), dim3(1,16,32) ), /* We choose slightly larger thread blocks when using HIP since the warps are larger */ 0, SUNDIALS_HIP_OR_CUDA( magma_queue_get_hip_stream(A->q), magma_queue_get_cuda_stream(A->q) ), A->M, A->N, A->nblocks, (realtype*) A->data->ptr ); return(SUNMAT_SUCCESS); } int SUNMatCopy_MagmaDense(SUNMatrix Amat, SUNMatrix Bmat) { if ((Amat == NULL) || (Bmat == NULL)) return(SUNMAT_ILL_INPUT); if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); SUNMatrixContent_MagmaDense B = SMLD_CONTENT(Bmat); /* Verify that A and B are compatible */ if (!SMCompatible_MagmaDense(Amat, Bmat)) return SUNMAT_ILL_INPUT; /* Copy A into B */ SUNDIALS_LAUNCH_KERNEL(SUNDIALS_KERNEL_NAME(copyKernel), dim3(std::min(A->nblocks,INT_MAX),1,1), SUNDIALS_HIP_OR_CUDA( dim3(1,16,16), dim3(1,16,32) ), 0, SUNDIALS_HIP_OR_CUDA( magma_queue_get_hip_stream(A->q), magma_queue_get_cuda_stream(A->q) ), A->M, A->N, A->nblocks, (const realtype*) A->data->ptr, (realtype*) B->data->ptr ); 
return(SUNMAT_SUCCESS); } int SUNMatScaleAddI_MagmaDense(realtype c, SUNMatrix Amat) { if (Amat == NULL) return(SUNMAT_ILL_INPUT); if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); SUNDIALS_LAUNCH_KERNEL(SUNDIALS_KERNEL_NAME(scaleAddIKernel), dim3(std::min(A->nblocks,INT_MAX),1,1), SUNDIALS_HIP_OR_CUDA( dim3(1,16,16), dim3(1,16,32) ), 0, SUNDIALS_HIP_OR_CUDA( magma_queue_get_hip_stream(A->q), magma_queue_get_cuda_stream(A->q) ), A->M, A->N, A->nblocks, c, (realtype*) A->data->ptr ); return(SUNMAT_SUCCESS); } int SUNMatScaleAdd_MagmaDense(realtype c, SUNMatrix Amat, SUNMatrix Bmat) { if ((Amat == NULL) || (Bmat == NULL)) return(SUNMAT_ILL_INPUT); if ((SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) || !SMCompatible_MagmaDense(Amat, Bmat)) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); SUNMatrixContent_MagmaDense B = SMLD_CONTENT(Bmat); SUNDIALS_LAUNCH_KERNEL(SUNDIALS_KERNEL_NAME(scaleAddKernel), dim3(std::min(A->nblocks,INT_MAX),1,1), SUNDIALS_HIP_OR_CUDA( dim3(1,16,16), dim3(1,16,32) ), 0, SUNDIALS_HIP_OR_CUDA( magma_queue_get_hip_stream(A->q), magma_queue_get_cuda_stream(A->q) ), A->M, A->N, A->nblocks, c, (realtype*) A->data->ptr, (const realtype*) B->data->ptr ); return(SUNMAT_SUCCESS); } int SUNMatMatvecSetup_MagmaDense(SUNMatrix Amat) { int retval = 0; if (Amat == NULL) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (A->nblocks > 1) { /* Allocate array of pointers to blocks on device */ if (A->xblocks == NULL) retval = SUNMemoryHelper_Alloc(A->memhelp, &A->xblocks, sizeof(realtype*) * A->nblocks, A->data->type, nullptr); if (retval) return(SUNMAT_MEM_FAIL); if (A->yblocks == NULL) retval = SUNMemoryHelper_Alloc(A->memhelp, &A->yblocks, sizeof(realtype*) * A->nblocks, A->data->type, nullptr); if (retval) return(SUNMAT_MEM_FAIL); } return(SUNMAT_SUCCESS); } int SUNMatMatvec_MagmaDense(SUNMatrix Amat, N_Vector x, N_Vector y) { if 
((Amat == NULL) || (x == NULL) || (y == NULL)) return(SUNMAT_ILL_INPUT); if ((SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) || !SMCompatible2_MagmaDense(Amat, x, y)) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); if (A->nblocks > 1) { /* First, we need to create an array of pointers to the matrix and vector blocks */ SUNDIALS_LAUNCH_KERNEL(SUNDIALS_KERNEL_NAME(getBlockPointers), A->nblocks, 256, 0, SUNDIALS_HIP_OR_CUDA( magma_queue_get_hip_stream(A->q), magma_queue_get_cuda_stream(A->q) ), A->M, A->N, A->nblocks, (realtype*)A->data->ptr, (realtype**)A->blocks->ptr, (realtype*)N_VGetDeviceArrayPointer(x), (realtype**)A->xblocks->ptr, (realtype*)N_VGetDeviceArrayPointer(y), (realtype**)A->yblocks->ptr ); /* Now we can use a batched gemv to do y = alpha*A*x + beta*y where A is block diagonal */ xgemv_batched( A->q, /* queue/stream to execute in */ MagmaNoTrans, /* use A not A^T */ A->M, /* number of rows for a block */ A->N, /* number of cols for a block */ ONE, /* alpha */ (realtype**)A->blocks->ptr, A->M, /* leading dimension of A */ (realtype**)A->xblocks->ptr, 1, /* increment (stride) of xblocks */ ZERO, /* beta */ (realtype**)A->yblocks->ptr, 1, /* increment (stride) of yblocks */ A->nblocks /* number of blocks */ ); } else { /* Now we can use gemv to do y = alpha*A*x + beta*y */ xgemv( A->q, /* queue/stream to execute in */ MagmaNoTrans, /* use A not A^T */ A->M, /* number of rows */ A->N, /* number of cols */ ONE, /* alpha */ (const realtype*)A->data->ptr, A->M, /* leading dimension of A */ (const realtype*)N_VGetDeviceArrayPointer(x), 1, /* increment for x data */ ZERO, /* beta */ (realtype*)N_VGetDeviceArrayPointer(y), 1 /* increment for y data */ ); } return(SUNMAT_SUCCESS); } int SUNMatSpace_MagmaDense(SUNMatrix Amat, long int *lenrw, long int *leniw) { if (Amat == NULL) return(SUNMAT_ILL_INPUT); if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(SUNMAT_ILL_INPUT); SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); *lenrw = 
A->ldata; *leniw = 4; return(SUNMAT_SUCCESS); } /* * ----------------------------------------------------------------- * Private functions * ----------------------------------------------------------------- */ static booleantype SMCompatible_MagmaDense(SUNMatrix Amat, SUNMatrix Bmat) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); SUNMatrixContent_MagmaDense B = SMLD_CONTENT(Bmat); /* Both matrices must be SUNMATRIX_MAGMADENSE */ if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(SUNFALSE); if (SUNMatGetID(Bmat) != SUNMATRIX_MAGMADENSE) return(SUNFALSE); /* Both matrices must have the same shape */ if (A->M != B->M) return(SUNFALSE); if (A->N != B->N) return(SUNFALSE); if (A->nblocks != B->nblocks) return(SUNFALSE); return(SUNTRUE); } static booleantype SMCompatible2_MagmaDense(SUNMatrix Amat, N_Vector x, N_Vector y) { SUNMatrixContent_MagmaDense A = SMLD_CONTENT(Amat); /* Vectors must implement N_VGetDeviceArrayPointer */ if (x->ops->nvgetdevicearraypointer == NULL || y->ops->nvgetdevicearraypointer == NULL) return(SUNFALSE); /* Inner dimensions must agree */ if (A->N*A->nblocks != N_VGetLength(x)) return(SUNFALSE); /* Outer dimensions must agree */ if (A->M*A->nblocks != N_VGetLength(y)) return(SUNFALSE); return(SUNTRUE); } StanHeaders/src/sunmatrix/magmadense/dense_cuda_kernels.cuh0000644000176200001440000000576214645137106023734 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the dense matrix CUDA kernels * for the SUNMATRIX package based on MAGMA. * -----------------------------------------------------------------*/ #ifndef _SUNGPUDENSE_MATRIX_KERNELS_CUH_ #define _SUNGPUDENSE_MATRIX_KERNELS_CUH_ #include #include namespace sundials { namespace sunmatrix_gpudense { namespace cuda { template __device__ __forceinline__ void block_col_row(I nblocks, I m, I n, Lambda&& fn) { for (I block = blockIdx.x*blockDim.x + threadIdx.x; block < nblocks; block += blockDim.x*gridDim.x) { for (I col = blockIdx.y*blockDim.y + threadIdx.y; col < n; col += blockDim.y*gridDim.y) { for (I row = blockIdx.z*blockDim.z + threadIdx.z; row < m; row += blockDim.z*gridDim.z) { fn(block*m*n+(col*m + row), row, col); } } } } template __global__ void getBlockPointers(I m, I n, I nblocks, T* A, T** Ablocks, T* x, T** xblocks, T* y, T** yblocks) { for (I block = blockIdx.x*blockDim.x + threadIdx.x; block < nblocks; block += blockDim.x*gridDim.x) { Ablocks[block] = &A[block*m*n]; xblocks[block] = &x[block*n]; yblocks[block] = &y[block*m]; }; } template __global__ void zeroKernel(I m, I n, I nblocks, T* A) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { A[kij] = 0.0; }); } template __global__ void copyKernel(I m, I n, I nblocks, const T* A, T* B) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { B[kij] = A[kij]; }); } template __global__ void scaleAddIKernel(I m, I n, I nblocks, T c, T* A) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { if (row == col) A[kij] = c*A[kij] + 1.0; else A[kij] = c*A[kij]; }); } template __global__ void scaleAddKernel(I m, I n, I nblocks, T c, T* A, const T* B) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { A[kij] = c*A[kij] + B[kij]; }); } } // namespace cuda } // namespace 
sunmatrix_gpudense } // namespace sundials #endif StanHeaders/src/sunmatrix/magmadense/dense_hip_kernels.hip.hpp0000644000176200001440000000570514645137106024364 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the dense matrix HIP kernels * for the SUNMATRIX package based on MAGMA. * -----------------------------------------------------------------*/ #ifndef _SUNGPUDENSE_MATRIX_KERNELS_HIP #define _SUNGPUDENSE_MATRIX_KERNELS_HIP #include namespace sundials { namespace sunmatrix_gpudense { namespace hip { template __device__ __forceinline__ void block_col_row(I nblocks, I m, I n, Lambda&& fn) { for (I block = blockIdx.x*blockDim.x + threadIdx.x; block < nblocks; block += blockDim.x*gridDim.x) { for (I col = blockIdx.y*blockDim.y + threadIdx.y; col < n; col += blockDim.y*gridDim.y) { for (I row = blockIdx.z*blockDim.z + threadIdx.z; row < m; row += blockDim.z*gridDim.z) { fn(block*m*n+(col*m + row), row, col); } } } } template __global__ void getBlockPointers(I m, I n, I nblocks, T* A, T** Ablocks, T* x, T** xblocks, T* y, T** yblocks) { for (I block = blockIdx.x*blockDim.x + threadIdx.x; block < nblocks; block += blockDim.x*gridDim.x) { Ablocks[block] = &A[block*m*n]; xblocks[block] = &x[block*n]; yblocks[block] = &y[block*m]; }; } template __global__ void zeroKernel(I m, I n, I nblocks, T* A) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { A[kij] = 0.0; }); } template __global__ void copyKernel(I m, I n, I nblocks, const T* 
A, T* B) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { B[kij] = A[kij]; }); } template __global__ void scaleAddIKernel(I m, I n, I nblocks, T c, T* A) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { if (row == col) A[kij] = c*A[kij] + 1.0; else A[kij] = c*A[kij]; }); } template __global__ void scaleAddKernel(I m, I n, I nblocks, T c, T* A, const T* B) { block_col_row(nblocks, m, n, [=] __device__ (I kij, I row, I col) { A[kij] = c*A[kij] + B[kij]; }); } } // namespace cuda } // namespace sunmatrix_gpudense } // namespace sundials #endif StanHeaders/src/sunmatrix/onemkldense/0000755000176200001440000000000014511135055017600 5ustar liggesusersStanHeaders/src/sunmatrix/onemkldense/sunmatrix_onemkldense.cpp0000644000176200001440000005103214645137106024732 0ustar liggesusers/* --------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * --------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * --------------------------------------------------------------------------- * This is the implementation file for the dense implementation of the * SUNMATRIX class using the Intel oneAPI Math Kernel Library (oneMKL). 
* ---------------------------------------------------------------------------*/ #include #include #include #include // SUNDIALS public headers #include #include // SUNDIALS private headers #include "sundials_debug.h" #include "sundials_sycl.h" // Check for a valid precision #if defined(SUNDIALS_EXTENDED_PRECISION) #error "oneMLK unsupported precision" #endif // Constants #define ZERO RCONST(0.0) #define ONE RCONST(1.0) // Content accessor macros #define MAT_CONTENT(A) ((SUNMatrixContent_OneMklDense) (A->content)) #define MAT_LAST_FLAG(A) (MAT_CONTENT(A)->last_flag) #define MAT_BLOCK_ROWS(A) (MAT_CONTENT(A)->block_rows) #define MAT_BLOCK_COLS(A) (MAT_CONTENT(A)->block_cols) #define MAT_ROWS(A) (MAT_CONTENT(A)->rows) #define MAT_COLS(A) (MAT_CONTENT(A)->cols) #define MAT_NBLOCKS(A) (MAT_CONTENT(A)->num_blocks) #define MAT_LDATA(A) (MAT_CONTENT(A)->ldata) #define MAT_DATA(A) (MAT_CONTENT(A)->data) #define MAT_DATAp(A) ((realtype*) MAT_CONTENT(A)->data->ptr) #define MAT_BLOCKS(A) (MAT_CONTENT(A)->blocks) #define MAT_BLOCKSp(A) ((realtype**) MAT_CONTENT(A)->blocks->ptr) #define MAT_EXECPOLICY(A) (MAT_CONTENT(A)->exec_policy) #define MAT_MEMTYPE(A) (MAT_CONTENT(A)->mem_type) #define MAT_MEMHELPER(A) (MAT_CONTENT(A)->mem_helper) #define MAT_QUEUE(A) (MAT_CONTENT(A)->queue) // Private function prototypes static booleantype Compatible_AB(SUNMatrix A, SUNMatrix B); static booleantype Compatible_Axy(SUNMatrix A, N_Vector x, N_Vector y); // Kernel launch parameters static int GetKernelParameters(SUNMatrix A, booleantype reduction, size_t& nthreads_total, size_t& nthreads_per_block); /* -------------------------------------------------------------------------- * Constructors * -------------------------------------------------------------------------- */ SUNMatrix SUNMatrix_OneMklDense(sunindextype M, sunindextype N, SUNMemoryType mem_type, SUNMemoryHelper mem_helper, sycl::queue* queue, SUNContext sunctx) { return SUNMatrix_OneMklDenseBlock(1, M, N, mem_type, mem_helper, queue, 
sunctx); } SUNMatrix SUNMatrix_OneMklDenseBlock(sunindextype num_blocks, sunindextype M, sunindextype N, SUNMemoryType mem_type, SUNMemoryHelper mem_helper, sycl::queue* queue, SUNContext sunctx) { int retval; // Check inputs if ( (M <= 0) || (N <= 0) || (num_blocks <= 0) || (!mem_helper) || ((mem_type != SUNMEMTYPE_UVM) && (mem_type != SUNMEMTYPE_DEVICE))) { SUNDIALS_DEBUG_ERROR("Illegal input\n"); return NULL; } // Create an empty matrix object SUNMatrix A = SUNMatNewEmpty(sunctx); if (!A) { SUNDIALS_DEBUG_ERROR("SUNMatNewEmpty returned NULL\n"); return NULL; } // Attach operations A->ops->getid = SUNMatGetID_OneMklDense; A->ops->clone = SUNMatClone_OneMklDense; A->ops->destroy = SUNMatDestroy_OneMklDense; A->ops->zero = SUNMatZero_OneMklDense; A->ops->copy = SUNMatCopy_OneMklDense; A->ops->scaleadd = SUNMatScaleAdd_OneMklDense; A->ops->scaleaddi = SUNMatScaleAddI_OneMklDense; A->ops->matvec = SUNMatMatvec_OneMklDense; A->ops->space = SUNMatSpace_OneMklDense; // Create content A->content = (SUNMatrixContent_OneMklDense) malloc(sizeof(_SUNMatrixContent_OneMklDense)); if (!(A->content)) { SUNDIALS_DEBUG_ERROR("Content allocation failed\n"); SUNMatDestroy(A); return NULL; } // Fill content MAT_CONTENT(A)->block_rows = M; MAT_CONTENT(A)->block_cols = N; MAT_CONTENT(A)->rows = num_blocks * M; MAT_CONTENT(A)->cols = num_blocks * N; MAT_CONTENT(A)->num_blocks = num_blocks; MAT_CONTENT(A)->ldata = M * N * num_blocks; MAT_CONTENT(A)->data = NULL; MAT_CONTENT(A)->blocks = NULL; MAT_CONTENT(A)->mem_type = mem_type; MAT_CONTENT(A)->mem_helper = mem_helper; MAT_CONTENT(A)->exec_policy = new sundials::sycl::ThreadDirectExecPolicy(SYCL_BLOCKDIM(queue)); MAT_CONTENT(A)->queue = queue; // Allocate data retval = SUNMemoryHelper_Alloc(MAT_MEMHELPER(A), &(MAT_DATA(A)), sizeof(realtype) * MAT_LDATA(A), mem_type, queue); if (retval) { SUNDIALS_DEBUG_ERROR("SUNMemory allocation failed\n"); SUNMatDestroy(A); return NULL; } if (MAT_NBLOCKS(A) > 1) { // Allocate array of pointers to block 
data retval = SUNMemoryHelper_Alloc(MAT_MEMHELPER(A), &(MAT_BLOCKS(A)), sizeof(realtype*) * MAT_NBLOCKS(A), mem_type, queue); if (retval) { SUNDIALS_DEBUG_ERROR("SUNMemory allocation failed\n"); SUNMatDestroy(A); return NULL; } size_t nthreads_total, nthreads_per_block; if (GetKernelParameters(A, SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_ERROR("GetKernelParameters returned nonzero\n"); SUNMatDestroy(A); return NULL; } realtype* Adata = MAT_DATAp(A); realtype** Ablocks = MAT_BLOCKSp(A); // Initialize array of pointers to block data SYCL_FOR(queue, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, num_blocks) { Ablocks[i] = Adata + i * M * N; }); } return A; } /* -------------------------------------------------------------------------- * Accessor functions * -------------------------------------------------------------------------- */ sunindextype SUNMatrix_OneMklDense_Rows(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_ROWS(A); else return SUNMAT_ILL_INPUT; } sunindextype SUNMatrix_OneMklDense_Columns(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_COLS(A); else return SUNMAT_ILL_INPUT; } sunindextype SUNMatrix_OneMklDense_NumBlocks(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_NBLOCKS(A); else return SUNMAT_ILL_INPUT; } sunindextype SUNMatrix_OneMklDense_BlockRows(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_BLOCK_ROWS(A); else return SUNMAT_ILL_INPUT; } sunindextype SUNMatrix_OneMklDense_BlockColumns(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_BLOCK_COLS(A); else return SUNMAT_ILL_INPUT; } sunindextype SUNMatrix_OneMklDense_LData(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_LDATA(A); else return SUNMAT_ILL_INPUT; } realtype* SUNMatrix_OneMklDense_Data(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_DATAp(A); else return NULL; } sunindextype 
SUNMatrix_OneMklDense_BlockLData(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_BLOCK_ROWS(A) * MAT_BLOCK_COLS(A); else return SUNMAT_ILL_INPUT; } realtype** SUNMatrix_OneMklDense_BlockData(SUNMatrix A) { if (SUNMatGetID(A) == SUNMATRIX_ONEMKLDENSE) return MAT_BLOCKSp(A); else return NULL; } /* Functions that return pointers to the start of a block, column, or block column. These are defined as inline functions in sunmatrix_onemkldense.h, so we just mark them as extern here. */ extern realtype* SUNMatrix_OneMklDense_Block(SUNMatrix A, sunindextype k); extern realtype* SUNMatrix_OneMklDense_Column(SUNMatrix A, sunindextype j); extern realtype* SUNMatrix_OneMklDense_BlockColumn(SUNMatrix A, sunindextype k, sunindextype j); /* -------------------------------------------------------------------------- * Utility functions * -------------------------------------------------------------------------- */ int SUNMatrix_OneMklDense_CopyToDevice(SUNMatrix A, realtype* h_data) { if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Illegal input\n"); return SUNMAT_ILL_INPUT; } // Wrap the input pointer SUNMemory _h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST); if (!_h_data) { SUNDIALS_DEBUG_ERROR("SUNMemory wrap failed\n"); return SUNMAT_ILL_INPUT; } // Copy the data int copy_fail = SUNMemoryHelper_CopyAsync(MAT_MEMHELPER(A), MAT_DATA(A), _h_data, sizeof(realtype) * MAT_LDATA(A), MAT_QUEUE(A)); // Sync with respect to host, but only this queue MAT_QUEUE(A)->wait_and_throw(); int retval = SUNMemoryHelper_Dealloc(MAT_MEMHELPER(A), _h_data, MAT_QUEUE(A)); if (retval) { SUNDIALS_DEBUG_ERROR("SUNMemory dealloc failed\n"); return SUNMAT_MEM_FAIL; } return (copy_fail ? 
SUNMAT_MEM_FAIL : SUNMAT_SUCCESS); } int SUNMatrix_OneMklDense_CopyFromDevice(SUNMatrix A, realtype* h_data) { if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Illegal input\n"); return SUNMAT_ILL_INPUT; } SUNMemory _h_data = SUNMemoryHelper_Wrap(h_data, SUNMEMTYPE_HOST); if (!_h_data) { SUNDIALS_DEBUG_ERROR("SUNMemory wrap failed\n"); return SUNMAT_MEM_FAIL; } int copy_fail = SUNMemoryHelper_CopyAsync(MAT_MEMHELPER(A), _h_data, MAT_DATA(A), sizeof(realtype) * MAT_LDATA(A), MAT_QUEUE(A)); // Sync with respect to host, but only this queue MAT_QUEUE(A)->wait_and_throw(); int retval = SUNMemoryHelper_Dealloc(MAT_MEMHELPER(A), _h_data, MAT_QUEUE(A)); if (retval) { SUNDIALS_DEBUG_ERROR("SUNMemory dealloc failed\n"); return SUNMAT_MEM_FAIL; } return (copy_fail ? SUNMAT_MEM_FAIL : SUNMAT_SUCCESS); } /* -------------------------------------------------------------------------- * Implementation of generic SUNMatrix operations. * -------------------------------------------------------------------------- */ SUNMatrix SUNMatClone_OneMklDense(SUNMatrix A) { if (!A) { SUNDIALS_DEBUG_ERROR("Input matrix is NULL\n"); return NULL; } if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Invalid matrix ID\n"); return NULL; } SUNMatrix B = SUNMatrix_OneMklDenseBlock(MAT_NBLOCKS(A), MAT_BLOCK_ROWS(A), MAT_BLOCK_COLS(A), MAT_DATA(A)->type, MAT_MEMHELPER(A), MAT_QUEUE(A), A->sunctx); if (!B) { SUNDIALS_DEBUG_ERROR("Output matrix is NULL\n"); return NULL; } return B; } void SUNMatDestroy_OneMklDense(SUNMatrix A) { if (!A) { SUNDIALS_DEBUG_ERROR("Input matrix is NULL\n"); return; } if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Invalid matrix ID\n"); return; } // Free content if (A->content) { // Free data array(s) if (MAT_DATA(A)) SUNMemoryHelper_Dealloc(MAT_MEMHELPER(A), MAT_DATA(A), MAT_QUEUE(A)); if (MAT_BLOCKS(A)) SUNMemoryHelper_Dealloc(MAT_MEMHELPER(A), MAT_BLOCKS(A), MAT_QUEUE(A)); // Free content struct free(A->content); 
A->content = NULL; } // Free matrix SUNMatFreeEmpty(A); A = NULL; return; } int SUNMatZero_OneMklDense(SUNMatrix A) { if (!A) { SUNDIALS_DEBUG_ERROR("Input matrix is NULL\n"); return SUNMAT_ILL_INPUT; } if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Invalid matrix ID\n"); return SUNMAT_ILL_INPUT; } const sunindextype ldata = MAT_LDATA(A); realtype *Adata = MAT_DATAp(A); sycl::queue *Q = MAT_QUEUE(A); size_t nthreads_total, nthreads_per_block; if (GetKernelParameters(A, SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_ERROR("GetKernelParameters returned nonzero\n"); return SUNMAT_MEM_FAIL; } // Zero out matrix SYCL_FOR(Q, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, ldata) { Adata[i] = ZERO; }); return SUNMAT_SUCCESS; } int SUNMatCopy_OneMklDense(SUNMatrix A, SUNMatrix B) { if (!A || !B) { SUNDIALS_DEBUG_ERROR("An input matrix is NULL\n"); return SUNMAT_ILL_INPUT; } if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Invalid matrix ID\n"); return SUNMAT_ILL_INPUT; } // Verify that A and B are compatible if (!Compatible_AB(A, B)) { SUNDIALS_DEBUG_ERROR("Input matrices are incompatible\n"); return SUNMAT_ILL_INPUT; } const sunindextype ldata = MAT_LDATA(A); realtype *Adata = MAT_DATAp(A); realtype *Bdata = MAT_DATAp(B); sycl::queue *Q = MAT_QUEUE(A); size_t nthreads_total, nthreads_per_block; if (GetKernelParameters(A, SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_ERROR("GetKernelParameters returned nonzero\n"); return SUNMAT_MEM_FAIL; } // Copy A into B SYCL_FOR(Q, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, ldata) { Bdata[i] = Adata[i]; }); return SUNMAT_SUCCESS; } int SUNMatScaleAddI_OneMklDense(realtype c, SUNMatrix A) { if (!A) { SUNDIALS_DEBUG_ERROR("Input matrix is NULL\n"); return SUNMAT_ILL_INPUT; } if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Invalid matrix ID\n"); return SUNMAT_ILL_INPUT; } const size_t M = 
static_cast(MAT_BLOCK_ROWS(A)); const size_t N = static_cast(MAT_BLOCK_COLS(A)); const size_t B = static_cast(MAT_NBLOCKS(A)); realtype* Adata = MAT_DATAp(A); sycl::queue* Q = MAT_QUEUE(A); // Compute A = c * A + I Q->submit([&](sycl::handler& h) { h.parallel_for(sycl::range{M, N, B}, [=](sycl::id<3> idx) { sunindextype i = idx[0]; sunindextype j = idx[1]; sunindextype k = idx[2]; // Index into 1D data array sunindextype tid = k * M * N + j * M + i; if (i == j) { Adata[tid] = c * Adata[tid] + ONE; } else { Adata[tid] = c * Adata[tid]; } }); }); return SUNMAT_SUCCESS; } int SUNMatScaleAdd_OneMklDense(realtype c, SUNMatrix A, SUNMatrix B) { if (!A || !B) { SUNDIALS_DEBUG_ERROR("An input matrix is NULL\n"); return SUNMAT_ILL_INPUT; } if (!Compatible_AB(A, B)) { SUNDIALS_DEBUG_ERROR("Input matrices are incompatible\n"); return SUNMAT_ILL_INPUT; } const sunindextype ldata = MAT_LDATA(A); realtype *Adata = MAT_DATAp(A); realtype *Bdata = MAT_DATAp(B); sycl::queue *Q = MAT_QUEUE(A); size_t nthreads_total, nthreads_per_block; if (GetKernelParameters(A, SUNFALSE, nthreads_total, nthreads_per_block)) { SUNDIALS_DEBUG_ERROR("GetKernelParameters returned nonzero\n"); return SUNMAT_MEM_FAIL; } // Compute A = c * A + B SYCL_FOR(Q, nthreads_total, nthreads_per_block, item, GRID_STRIDE_XLOOP(item, i, ldata) { Adata[i] = c * Adata[i] + Bdata[i]; }); return SUNMAT_SUCCESS; } int SUNMatMatvec_OneMklDense(SUNMatrix A, N_Vector x, N_Vector y) { if (!A || !x || !y) { SUNDIALS_DEBUG_ERROR("Input matrix or vectors are NULL\n"); return SUNMAT_ILL_INPUT; } if (!Compatible_Axy(A, x, y)) { SUNDIALS_DEBUG_ERROR("Input matrix and vectors are incompatible\n"); return SUNMAT_ILL_INPUT; } if (MAT_NBLOCKS(A) > 1) { sycl::queue* Q = MAT_QUEUE(A); sunindextype M = MAT_BLOCK_ROWS(A); sunindextype N = MAT_BLOCK_COLS(A); // TODO(DJG): Replace with batched function for (sunindextype i = 0; i < MAT_NBLOCKS(A); i++) { const realtype* Adata = MAT_DATAp(A) + i * M * N; const realtype* xdata = 
N_VGetDeviceArrayPointer(x) + i * N; realtype* ydata = N_VGetDeviceArrayPointer(y) + i * M; // Copmute y = a * A * x + b * y oneapi::mkl::blas::gemv(*Q, oneapi::mkl::transpose::N, M, N, ONE, Adata, M, xdata, 1, ZERO, ydata, 1); } } else { sycl::queue* Q = MAT_QUEUE(A); sunindextype M = MAT_ROWS(A); sunindextype N = MAT_COLS(A); const realtype* Adata = MAT_DATAp(A); const realtype* xdata = N_VGetDeviceArrayPointer(x); realtype* ydata = N_VGetDeviceArrayPointer(y); // Copmute y = a * A * x + b * y oneapi::mkl::blas::gemv(*Q, oneapi::mkl::transpose::N, M, N, ONE, Adata, M, xdata, 1, ZERO, ydata, 1); } return SUNMAT_SUCCESS; } int SUNMatSpace_OneMklDense(SUNMatrix A, long int *lenrw, long int *leniw) { if (!A) { SUNDIALS_DEBUG_ERROR("Input matrix is NULL\n"); return SUNMAT_ILL_INPUT; } if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Invalid matrix ID\n"); return SUNMAT_ILL_INPUT; } *lenrw = MAT_LDATA(A); *leniw = 4; return SUNMAT_SUCCESS; } /* -------------------------------------------------------------------------- * Private functions * -------------------------------------------------------------------------- */ // Get the kernel launch parameters static int GetKernelParameters(SUNMatrix A, booleantype reduction, size_t& nthreads_total, size_t& nthreads_per_block) { if (!MAT_EXECPOLICY(A)) { SUNDIALS_DEBUG_ERROR("The execution policy is NULL\n"); return -1; } /* Get the number of threads per block and total number threads */ nthreads_per_block = MAT_EXECPOLICY(A)->blockSize(); nthreads_total = nthreads_per_block * MAT_EXECPOLICY(A)->gridSize(MAT_LDATA(A)); if (nthreads_per_block == 0) { SUNDIALS_DEBUG_ERROR("The number of threads per block must be > 0\n"); return -1; } if (nthreads_total == 0) { SUNDIALS_DEBUG_ERROR("The total number of threads must be > 0\n"); return -1; } return 0; } static booleantype Compatible_AB(SUNMatrix A, SUNMatrix B) { // Both matrices must have the SUNMATRIEX_MKLDENSE ID if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { 
SUNDIALS_DEBUG_ERROR("Illegal matrix ID\n"); return SUNFALSE; } if (SUNMatGetID(B) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Illegal matrix ID\n"); return SUNFALSE; } // Both matrices must have the same shape if (MAT_BLOCK_ROWS(A) != MAT_BLOCK_ROWS(B)) { SUNDIALS_DEBUG_ERROR("Number of block rows do not match\n"); return SUNFALSE; } if (MAT_BLOCK_COLS(A) != MAT_BLOCK_COLS(B)) { SUNDIALS_DEBUG_ERROR("Number of block columns do not match\n"); return SUNFALSE; } if (MAT_NBLOCKS(A) != MAT_NBLOCKS(B)) { SUNDIALS_DEBUG_ERROR("Number of blocks do not match\n"); return SUNFALSE; } return SUNTRUE; } static booleantype Compatible_Axy(SUNMatrix A, N_Vector x, N_Vector y) { // Vectors must implement N_VGetDeviceArrayPointer if (!(x->ops->nvgetdevicearraypointer) || !(y->ops->nvgetdevicearraypointer)) { SUNDIALS_DEBUG_ERROR("Vectors do not have GetDeviceArrayPointer\n"); return SUNFALSE; } // Inner dimensions must agree if (MAT_COLS(A) != N_VGetLength(x)) { SUNDIALS_DEBUG_ERROR("Number of columns != input vectors length\n"); return SUNFALSE; } // Outer dimensions must agree if (MAT_ROWS(A) != N_VGetLength(y)) { SUNDIALS_DEBUG_ERROR("Number of rows != output vector length\n"); return SUNFALSE; } return SUNTRUE; } StanHeaders/src/Makevars0000644000176200001440000000331014571446076014751 0ustar liggesusers.PHONY: static CXX_STD = CXX17 PKG_CPPFLAGS = -DNO_FPRINTF_OUTPUT -I"../inst/include" -I"sundials" -include stan_sundials_printf_override.hpp PKG_CPPFLAGS += -DBOOST_DISABLE_ASSERTS -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION -DBOOST_NO_AUTO_PTR -D_REENTRANT -DSTAN_THREADS -DSTRICT_R_HEADERS PKG_CXXFLAGS += -DSTRICT_R_HEADERS PKG_CPPFLAGS += -DUSE_PTHREAD SHLIB_LDFLAGS = $(SHLIB_CXXLDFLAGS) SHLIB_LD = $(SHLIB_CXXLD) SUNDIALS_CVODES := \ $(wildcard cvodes/*.c) \ $(wildcard sundials/*.c) \ $(wildcard sunmatrix/band/[^f]*.c) \ $(wildcard sunmatrix/dense/[^f]*.c) \ $(wildcard sunlinsol/band/[^f]*.c) \ $(wildcard sunlinsol/dense/[^f]*.c) \ $(wildcard sunnonlinsol/newton/[^f]*.c) \ 
$(wildcard sunnonlinsol/fixedpoint/[^f]*.c) SUNDIALS_IDAS := \ $(wildcard idas/*.c) \ $(wildcard sundials/*.c) \ $(wildcard sunmatrix/band/[^f]*.c) \ $(wildcard sunmatrix/dense/[^f]*.c) \ $(wildcard sunlinsol/band/[^f]*.c) \ $(wildcard sunlinsol/dense/[^f]*.c) \ $(wildcard sunnonlinsol/newton/[^f]*.c) \ $(wildcard sunnonlinsol/fixedpoint/[^f]*.c) SUNDIALS_KINSOL := \ $(wildcard kinsol/*.c) \ $(wildcard sundials/*.c) \ $(wildcard sunmatrix/band/[^f]*.c) \ $(wildcard sunmatrix/dense/[^f]*.c) \ $(wildcard sunlinsol/band/[^f]*.c) \ $(wildcard sunlinsol/dense/[^f]*.c) \ $(wildcard sunnonlinsol/newton/[^f]*.c) \ $(wildcard sunnonlinsol/fixedpoint/[^f]*.c) SUNDIALS_NVECSERIAL := \ $(addprefix , nvector/serial/nvector_serial.c sundials/sundials_math.c) SOURCES = $(SUNDIALS_CVODES) $(SUNDIALS_IDAS) $(SUNDIALS_KINSOL) $(SUNDIALS_NVECSERIAL) OBJECTS_C = $(SOURCES:.c=.o) OBJECTS = $(OBJECTS_C:.cpp=.o) static: $(OBJECTS) @mkdir -p ../lib $(AR) -rs ../lib/libStanHeaders.a $(OBJECTS) clean: rm -rf ../lib rm $(OBJECTS) StanHeaders/src/sunnonlinsol/0000755000176200001440000000000014511135055016003 5ustar liggesusersStanHeaders/src/sunnonlinsol/petscsnes/0000755000176200001440000000000014511135055020012 5ustar liggesusersStanHeaders/src/sunnonlinsol/petscsnes/sunnonlinsol_petscsnes.c0000644000176200001440000002672514645137106025031 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation file for the SUNNonlinearSolver module * implementation that interfaces to the PETSc SNES nonlinear solvers. * ---------------------------------------------------------------------------*/ #include #include #include #include #include #include #include #define SUNNLS_SNES_CONTENT(NLS) ( (SUNNonlinearSolverContent_PetscSNES)(NLS->content) ) #define SUNNLS_SNESOBJ(NLS) ( SUNNLS_SNES_CONTENT(NLS)->snes ) /* private function which translates the SNESFunction form to the SUNNonlinSolSysFn form */ static PetscErrorCode PetscSysFn(SNES snes, Vec x, Vec f, void *ctx); /*============================================================================== Constructor ============================================================================*/ /* create a SUNNonlinearSolver wrapper for the PETSc SNES context */ SUNNonlinearSolver SUNNonlinSol_PetscSNES(N_Vector y, SNES snes, SUNContext sunctx) { int ierr; SUNNonlinearSolver NLS; SUNNonlinearSolverContent_PetscSNES content; /* check that the supplied SNES is non-NULL */ if (snes == NULL || y == NULL) return NULL; /* check that the vector is the right type */ if (N_VGetVectorID(y) != SUNDIALS_NVEC_PETSC) return NULL; /* * Create an empty nonlinear linear solver object */ NLS = SUNNonlinSolNewEmpty(sunctx); if (NLS == NULL) return NULL; /* Attach operations */ NLS->ops->gettype = SUNNonlinSolGetType_PetscSNES; NLS->ops->initialize = SUNNonlinSolInitialize_PetscSNES; NLS->ops->solve = SUNNonlinSolSolve_PetscSNES; NLS->ops->free = SUNNonlinSolFree_PetscSNES; NLS->ops->setsysfn = SUNNonlinSolSetSysFn_PetscSNES; NLS->ops->getnumiters = SUNNonlinSolGetNumIters_PetscSNES; NLS->ops->getnumconvfails = SUNNonlinSolGetNumConvFails_PetscSNES; /* * Create content */ content = NULL; content = (SUNNonlinearSolverContent_PetscSNES) malloc(sizeof *content); if (content == 
NULL) { SUNNonlinSolFree(NLS); return NULL; } /* Initialize all components of content to 0/NULL */ memset(content, 0, sizeof(struct _SUNNonlinearSolverContent_PetscSNES)); /* Attach content */ NLS->content = content; /* * Fill content */ content->snes = snes; /* Create all internal vectors */ content->y = N_VCloneEmpty(y); if (content->y == NULL) { SUNNonlinSolFree(NLS); return NULL; } content->f = N_VCloneEmpty(y); if (content->f == NULL) { SUNNonlinSolFree(NLS); return NULL; } ierr = VecDuplicate(N_VGetVector_Petsc(y), &content->r); if (ierr != 0) { SUNNonlinSolFree(NLS); return NULL; } /* tell SNES about the sys function */ ierr = SNESSetFunction(SUNNLS_SNESOBJ(NLS), SUNNLS_SNES_CONTENT(NLS)->r, PetscSysFn, NLS); if (ierr != 0) { SUNNonlinSolFree(NLS); return NULL; } return(NLS); } /*============================================================================== GetType, Initialize, Setup, Solve, and Free operations ============================================================================*/ /* get the type of SUNNonlinearSolver */ SUNNonlinearSolver_Type SUNNonlinSolGetType_PetscSNES(SUNNonlinearSolver NLS) { return(SUNNONLINEARSOLVER_ROOTFIND); } /* performs any initialization needed */ int SUNNonlinSolInitialize_PetscSNES(SUNNonlinearSolver NLS) { PetscErrorCode ptcerr; /* set the system function again to ensure it is the wrapper */ ptcerr = SNESSetFunction(SUNNLS_SNESOBJ(NLS), SUNNLS_SNES_CONTENT(NLS)->r, PetscSysFn, NLS); if (ptcerr != 0) { SUNNLS_SNES_CONTENT(NLS)->petsc_last_err = ptcerr; return SUN_NLS_EXT_FAIL; } return SUN_NLS_SUCCESS; } /*------------------------------------------------------------------------------ SUNNonlinSolSolve_PetscSNES: Performs the nonlinear solve F(y) = 0 or G(y) = y Successful solve return code: SUN_NLS_SUCCESS = 0 Recoverable failure return codes (positive): SUN_NLS_CONV_RECVR *_RHSFUNC_RECVR (ODEs) or *_RES_RECVR (DAEs) Unrecoverable failure return codes (negative): *_MEM_NULL *_RHSFUNC_FAIL (ODEs) or *_RES_FAIL (DAEs) 
Note return values beginning with * are package specific values returned by the Sys function provided to the nonlinear solver. ----------------------------------------------------------------------------*/ int SUNNonlinSolSolve_PetscSNES(SUNNonlinearSolver NLS, N_Vector y0, N_Vector y, N_Vector w, realtype tol, booleantype callLSetup, void* mem) { /* local variables */ PetscErrorCode ierr; SNESConvergedReason reason; int retval; /* check that the inputs are non-null */ if ( (NLS == NULL) || (y0 == NULL) || (y == NULL) || (w == NULL) ) return SUN_NLS_MEM_NULL; /* store a pointer to the integrator memory so it can be * accessed in the system function */ SUNNLS_SNES_CONTENT(NLS)->imem = mem; /* reset convergence failure count */ SUNNLS_SNES_CONTENT(NLS)->nconvfails = 0; /* call petsc SNES solve */ ierr = SNESSolve(SUNNLS_SNESOBJ(NLS), NULL, N_VGetVector_Petsc(y)); /* check if the call to the system function failed */ if (SUNNLS_SNES_CONTENT(NLS)->sysfn_last_err != SUN_NLS_SUCCESS) return SUNNLS_SNES_CONTENT(NLS)->sysfn_last_err; /* check if the SNESSolve had a failure elsewhere */ if (ierr != 0) { SUNNLS_SNES_CONTENT(NLS)->petsc_last_err = ierr; return SUN_NLS_EXT_FAIL; /* ierr != 0 is not recoverable with PETSc */ } /* * determine if and why the system converged/diverged so we can determine * if it is a recoverable failure or success */ ierr = SNESGetConvergedReason(SUNNLS_SNESOBJ(NLS), &reason); if (ierr != 0) { SUNNLS_SNES_CONTENT(NLS)->petsc_last_err = ierr; return SUN_NLS_EXT_FAIL; /* ierr != 0 is not recoverable with PETSc */ } if ( (reason == SNES_CONVERGED_ITERATING) || (reason == SNES_CONVERGED_FNORM_ABS) || (reason == SNES_CONVERGED_FNORM_RELATIVE) || (reason == SNES_CONVERGED_SNORM_RELATIVE) ) { /* success */ retval = SUN_NLS_SUCCESS; } else { /* recoverable failure */ retval = SUN_NLS_CONV_RECVR; /* update convergence failure count */ SUNNLS_SNES_CONTENT(NLS)->nconvfails++; } return retval; } /* free the SUNNonlinearSolver */ int 
SUNNonlinSolFree_PetscSNES(SUNNonlinearSolver NLS) { /* return if NLS is already free */ if (NLS == NULL) return SUN_NLS_SUCCESS; /* free items from contents, then the generic structure */ if (NLS->content) { if (SUNNLS_SNES_CONTENT(NLS)->r) VecDestroy(&SUNNLS_SNES_CONTENT(NLS)->r); if (SUNNLS_SNES_CONTENT(NLS)->y) N_VDestroy_Petsc(SUNNLS_SNES_CONTENT(NLS)->y); if (SUNNLS_SNES_CONTENT(NLS)->f) N_VDestroy_Petsc(SUNNLS_SNES_CONTENT(NLS)->f); free(NLS->content); NLS->content = NULL; } /* free the ops structure */ if (NLS->ops) { free(NLS->ops); NLS->ops = NULL; } /* free the nonlinear solver */ free(NLS); return SUN_NLS_SUCCESS; } /*============================================================================== Set functions ============================================================================*/ /* set the system residual function */ int SUNNonlinSolSetSysFn_PetscSNES(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return SUN_NLS_MEM_NULL; /* check that the nonlinear system function is non-null */ if (SysFn == NULL) return(SUN_NLS_ILL_INPUT); SUNNLS_SNES_CONTENT(NLS)->Sys = SysFn; return SUN_NLS_SUCCESS; } /*============================================================================== Get functions ============================================================================*/ /* get the PETSc SNES context underneath the SUNNonlinearSolver object */ int SUNNonlinSolGetSNES_PetscSNES(SUNNonlinearSolver NLS, SNES *snes) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return SUN_NLS_MEM_NULL; /* return the SNES context */ *snes = SUNNLS_SNESOBJ(NLS); return SUN_NLS_SUCCESS; } /* get the last error return by SNES */ int SUNNonlinSolGetPetscError_PetscSNES(SUNNonlinearSolver NLS, PetscErrorCode* err) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return SUN_NLS_MEM_NULL; /* return the last PETSc error code returned by SNES */ *err = 
SUNNLS_SNES_CONTENT(NLS)->petsc_last_err; return SUN_NLS_SUCCESS; } /* get a pointer to the SUNDIALS integrator-provided system function F(y) */ int SUNNonlinSolGetSysFn_PetscSNES(SUNNonlinearSolver NLS, SUNNonlinSolSysFn* SysFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return SUN_NLS_MEM_NULL; /* return the nonlinear system defining function */ *SysFn = SUNNLS_SNES_CONTENT(NLS)->Sys; return SUN_NLS_SUCCESS; } /* get the number of iterations performed in the last solve */ int SUNNonlinSolGetNumIters_PetscSNES(SUNNonlinearSolver NLS, long int* nni) { int ierr; sunindextype niters; /* check that the nonlinear solver is non-null */ if (NLS == NULL) return SUN_NLS_MEM_NULL; /* get iteration count */ ierr = SNESGetIterationNumber(SUNNLS_SNESOBJ(NLS), &niters); if (ierr != 0) { SUNNLS_SNES_CONTENT(NLS)->petsc_last_err = ierr; return SUN_NLS_EXT_FAIL; /* ierr != 0 is not recoverable with PETSc */ } *nni = (long int) niters; return SUN_NLS_SUCCESS; } /* get the total number of nonlinear convergence failures in the lifetime of this SUNNonlinearSolver object */ int SUNNonlinSolGetNumConvFails_PetscSNES(SUNNonlinearSolver NLS, long int* nconvfails) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return SUN_NLS_MEM_NULL; *nconvfails = SUNNLS_SNES_CONTENT(NLS)->nconvfails; return SUN_NLS_SUCCESS; } /*============================================================================== Private functions ============================================================================*/ static PetscErrorCode PetscSysFn(SNES snes, Vec x, Vec f, void *ctx) { int retval; SUNNonlinearSolver NLS = (SUNNonlinearSolver) ctx; /* wrap up the petsc vectors in nvectors */ N_VSetVector_Petsc(SUNNLS_SNES_CONTENT(NLS)->y, x); N_VSetVector_Petsc(SUNNLS_SNES_CONTENT(NLS)->f, f); /* now call the SUNDIALS format system function to evaluate F(y) */ retval = SUNNLS_SNES_CONTENT(NLS)->Sys(SUNNLS_SNES_CONTENT(NLS)->y, SUNNLS_SNES_CONTENT(NLS)->f, 
SUNNLS_SNES_CONTENT(NLS)->imem); /* store the return value */ SUNNLS_SNES_CONTENT(NLS)->sysfn_last_err = retval; /* if there was some sort of failure in the system function, return -1 to * indicate an error instead of retval so that we don't overlap with one of * the standard PETSc error codes */ return (retval != 0) ? -1 : 0; } StanHeaders/src/sunnonlinsol/fixedpoint/0000755000176200001440000000000014645137104020161 5ustar liggesusersStanHeaders/src/sunnonlinsol/fixedpoint/sunnonlinsol_fixedpoint.c0000644000176200001440000005663214645137106025335 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation file for the SUNNonlinearSolver module * implementation of the Anderson-accelerated Fixed-Point method. 
* ---------------------------------------------------------------------------*/ #include #include #include #include #include #include #include "sundials_debug.h" /* Internal utility routines */ static int AndersonAccelerate(SUNNonlinearSolver NLS, N_Vector gval, N_Vector x, N_Vector xold, int iter); static int AllocateContent(SUNNonlinearSolver NLS, N_Vector tmpl); static void FreeContent(SUNNonlinearSolver NLS); /* Content structure accessibility macros */ #define FP_CONTENT(S) ( (SUNNonlinearSolverContent_FixedPoint)(S->content) ) /* Constant macros */ #define ONE RCONST(1.0) #define ZERO RCONST(0.0) /*============================================================================== Constructor to create a new fixed point solver ============================================================================*/ SUNNonlinearSolver SUNNonlinSol_FixedPoint(N_Vector y, int m, SUNContext sunctx) { SUNNonlinearSolver NLS; SUNNonlinearSolverContent_FixedPoint content; int retval; /* Check that the supplied N_Vector is non-NULL */ if (y == NULL) return(NULL); /* Check that the supplied N_Vector supports all required operations */ if ( (y->ops->nvclone == NULL) || (y->ops->nvdestroy == NULL) || (y->ops->nvscale == NULL) || (y->ops->nvlinearsum == NULL) || (y->ops->nvdotprod == NULL) ) return(NULL); /* Create nonlinear linear solver */ NLS = SUNNonlinSolNewEmpty(sunctx); if (NLS == NULL) return(NULL); /* Attach operations */ NLS->ops->gettype = SUNNonlinSolGetType_FixedPoint; NLS->ops->initialize = SUNNonlinSolInitialize_FixedPoint; NLS->ops->solve = SUNNonlinSolSolve_FixedPoint; NLS->ops->free = SUNNonlinSolFree_FixedPoint; NLS->ops->setsysfn = SUNNonlinSolSetSysFn_FixedPoint; NLS->ops->setctestfn = SUNNonlinSolSetConvTestFn_FixedPoint; NLS->ops->setmaxiters = SUNNonlinSolSetMaxIters_FixedPoint; NLS->ops->getnumiters = SUNNonlinSolGetNumIters_FixedPoint; NLS->ops->getcuriter = SUNNonlinSolGetCurIter_FixedPoint; NLS->ops->getnumconvfails = SUNNonlinSolGetNumConvFails_FixedPoint; 
/* Create nonlinear solver content structure */ content = NULL; content = (SUNNonlinearSolverContent_FixedPoint) malloc(sizeof *content); if (content == NULL) { SUNNonlinSolFree(NLS); return(NULL); } /* Initialize all components of content to 0/NULL */ memset(content, 0, sizeof(struct _SUNNonlinearSolverContent_FixedPoint)); /* Attach content */ NLS->content = content; /* Fill general content */ content->Sys = NULL; content->CTest = NULL; content->m = m; content->damping = SUNFALSE; content->beta = ONE; content->curiter = 0; content->maxiters = 3; content->niters = 0; content->nconvfails = 0; content->ctest_data = NULL; content->print_level = 0; content->info_file = NULL; /* Fill allocatable content */ retval = AllocateContent(NLS, y); if (retval != SUN_NLS_SUCCESS) { SUNNonlinSolFree(NLS); return(NULL); } return(NLS); } /*============================================================================== Constructor wrapper to create a new fixed point solver for sensitivity solvers ============================================================================*/ SUNNonlinearSolver SUNNonlinSol_FixedPointSens(int count, N_Vector y, int m, SUNContext sunctx) { SUNNonlinearSolver NLS; N_Vector w; /* create sensitivity vector wrapper */ w = N_VNew_SensWrapper(count, y); /* create nonlinear solver using sensitivity vector wrapper */ NLS = SUNNonlinSol_FixedPoint(w, m, sunctx); /* free sensitivity vector wrapper */ N_VDestroy(w); /* return NLS object */ return(NLS); } /*============================================================================== GetType, Initialize, Setup, Solve, and Free operations ============================================================================*/ SUNNonlinearSolver_Type SUNNonlinSolGetType_FixedPoint(SUNNonlinearSolver NLS) { return(SUNNONLINEARSOLVER_FIXEDPOINT); } int SUNNonlinSolInitialize_FixedPoint(SUNNonlinearSolver NLS) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that all 
required function pointers have been set */ if ( (FP_CONTENT(NLS)->Sys == NULL) || (FP_CONTENT(NLS)->CTest == NULL) ) return(SUN_NLS_MEM_NULL); /* reset the total number of iterations and convergence failures */ FP_CONTENT(NLS)->niters = 0; FP_CONTENT(NLS)->nconvfails = 0; return(SUN_NLS_SUCCESS); } /*----------------------------------------------------------------------------- SUNNonlinSolSolve_FixedPoint: Performs the fixed-point solve g(y) = y Successful solve return code: SUN_NLS_SUCCESS = 0 Recoverable failure return codes (positive): SUN_NLS_CONV_RECVR *_RHSFUNC_RECVR (ODEs) or *_RES_RECVR (DAEs) Unrecoverable failure return codes (negative): *_MEM_NULL *_RHSFUNC_FAIL (ODEs) or *_RES_FAIL (DAEs) Note that return values beginning with * are package specific values returned by the Sys function provided to the nonlinear solver. ---------------------------------------------------------------------------*/ int SUNNonlinSolSolve_FixedPoint(SUNNonlinearSolver NLS, N_Vector y0, N_Vector ycor, N_Vector w, realtype tol, booleantype callSetup, void* mem) { /* local variables */ int retval; N_Vector yprev, gy, delta; /* check that the inputs are non-null */ if ( (NLS == NULL) || (y0 == NULL) || (ycor == NULL) || (w == NULL) || (mem == NULL) ) return(SUN_NLS_MEM_NULL); /* check that all required function pointers have been set */ if ( (FP_CONTENT(NLS)->Sys == NULL) || (FP_CONTENT(NLS)->CTest == NULL) ) return(SUN_NLS_MEM_NULL); /* check that all required function pointers have been set */ if ( (FP_CONTENT(NLS)->Sys == NULL) || (FP_CONTENT(NLS)->CTest == NULL) ) return(SUN_NLS_MEM_NULL); /* set local shortcut variables */ yprev = FP_CONTENT(NLS)->yprev; gy = FP_CONTENT(NLS)->gy; delta = FP_CONTENT(NLS)->delta; /* initialize iteration and convergence fail counters for this solve */ FP_CONTENT(NLS)->niters = 0; FP_CONTENT(NLS)->nconvfails = 0; #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print current iteration number and the nonlinear residual */ if 
(FP_CONTENT(NLS)->print_level && FP_CONTENT(NLS)->info_file) { STAN_SUNDIALS_FPRINTF(FP_CONTENT(NLS)->info_file, "SUNNONLINSOL_FIXEDPOINT (nni=%ld):\n", (long int) FP_CONTENT(NLS)->niters); } #endif /* Looping point for attempts at solution of the nonlinear system: Evaluate fixed-point function (store in gy). Performs the accelerated fixed-point iteration. Performs stopping tests. */ for( FP_CONTENT(NLS)->curiter = 0; FP_CONTENT(NLS)->curiter < FP_CONTENT(NLS)->maxiters; FP_CONTENT(NLS)->curiter++ ) { /* update previous solution guess */ N_VScale(ONE, ycor, yprev); /* compute fixed-point iteration function, store in gy */ retval = FP_CONTENT(NLS)->Sys(ycor, gy, mem); if (retval != SUN_NLS_SUCCESS) break; /* perform fixed point update, based on choice of acceleration or not */ if (FP_CONTENT(NLS)->m == 0) { /* basic fixed-point solver */ N_VScale(ONE, gy, ycor); } else { /* Anderson-accelerated solver */ retval = AndersonAccelerate(NLS, gy, ycor, yprev, FP_CONTENT(NLS)->curiter); } /* increment nonlinear solver iteration counter */ FP_CONTENT(NLS)->niters++; /* compute change in solution, and call the convergence test function */ N_VLinearSum(ONE, ycor, -ONE, yprev, delta); /* test for convergence */ retval = FP_CONTENT(NLS)->CTest(NLS, ycor, delta, tol, w, FP_CONTENT(NLS)->ctest_data); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print current iteration number and the nonlinear residual */ if (FP_CONTENT(NLS)->print_level && FP_CONTENT(NLS)->info_file) { STAN_SUNDIALS_FPRINTF(FP_CONTENT(NLS)->info_file, SUN_NLS_MSG_RESIDUAL, (long int) FP_CONTENT(NLS)->curiter, N_VWrmsNorm(delta, w)); } #endif /* return if successful */ if (retval == SUN_NLS_SUCCESS) return(SUN_NLS_SUCCESS); /* check if the iterations should continue; otherwise increment the convergence failure count and return error flag */ if (retval != SUN_NLS_CONTINUE) { FP_CONTENT(NLS)->nconvfails++; return(retval); } } /* if we've reached this point, then we exhausted the iteration limit; increment the 
convergence failure count and return */ FP_CONTENT(NLS)->nconvfails++; return(SUN_NLS_CONV_RECVR); } int SUNNonlinSolFree_FixedPoint(SUNNonlinearSolver NLS) { /* return if NLS is already free */ if (NLS == NULL) return(SUN_NLS_SUCCESS); /* free items from content structure, then the structure itself */ if (NLS->content) { FreeContent(NLS); free(NLS->content); NLS->content = NULL; } /* free the ops structure */ if (NLS->ops) { free(NLS->ops); NLS->ops = NULL; } /* free the overall NLS structure */ free(NLS); return(SUN_NLS_SUCCESS); } /*============================================================================== Set functions ============================================================================*/ int SUNNonlinSolSetSysFn_FixedPoint(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that the nonlinear system function is non-null */ if (SysFn == NULL) return(SUN_NLS_ILL_INPUT); FP_CONTENT(NLS)->Sys = SysFn; return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetConvTestFn_FixedPoint(SUNNonlinearSolver NLS, SUNNonlinSolConvTestFn CTestFn, void* ctest_data) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that the convergence test function is non-null */ if (CTestFn == NULL) return(SUN_NLS_ILL_INPUT); FP_CONTENT(NLS)->CTest = CTestFn; /* attach convergence test data */ FP_CONTENT(NLS)->ctest_data = ctest_data; return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetMaxIters_FixedPoint(SUNNonlinearSolver NLS, int maxiters) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that maxiters is a vaild */ if (maxiters < 1) return(SUN_NLS_ILL_INPUT); FP_CONTENT(NLS)->maxiters = maxiters; return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetDamping_FixedPoint(SUNNonlinearSolver NLS, realtype beta) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) 
return(SUN_NLS_MEM_NULL); /* check that beta is a vaild */ if (beta <= ZERO) return(SUN_NLS_ILL_INPUT); if (beta < ONE) { /* enable damping */ FP_CONTENT(NLS)->beta = beta; FP_CONTENT(NLS)->damping = SUNTRUE; } else { /* disable damping */ FP_CONTENT(NLS)->beta = ONE; FP_CONTENT(NLS)->damping = SUNFALSE; } return(SUN_NLS_SUCCESS); } /*============================================================================== Get functions ============================================================================*/ int SUNNonlinSolGetNumIters_FixedPoint(SUNNonlinearSolver NLS, long int *niters) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return number of nonlinear iterations in the last solve */ *niters = FP_CONTENT(NLS)->niters; return(SUN_NLS_SUCCESS); } int SUNNonlinSolGetCurIter_FixedPoint(SUNNonlinearSolver NLS, int *iter) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return the current nonlinear solver iteration count */ *iter = FP_CONTENT(NLS)->curiter; return(SUN_NLS_SUCCESS); } int SUNNonlinSolGetNumConvFails_FixedPoint(SUNNonlinearSolver NLS, long int *nconvfails) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return the total number of nonlinear convergence failures */ *nconvfails = FP_CONTENT(NLS)->nconvfails; return(SUN_NLS_SUCCESS); } int SUNNonlinSolGetSysFn_FixedPoint(SUNNonlinearSolver NLS, SUNNonlinSolSysFn *SysFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return the nonlinear system defining function */ *SysFn = FP_CONTENT(NLS)->Sys; return(SUN_NLS_SUCCESS); } /*============================================================================= Utility routines ===========================================================================*/ /*--------------------------------------------------------------- AndersonAccelerate This routine computes the 
Anderson-accelerated fixed point iterate. Upon entry, the predicted solution is held in xold; this array is never changed throughout this routine. The result of the routine is held in x. Possible return values: SUN_NLS_MEM_NULL --> a required item was missing from memory SUN_NLS_SUCCESS --> successful completion -------------------------------------------------------------*/ static int AndersonAccelerate(SUNNonlinearSolver NLS, N_Vector gval, N_Vector x, N_Vector xold, int iter) { /* local variables */ int nvec, retval, i_pt, i, j, lAA, maa, *ipt_map; realtype a, b, rtemp, c, s, beta, onembeta, *cvals, *R, *gamma; N_Vector fv, vtemp, gold, fold, *df, *dg, *Q, *Xvecs; booleantype damping; /* local shortcut variables */ vtemp = x; /* use result as temporary vector */ ipt_map = FP_CONTENT(NLS)->imap; maa = FP_CONTENT(NLS)->m; gold = FP_CONTENT(NLS)->gold; fold = FP_CONTENT(NLS)->fold; df = FP_CONTENT(NLS)->df; dg = FP_CONTENT(NLS)->dg; Q = FP_CONTENT(NLS)->q; cvals = FP_CONTENT(NLS)->cvals; Xvecs = FP_CONTENT(NLS)->Xvecs; R = FP_CONTENT(NLS)->R; gamma = FP_CONTENT(NLS)->gamma; fv = FP_CONTENT(NLS)->delta; damping = FP_CONTENT(NLS)->damping; beta = FP_CONTENT(NLS)->beta; /* reset ipt_map, i_pt */ for (i = 0; i < maa; i++) ipt_map[i]=0; i_pt = iter-1 - ((iter-1)/maa)*maa; /* update dg[i_pt], df[i_pt], fv, gold and fold*/ N_VLinearSum(ONE, gval, -ONE, xold, fv); if (iter > 0) { N_VLinearSum(ONE, gval, -ONE, gold, dg[i_pt]); /* dg_new = gval - gold */ N_VLinearSum(ONE, fv, -ONE, fold, df[i_pt]); /* df_new = fv - fold */ } N_VScale(ONE, gval, gold); N_VScale(ONE, fv, fold); /* on first iteration, just do basic fixed-point update */ if (iter == 0) { N_VScale(ONE, gval, x); return(SUN_NLS_SUCCESS); } /* update data structures based on current iteration index */ if (iter == 1) { /* second iteration */ R[0] = SUNRsqrt( N_VDotProd(df[i_pt], df[i_pt]) ); N_VScale(ONE/R[0], df[i_pt], Q[i_pt]); ipt_map[0] = 0; } else if (iter <= maa) { /* another iteration before we've reached maa 
*/ N_VScale(ONE, df[i_pt], vtemp); for (j = 0; j < iter-1; j++) { ipt_map[j] = j; R[(iter-1)*maa+j] = N_VDotProd(Q[j], vtemp); N_VLinearSum(ONE, vtemp, -R[(iter-1)*maa+j], Q[j], vtemp); } R[(iter-1)*maa+iter-1] = SUNRsqrt( N_VDotProd(vtemp, vtemp) ); if (R[(iter-1)*maa+iter-1] == ZERO) { N_VScale(ZERO, vtemp, Q[i_pt]); } else { N_VScale((ONE/R[(iter-1)*maa+iter-1]), vtemp, Q[i_pt]); } ipt_map[iter-1] = iter-1; } else { /* we've filled the acceleration subspace, so start recycling */ /* delete left-most column vector from QR factorization */ for (i = 0; i < maa-1; i++) { a = R[(i+1)*maa + i]; b = R[(i+1)*maa + i+1]; rtemp = SUNRsqrt(a*a + b*b); c = a / rtemp; s = b / rtemp; R[(i+1)*maa + i] = rtemp; R[(i+1)*maa + i+1] = ZERO; if (i < maa-1) { for (j = i+2; j < maa; j++) { a = R[j*maa + i]; b = R[j*maa + i+1]; rtemp = c * a + s * b; R[j*maa + i+1] = -s*a + c*b; R[j*maa + i] = rtemp; } } N_VLinearSum(c, Q[i], s, Q[i+1], vtemp); N_VLinearSum(-s, Q[i], c, Q[i+1], Q[i+1]); N_VScale(ONE, vtemp, Q[i]); } /* ahift R to the left by one */ for (i = 1; i < maa; i++) for (j = 0; j < maa-1; j++) R[(i-1)*maa + j] = R[i*maa + j]; /* add the new df vector */ N_VScale(ONE, df[i_pt], vtemp); for (j = 0; j < maa-1; j++) { R[(maa-1)*maa+j] = N_VDotProd(Q[j], vtemp); N_VLinearSum(ONE, vtemp, -R[(maa-1)*maa+j], Q[j], vtemp); } R[(maa-1)*maa+maa-1] = SUNRsqrt( N_VDotProd(vtemp, vtemp) ); N_VScale((ONE/R[(maa-1)*maa+maa-1]), vtemp, Q[maa-1]); /* update the iteration map */ j = 0; for (i = i_pt+1; i < maa; i++) ipt_map[j++] = i; for (i = 0; i < i_pt+1; i++) ipt_map[j++] = i; } /* solve least squares problem and update solution */ lAA = iter; if (maa < iter) lAA = maa; retval = N_VDotProdMulti(lAA, fv, Q, gamma); if (retval != 0) return(SUN_NLS_VECTOROP_ERR); /* set arrays for fused vector operation */ cvals[0] = ONE; Xvecs[0] = gval; nvec = 1; for (i = lAA-1; i > -1; i--) { for (j = i+1; j < lAA; j++) gamma[i] -= R[j*maa+i]*gamma[j]; if (gamma[i] == ZERO) { gamma[i] = ZERO; } else { 
gamma[i] /= R[i*maa+i]; } cvals[nvec] = -gamma[i]; Xvecs[nvec] = dg[ipt_map[i]]; nvec += 1; } /* if enabled, apply damping */ if (damping) { onembeta = (ONE - beta); cvals[nvec] = -onembeta; Xvecs[nvec] = fv; nvec += 1; for (i = lAA - 1; i > -1; i--) { cvals[nvec] = onembeta * gamma[i]; Xvecs[nvec] = df[ipt_map[i]]; nvec += 1; } } /* update solution */ retval = N_VLinearCombination(nvec, cvals, Xvecs, x); if (retval != 0) return(SUN_NLS_VECTOROP_ERR); return(SUN_NLS_SUCCESS); } static int AllocateContent(SUNNonlinearSolver NLS, N_Vector y) { int m = FP_CONTENT(NLS)->m; FP_CONTENT(NLS)->yprev = N_VClone(y); if (FP_CONTENT(NLS)->yprev == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->gy = N_VClone(y); if (FP_CONTENT(NLS)->gy == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->delta = N_VClone(y); if (FP_CONTENT(NLS)->delta == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } /* Allocate all m-dependent content */ if (m > 0) { FP_CONTENT(NLS)->fold = N_VClone(y); if (FP_CONTENT(NLS)->fold == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->gold = N_VClone(y); if (FP_CONTENT(NLS)->gold == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->imap = (int *) malloc(m * sizeof(int)); if (FP_CONTENT(NLS)->imap == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->R = (realtype *) malloc((m*m) * sizeof(realtype)); if (FP_CONTENT(NLS)->R == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->gamma = (realtype *) malloc(m * sizeof(realtype)); if (FP_CONTENT(NLS)->gamma == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->cvals = (realtype *) malloc(2*(m+1) * sizeof(realtype)); if (FP_CONTENT(NLS)->cvals == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->df = N_VCloneVectorArray(m, y); if (FP_CONTENT(NLS)->df == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->dg = 
N_VCloneVectorArray(m, y); if (FP_CONTENT(NLS)->dg == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->q = N_VCloneVectorArray(m, y); if (FP_CONTENT(NLS)->q == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } FP_CONTENT(NLS)->Xvecs = (N_Vector *) malloc(2*(m+1) * sizeof(N_Vector)); if (FP_CONTENT(NLS)->Xvecs == NULL) { FreeContent(NLS); return(SUN_NLS_MEM_FAIL); } } return(SUN_NLS_SUCCESS); } static void FreeContent(SUNNonlinearSolver NLS) { if (FP_CONTENT(NLS)->yprev) { N_VDestroy(FP_CONTENT(NLS)->yprev); FP_CONTENT(NLS)->yprev = NULL; } if (FP_CONTENT(NLS)->gy) { N_VDestroy(FP_CONTENT(NLS)->gy); FP_CONTENT(NLS)->gy = NULL; } if (FP_CONTENT(NLS)->fold) { N_VDestroy(FP_CONTENT(NLS)->fold); FP_CONTENT(NLS)->fold = NULL; } if (FP_CONTENT(NLS)->gold) { N_VDestroy(FP_CONTENT(NLS)->gold); FP_CONTENT(NLS)->gold = NULL; } if (FP_CONTENT(NLS)->delta) { N_VDestroy(FP_CONTENT(NLS)->delta); FP_CONTENT(NLS)->delta = NULL; } if (FP_CONTENT(NLS)->imap) { free(FP_CONTENT(NLS)->imap); FP_CONTENT(NLS)->imap = NULL; } if (FP_CONTENT(NLS)->R) { free(FP_CONTENT(NLS)->R); FP_CONTENT(NLS)->R = NULL; } if (FP_CONTENT(NLS)->gamma) { free(FP_CONTENT(NLS)->gamma); FP_CONTENT(NLS)->gamma = NULL; } if (FP_CONTENT(NLS)->cvals) { free(FP_CONTENT(NLS)->cvals); FP_CONTENT(NLS)->cvals = NULL; } if (FP_CONTENT(NLS)->df) { N_VDestroyVectorArray(FP_CONTENT(NLS)->df, FP_CONTENT(NLS)->m); FP_CONTENT(NLS)->df = NULL; } if (FP_CONTENT(NLS)->dg) { N_VDestroyVectorArray(FP_CONTENT(NLS)->dg, FP_CONTENT(NLS)->m); FP_CONTENT(NLS)->dg = NULL; } if (FP_CONTENT(NLS)->q) { N_VDestroyVectorArray(FP_CONTENT(NLS)->q, FP_CONTENT(NLS)->m); FP_CONTENT(NLS)->q = NULL; } if (FP_CONTENT(NLS)->Xvecs) { free(FP_CONTENT(NLS)->Xvecs); FP_CONTENT(NLS)->Xvecs = NULL; } return; } int SUNNonlinSolSetInfoFile_FixedPoint(SUNNonlinearSolver NLS, FILE* info_file) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); 
FP_CONTENT(NLS)->info_file = info_file; return(SUN_NLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNNonlinSolSetInfoFile_FixedPoint: SUNDIALS was not built with monitoring\n"); return(SUN_NLS_ILL_INPUT); #endif } int SUNNonlinSolSetPrintLevel_FixedPoint(SUNNonlinearSolver NLS, int print_level) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check for valid print level */ if (print_level < 0 || print_level > 1) return(SUN_NLS_ILL_INPUT); FP_CONTENT(NLS)->print_level = print_level; return(SUN_NLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNNonlinSolSetPrintLevel_FixedPoint: SUNDIALS was not built with monitoring\n"); return(SUN_NLS_ILL_INPUT); #endif } StanHeaders/src/sunnonlinsol/fixedpoint/fmod/0000755000176200001440000000000014511135055021101 5ustar liggesusersStanHeaders/src/sunnonlinsol/fixedpoint/fmod/fsunnonlinsol_fixedpoint_mod.c0000644000176200001440000003156414645137106027264 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define 
SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nonlinearsolver.h" #include "sunnonlinsol/sunnonlinsol_fixedpoint.h" SWIGEXPORT SUNNonlinearSolver _wrap_FSUNNonlinSol_FixedPoint(N_Vector farg1, int const *farg2, void *farg3) { SUNNonlinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; SUNContext arg3 = (SUNContext) 0 ; SUNNonlinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); arg3 = (SUNContext)(farg3); result = (SUNNonlinearSolver)SUNNonlinSol_FixedPoint(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT SUNNonlinearSolver _wrap_FSUNNonlinSol_FixedPointSens(int const *farg1, N_Vector farg2, int const *farg3, void *farg4) { SUNNonlinearSolver fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; int arg3 ; SUNContext arg4 = (SUNContext) 0 ; SUNNonlinearSolver result; 
arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (int)(*farg3); arg4 = (SUNContext)(farg4); result = (SUNNonlinearSolver)SUNNonlinSol_FixedPointSens(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetType_FixedPoint(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinearSolver_Type result; arg1 = (SUNNonlinearSolver)(farg1); result = (SUNNonlinearSolver_Type)SUNNonlinSolGetType_FixedPoint(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolInitialize_FixedPoint(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); result = (int)SUNNonlinSolInitialize_FixedPoint(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSolve_FixedPoint(SUNNonlinearSolver farg1, N_Vector farg2, N_Vector farg3, N_Vector farg4, double const *farg5, int const *farg6, void *farg7) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int arg6 ; void *arg7 = (void *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); arg6 = (int)(*farg6); arg7 = (void *)(farg7); result = (int)SUNNonlinSolSolve_FixedPoint(arg1,arg2,arg3,arg4,arg5,arg6,arg7); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolFree_FixedPoint(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); result = (int)SUNNonlinSolFree_FixedPoint(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetSysFn_FixedPoint(SUNNonlinearSolver farg1, SUNNonlinSolSysFn farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 
0 ; SUNNonlinSolSysFn arg2 = (SUNNonlinSolSysFn) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolSysFn)(farg2); result = (int)SUNNonlinSolSetSysFn_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetConvTestFn_FixedPoint(SUNNonlinearSolver farg1, SUNNonlinSolConvTestFn farg2, void *farg3) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolConvTestFn arg2 = (SUNNonlinSolConvTestFn) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolConvTestFn)(farg2); arg3 = (void *)(farg3); result = (int)SUNNonlinSolSetConvTestFn_FixedPoint(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetMaxIters_FixedPoint(SUNNonlinearSolver farg1, int const *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNNonlinSolSetMaxIters_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetDamping_FixedPoint(SUNNonlinearSolver farg1, double const *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; realtype arg2 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (realtype)(*farg2); result = (int)SUNNonlinSolSetDamping_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetNumIters_FixedPoint(SUNNonlinearSolver farg1, long *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (long *)(farg2); result = (int)SUNNonlinSolGetNumIters_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetCurIter_FixedPoint(SUNNonlinearSolver farg1, int *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int *arg2 = (int 
*) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int *)(farg2); result = (int)SUNNonlinSolGetCurIter_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetNumConvFails_FixedPoint(SUNNonlinearSolver farg1, long *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (long *)(farg2); result = (int)SUNNonlinSolGetNumConvFails_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetSysFn_FixedPoint(SUNNonlinearSolver farg1, void *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolSysFn *arg2 = (SUNNonlinSolSysFn *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolSysFn *)(farg2); result = (int)SUNNonlinSolGetSysFn_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetInfoFile_FixedPoint(SUNNonlinearSolver farg1, void *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNNonlinSolSetInfoFile_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetPrintLevel_FixedPoint(SUNNonlinearSolver farg1, int const *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNNonlinSolSetPrintLevel_FixedPoint(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sunnonlinsol/fixedpoint/fmod/fsunnonlinsol_fixedpoint_mod.f900000644000176200001440000003336614645137106027442 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! 
the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunnonlinsol_fixedpoint_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_nonlinearsolver_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FSUNNonlinSol_FixedPoint public :: FSUNNonlinSol_FixedPointSens public :: FSUNNonlinSolGetType_FixedPoint public :: FSUNNonlinSolInitialize_FixedPoint public :: FSUNNonlinSolSolve_FixedPoint public :: FSUNNonlinSolFree_FixedPoint public :: FSUNNonlinSolSetSysFn_FixedPoint public :: FSUNNonlinSolSetConvTestFn_FixedPoint public :: FSUNNonlinSolSetMaxIters_FixedPoint public :: FSUNNonlinSolSetDamping_FixedPoint public :: FSUNNonlinSolGetNumIters_FixedPoint public :: FSUNNonlinSolGetCurIter_FixedPoint public :: FSUNNonlinSolGetNumConvFails_FixedPoint public :: FSUNNonlinSolGetSysFn_FixedPoint public :: FSUNNonlinSolSetInfoFile_FixedPoint public :: FSUNNonlinSolSetPrintLevel_FixedPoint ! 
WRAPPER DECLARATIONS interface function swigc_FSUNNonlinSol_FixedPoint(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNNonlinSol_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FSUNNonlinSol_FixedPointSens(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNNonlinSol_FixedPointSens") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FSUNNonlinSolGetType_FixedPoint(farg1) & bind(C, name="_wrap_FSUNNonlinSolGetType_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolInitialize_FixedPoint(farg1) & bind(C, name="_wrap_FSUNNonlinSolInitialize_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSolve_FixedPoint(farg1, farg2, farg3, farg4, farg5, farg6, farg7) & bind(C, name="_wrap_FSUNNonlinSolSolve_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT), intent(in) :: farg6 type(C_PTR), value :: farg7 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolFree_FixedPoint(farg1) & bind(C, name="_wrap_FSUNNonlinSolFree_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetSysFn_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetSysFn_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 
type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetConvTestFn_FixedPoint(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNNonlinSolSetConvTestFn_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetMaxIters_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetMaxIters_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetDamping_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetDamping_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetNumIters_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetNumIters_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetCurIter_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetCurIter_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetNumConvFails_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetNumConvFails_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetSysFn_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetSysFn_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: 
fresult end function function swigc_FSUNNonlinSolSetInfoFile_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetInfoFile_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetPrintLevel_FixedPoint(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetPrintLevel_FixedPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FSUNNonlinSol_FixedPoint(y, m, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNNonlinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y integer(C_INT), intent(in) :: m type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(y) farg2 = m farg3 = sunctx fresult = swigc_FSUNNonlinSol_FixedPoint(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FSUNNonlinSol_FixedPointSens(count, y, m, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNNonlinearSolver), pointer :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: y integer(C_INT), intent(in) :: m type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = count farg2 = c_loc(y) farg3 = m farg4 = sunctx fresult = swigc_FSUNNonlinSol_FixedPointSens(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FSUNNonlinSolGetType_FixedPoint(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNNonlinearSolver_Type) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = 
swigc_FSUNNonlinSolGetType_FixedPoint(farg1) swig_result = fresult end function function FSUNNonlinSolInitialize_FixedPoint(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolInitialize_FixedPoint(farg1) swig_result = fresult end function function FSUNNonlinSolSolve_FixedPoint(nls, y0, y, w, tol, callsetup, mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(N_Vector), target, intent(inout) :: y0 type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: w real(C_DOUBLE), intent(in) :: tol integer(C_INT), intent(in) :: callsetup type(C_PTR) :: mem integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 integer(C_INT) :: farg6 type(C_PTR) :: farg7 farg1 = c_loc(nls) farg2 = c_loc(y0) farg3 = c_loc(y) farg4 = c_loc(w) farg5 = tol farg6 = callsetup farg7 = mem fresult = swigc_FSUNNonlinSolSolve_FixedPoint(farg1, farg2, farg3, farg4, farg5, farg6, farg7) swig_result = fresult end function function FSUNNonlinSolFree_FixedPoint(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolFree_FixedPoint(farg1) swig_result = fresult end function function FSUNNonlinSolSetSysFn_FixedPoint(nls, sysfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: sysfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = c_loc(nls) farg2 = sysfn fresult = 
swigc_FSUNNonlinSolSetSysFn_FixedPoint(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetConvTestFn_FixedPoint(nls, ctestfn, ctest_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: ctestfn type(C_PTR) :: ctest_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(nls) farg2 = ctestfn farg3 = ctest_data fresult = swigc_FSUNNonlinSolSetConvTestFn_FixedPoint(farg1, farg2, farg3) swig_result = fresult end function function FSUNNonlinSolSetMaxIters_FixedPoint(nls, maxiters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT), intent(in) :: maxiters integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(nls) farg2 = maxiters fresult = swigc_FSUNNonlinSolSetMaxIters_FixedPoint(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetDamping_FixedPoint(nls, beta) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls real(C_DOUBLE), intent(in) :: beta integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = c_loc(nls) farg2 = beta fresult = swigc_FSUNNonlinSolSetDamping_FixedPoint(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetNumIters_FixedPoint(nls, niters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_LONG), dimension(*), target, intent(inout) :: niters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(niters(1)) fresult = swigc_FSUNNonlinSolGetNumIters_FixedPoint(farg1, farg2) swig_result = fresult end function function 
FSUNNonlinSolGetCurIter_FixedPoint(nls, iter) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT), dimension(*), target, intent(inout) :: iter integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(iter(1)) fresult = swigc_FSUNNonlinSolGetCurIter_FixedPoint(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetNumConvFails_FixedPoint(nls, nconvfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_LONG), dimension(*), target, intent(inout) :: nconvfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(nconvfails(1)) fresult = swigc_FSUNNonlinSolGetNumConvFails_FixedPoint(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetSysFn_FixedPoint(nls, sysfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), target, intent(inout) :: sysfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(sysfn) fresult = swigc_FSUNNonlinSolGetSysFn_FixedPoint(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetInfoFile_FixedPoint(nls, info_file) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_PTR) :: info_file integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = info_file fresult = swigc_FSUNNonlinSolSetInfoFile_FixedPoint(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetPrintLevel_FixedPoint(nls, print_level) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), 
target, intent(inout) :: nls integer(C_INT), intent(in) :: print_level integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(nls) farg2 = print_level fresult = swigc_FSUNNonlinSolSetPrintLevel_FixedPoint(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sunnonlinsol/newton/0000755000176200001440000000000014645137104017322 5ustar liggesusersStanHeaders/src/sunnonlinsol/newton/sunnonlinsol_newton.c0000644000176200001440000003756114645137106023637 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation file for the SUNNonlinearSolver module * implementation of Newton's method. 
* ---------------------------------------------------------------------------*/ #include #include #include #include #include #include #include "sundials_debug.h" /* Content structure accessibility macros */ #define NEWTON_CONTENT(S) ( (SUNNonlinearSolverContent_Newton)(S->content) ) /* Constant macros */ #define ZERO RCONST(0.0) /* real 0.0 */ #define ONE RCONST(1.0) /* real 1.0 */ /*============================================================================== Constructor to create a new Newton solver ============================================================================*/ SUNNonlinearSolver SUNNonlinSol_Newton(N_Vector y, SUNContext sunctx) { SUNNonlinearSolver NLS; SUNNonlinearSolverContent_Newton content; /* Check that the supplied N_Vector is non-NULL */ if (y == NULL) return(NULL); /* Check that the supplied N_Vector supports all required operations */ if ( (y->ops->nvclone == NULL) || (y->ops->nvdestroy == NULL) || (y->ops->nvscale == NULL) || (y->ops->nvlinearsum == NULL) ) return(NULL); /* Create an empty nonlinear linear solver object */ NLS = SUNNonlinSolNewEmpty(sunctx); if (NLS == NULL) return(NULL); /* Attach operations */ NLS->ops->gettype = SUNNonlinSolGetType_Newton; NLS->ops->initialize = SUNNonlinSolInitialize_Newton; NLS->ops->solve = SUNNonlinSolSolve_Newton; NLS->ops->free = SUNNonlinSolFree_Newton; NLS->ops->setsysfn = SUNNonlinSolSetSysFn_Newton; NLS->ops->setlsetupfn = SUNNonlinSolSetLSetupFn_Newton; NLS->ops->setlsolvefn = SUNNonlinSolSetLSolveFn_Newton; NLS->ops->setctestfn = SUNNonlinSolSetConvTestFn_Newton; NLS->ops->setmaxiters = SUNNonlinSolSetMaxIters_Newton; NLS->ops->getnumiters = SUNNonlinSolGetNumIters_Newton; NLS->ops->getcuriter = SUNNonlinSolGetCurIter_Newton; NLS->ops->getnumconvfails = SUNNonlinSolGetNumConvFails_Newton; /* Create content */ content = NULL; content = (SUNNonlinearSolverContent_Newton) malloc(sizeof *content); if (content == NULL) { SUNNonlinSolFree(NLS); return(NULL); } /* Initialize all components of 
content to 0/NULL */ memset(content, 0, sizeof(struct _SUNNonlinearSolverContent_Newton)); /* Attach content */ NLS->content = content; /* Fill general content */ content->Sys = NULL; content->LSetup = NULL; content->LSolve = NULL; content->CTest = NULL; content->jcur = SUNFALSE; content->curiter = 0; content->maxiters = 3; content->niters = 0; content->nconvfails = 0; content->ctest_data = NULL; content->print_level = 0; content->info_file = stdout; /* Fill allocatable content */ content->delta = N_VClone(y); if (content->delta == NULL) { SUNNonlinSolFree(NLS); return(NULL); } return(NLS); } /*============================================================================== Constructor wrapper to create a new Newton solver for sensitivity solvers ============================================================================*/ SUNNonlinearSolver SUNNonlinSol_NewtonSens(int count, N_Vector y, SUNContext sunctx) { SUNNonlinearSolver NLS; N_Vector w; /* create sensitivity vector wrapper */ w = N_VNew_SensWrapper(count, y); /* create nonlinear solver using sensitivity vector wrapper */ NLS = SUNNonlinSol_Newton(w, sunctx); /* free sensitivity vector wrapper */ N_VDestroy(w); /* return NLS object */ return(NLS); } /*============================================================================== GetType, Initialize, Setup, Solve, and Free operations ============================================================================*/ SUNNonlinearSolver_Type SUNNonlinSolGetType_Newton(SUNNonlinearSolver NLS) { return(SUNNONLINEARSOLVER_ROOTFIND); } int SUNNonlinSolInitialize_Newton(SUNNonlinearSolver NLS) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that all required function pointers have been set */ if ( (NEWTON_CONTENT(NLS)->Sys == NULL) || (NEWTON_CONTENT(NLS)->LSolve == NULL) || (NEWTON_CONTENT(NLS)->CTest == NULL) ) { return(SUN_NLS_MEM_NULL); } /* reset the total number of iterations and convergence failures */ 
NEWTON_CONTENT(NLS)->niters = 0; NEWTON_CONTENT(NLS)->nconvfails = 0; /* reset the Jacobian status */ NEWTON_CONTENT(NLS)->jcur = SUNFALSE; return(SUN_NLS_SUCCESS); } /*------------------------------------------------------------------------------ SUNNonlinSolSolve_Newton: Performs the nonlinear solve F(y) = 0 Successful solve return code: SUN_NLS_SUCCESS = 0 Recoverable failure return codes (positive): SUN_NLS_CONV_RECVR *_RHSFUNC_RECVR (ODEs) or *_RES_RECVR (DAEs) *_LSETUP_RECVR *_LSOLVE_RECVR Unrecoverable failure return codes (negative): *_MEM_NULL *_RHSFUNC_FAIL (ODEs) or *_RES_FAIL (DAEs) *_LSETUP_FAIL *_LSOLVE_FAIL Note return values beginning with * are package specific values returned by the Sys, LSetup, and LSolve functions provided to the nonlinear solver. ----------------------------------------------------------------------------*/ int SUNNonlinSolSolve_Newton(SUNNonlinearSolver NLS, N_Vector y0, N_Vector ycor, N_Vector w, realtype tol, booleantype callLSetup, void* mem) { /* local variables */ int retval; booleantype jbad; N_Vector delta; /* check that the inputs are non-null */ if ( (NLS == NULL) || (y0 == NULL) || (ycor == NULL) || (w == NULL) || (mem == NULL) ) return(SUN_NLS_MEM_NULL); /* check that all required function pointers have been set */ if ( (NEWTON_CONTENT(NLS)->Sys == NULL) || (NEWTON_CONTENT(NLS)->LSolve == NULL) || (callLSetup && (NEWTON_CONTENT(NLS)->LSetup == NULL)) || (NEWTON_CONTENT(NLS)->CTest == NULL) ) { return(SUN_NLS_MEM_NULL); } /* set local shortcut variables */ delta = NEWTON_CONTENT(NLS)->delta; /* assume the Jacobian is good */ jbad = SUNFALSE; /* initialize iteration and convergence fail counters for this solve */ NEWTON_CONTENT(NLS)->niters = 0; NEWTON_CONTENT(NLS)->nconvfails = 0; /* looping point for attempts at solution of the nonlinear system: Evaluate the nonlinear residual function (store in delta) Setup the linear solver if necessary Preform Newton iteraion */ for(;;) { /* compute the nonlinear residual, store 
in delta */ retval = NEWTON_CONTENT(NLS)->Sys(ycor, delta, mem); if (retval != SUN_NLS_SUCCESS) break; /* if indicated, setup the linear system */ if (callLSetup) { retval = NEWTON_CONTENT(NLS)->LSetup(jbad, &(NEWTON_CONTENT(NLS)->jcur), mem); if (retval != SUN_NLS_SUCCESS) break; } /* initialize current iteration counter for this solve attempt */ NEWTON_CONTENT(NLS)->curiter = 0; #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print current iteration number and the nonlinear residual */ if (NEWTON_CONTENT(NLS)->print_level && NEWTON_CONTENT(NLS)->info_file) { STAN_SUNDIALS_FPRINTF(NEWTON_CONTENT(NLS)->info_file, "SUNNONLINSOL_NEWTON (nni=%ld):\n", (long int) NEWTON_CONTENT(NLS)->niters); } #endif /* looping point for Newton iteration. Break out on any error. */ for(;;) { /* increment nonlinear solver iteration counter */ NEWTON_CONTENT(NLS)->niters++; /* compute the negative of the residual for the linear system rhs */ N_VScale(-ONE, delta, delta); /* solve the linear system to get Newton update delta */ retval = NEWTON_CONTENT(NLS)->LSolve(delta, mem); if (retval != SUN_NLS_SUCCESS) break; /* update the Newton iterate */ N_VLinearSum(ONE, ycor, ONE, delta, ycor); /* test for convergence */ retval = NEWTON_CONTENT(NLS)->CTest(NLS, ycor, delta, tol, w, NEWTON_CONTENT(NLS)->ctest_data); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print current iteration number and the nonlinear residual */ if (NEWTON_CONTENT(NLS)->print_level && NEWTON_CONTENT(NLS)->info_file) { STAN_SUNDIALS_FPRINTF(NEWTON_CONTENT(NLS)->info_file, SUN_NLS_MSG_RESIDUAL, (long int) NEWTON_CONTENT(NLS)->curiter, N_VWrmsNorm(delta, w)); } #endif /* if successful update Jacobian status and return */ if (retval == SUN_NLS_SUCCESS) { NEWTON_CONTENT(NLS)->jcur = SUNFALSE; return(SUN_NLS_SUCCESS); } /* check if the iteration should continue; otherwise exit Newton loop */ if (retval != SUN_NLS_CONTINUE) break; /* not yet converged. Increment curiter and test for max allowed. 
*/ NEWTON_CONTENT(NLS)->curiter++; if (NEWTON_CONTENT(NLS)->curiter >= NEWTON_CONTENT(NLS)->maxiters) { retval = SUN_NLS_CONV_RECVR; break; } /* compute the nonlinear residual, store in delta */ retval = NEWTON_CONTENT(NLS)->Sys(ycor, delta, mem); if (retval != SUN_NLS_SUCCESS) break; } /* end of Newton iteration loop */ /* all errors go here */ /* If there is a recoverable convergence failure and the Jacobian-related data appears not to be current, increment the convergence failure count, reset the initial correction to zero, and loop again with a call to lsetup in which jbad is TRUE. Otherwise break out and return. */ if ((retval > 0) && !(NEWTON_CONTENT(NLS)->jcur) && (NEWTON_CONTENT(NLS)->LSetup)) { NEWTON_CONTENT(NLS)->nconvfails++; callLSetup = SUNTRUE; jbad = SUNTRUE; N_VConst(ZERO, ycor); continue; } else { break; } } /* end of setup loop */ /* increment number of convergence failures */ NEWTON_CONTENT(NLS)->nconvfails++; /* all error returns exit here */ return(retval); } int SUNNonlinSolFree_Newton(SUNNonlinearSolver NLS) { /* return if NLS is already free */ if (NLS == NULL) return(SUN_NLS_SUCCESS); /* free items from contents, then the generic structure */ if (NLS->content) { if (NEWTON_CONTENT(NLS)->delta) N_VDestroy(NEWTON_CONTENT(NLS)->delta); NEWTON_CONTENT(NLS)->delta = NULL; free(NLS->content); NLS->content = NULL; } /* free the ops structure */ if (NLS->ops) { free(NLS->ops); NLS->ops = NULL; } /* free the nonlinear solver */ free(NLS); return(SUN_NLS_SUCCESS); } /*============================================================================== Set functions ============================================================================*/ int SUNNonlinSolSetSysFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that the nonlinear system function is non-null */ if (SysFn == NULL) return(SUN_NLS_ILL_INPUT); NEWTON_CONTENT(NLS)->Sys = SysFn; 
return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetLSetupFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolLSetupFn LSetupFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); NEWTON_CONTENT(NLS)->LSetup = LSetupFn; return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetLSolveFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolLSolveFn LSolveFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that the linear solver solve function is non-null */ if (LSolveFn == NULL) return(SUN_NLS_ILL_INPUT); NEWTON_CONTENT(NLS)->LSolve = LSolveFn; return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetConvTestFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolConvTestFn CTestFn, void* ctest_data) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that the convergence test function is non-null */ if (CTestFn == NULL) return(SUN_NLS_ILL_INPUT); NEWTON_CONTENT(NLS)->CTest = CTestFn; /* attach convergence test data */ NEWTON_CONTENT(NLS)->ctest_data = ctest_data; return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetMaxIters_Newton(SUNNonlinearSolver NLS, int maxiters) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check that maxiters is a vaild */ if (maxiters < 1) return(SUN_NLS_ILL_INPUT); NEWTON_CONTENT(NLS)->maxiters = maxiters; return(SUN_NLS_SUCCESS); } /*============================================================================== Get functions ============================================================================*/ int SUNNonlinSolGetNumIters_Newton(SUNNonlinearSolver NLS, long int *niters) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return the number of nonlinear iterations in the last solve */ *niters = NEWTON_CONTENT(NLS)->niters; return(SUN_NLS_SUCCESS); } int SUNNonlinSolGetCurIter_Newton(SUNNonlinearSolver NLS, int *iter) { /* check that the nonlinear 
solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return the current nonlinear solver iteration count */ *iter = NEWTON_CONTENT(NLS)->curiter; return(SUN_NLS_SUCCESS); } int SUNNonlinSolGetNumConvFails_Newton(SUNNonlinearSolver NLS, long int *nconvfails) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return the total number of nonlinear convergence failures */ *nconvfails = NEWTON_CONTENT(NLS)->nconvfails; return(SUN_NLS_SUCCESS); } int SUNNonlinSolGetSysFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolSysFn *SysFn) { /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* return the nonlinear system defining function */ *SysFn = NEWTON_CONTENT(NLS)->Sys; return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetInfoFile_Newton(SUNNonlinearSolver NLS, FILE* info_file) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); NEWTON_CONTENT(NLS)->info_file = info_file; return(SUN_NLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNNonlinSolSetInfoFile_Newton: SUNDIALS was not built with monitoring\n"); return(SUN_NLS_ILL_INPUT); #endif } int SUNNonlinSolSetPrintLevel_Newton(SUNNonlinearSolver NLS, int print_level) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the nonlinear solver is non-null */ if (NLS == NULL) return(SUN_NLS_MEM_NULL); /* check for valid print level */ if (print_level < 0 || print_level > 1) return(SUN_NLS_ILL_INPUT); NEWTON_CONTENT(NLS)->print_level = print_level; return(SUN_NLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNNonlinSolSetPrintLevel_Newton: SUNDIALS was not built with monitoring\n"); return(SUN_NLS_ILL_INPUT); #endif } StanHeaders/src/sunnonlinsol/newton/fmod/0000755000176200001440000000000014511135055020242 5ustar liggesusersStanHeaders/src/sunnonlinsol/newton/fmod/fsunnonlinsol_newton_mod.f900000644000176200001440000003371714645137106025744 0ustar 
liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunnonlinsol_newton_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_nonlinearsolver_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FSUNNonlinSol_Newton public :: FSUNNonlinSol_NewtonSens public :: FSUNNonlinSolGetType_Newton public :: FSUNNonlinSolInitialize_Newton public :: FSUNNonlinSolSolve_Newton public :: FSUNNonlinSolFree_Newton public :: FSUNNonlinSolSetSysFn_Newton public :: FSUNNonlinSolSetLSetupFn_Newton public :: FSUNNonlinSolSetLSolveFn_Newton public :: FSUNNonlinSolSetConvTestFn_Newton public :: FSUNNonlinSolSetMaxIters_Newton public :: FSUNNonlinSolGetNumIters_Newton public :: FSUNNonlinSolGetCurIter_Newton public :: FSUNNonlinSolGetNumConvFails_Newton public :: FSUNNonlinSolGetSysFn_Newton public :: FSUNNonlinSolSetInfoFile_Newton public :: FSUNNonlinSolSetPrintLevel_Newton ! 
WRAPPER DECLARATIONS interface function swigc_FSUNNonlinSol_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSol_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FSUNNonlinSol_NewtonSens(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNNonlinSol_NewtonSens") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FSUNNonlinSolGetType_Newton(farg1) & bind(C, name="_wrap_FSUNNonlinSolGetType_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolInitialize_Newton(farg1) & bind(C, name="_wrap_FSUNNonlinSolInitialize_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSolve_Newton(farg1, farg2, farg3, farg4, farg5, farg6, farg7) & bind(C, name="_wrap_FSUNNonlinSolSolve_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT), intent(in) :: farg6 type(C_PTR), value :: farg7 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolFree_Newton(farg1) & bind(C, name="_wrap_FSUNNonlinSolFree_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetSysFn_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetSysFn_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetLSetupFn_Newton(farg1, farg2) & bind(C, 
name="_wrap_FSUNNonlinSolSetLSetupFn_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetLSolveFn_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetLSolveFn_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetConvTestFn_Newton(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNNonlinSolSetConvTestFn_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetMaxIters_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetMaxIters_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetNumIters_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetNumIters_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetCurIter_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetCurIter_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetNumConvFails_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetNumConvFails_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetSysFn_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetSysFn_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), 
value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetInfoFile_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetInfoFile_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetPrintLevel_Newton(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetPrintLevel_Newton") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FSUNNonlinSol_Newton(y, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNNonlinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(y) farg2 = sunctx fresult = swigc_FSUNNonlinSol_Newton(farg1, farg2) call c_f_pointer(fresult, swig_result) end function function FSUNNonlinSol_NewtonSens(count, y, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNNonlinearSolver), pointer :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: y type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = count farg2 = c_loc(y) farg3 = sunctx fresult = swigc_FSUNNonlinSol_NewtonSens(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FSUNNonlinSolGetType_Newton(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNNonlinearSolver_Type) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolGetType_Newton(farg1) swig_result = fresult end function function FSUNNonlinSolInitialize_Newton(nls) & result(swig_result) use, 
intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolInitialize_Newton(farg1) swig_result = fresult end function function FSUNNonlinSolSolve_Newton(nls, y0, y, w, tol, calllsetup, mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(N_Vector), target, intent(inout) :: y0 type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: w real(C_DOUBLE), intent(in) :: tol integer(C_INT), intent(in) :: calllsetup type(C_PTR) :: mem integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 integer(C_INT) :: farg6 type(C_PTR) :: farg7 farg1 = c_loc(nls) farg2 = c_loc(y0) farg3 = c_loc(y) farg4 = c_loc(w) farg5 = tol farg6 = calllsetup farg7 = mem fresult = swigc_FSUNNonlinSolSolve_Newton(farg1, farg2, farg3, farg4, farg5, farg6, farg7) swig_result = fresult end function function FSUNNonlinSolFree_Newton(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolFree_Newton(farg1) swig_result = fresult end function function FSUNNonlinSolSetSysFn_Newton(nls, sysfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: sysfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = c_loc(nls) farg2 = sysfn fresult = swigc_FSUNNonlinSolSetSysFn_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetLSetupFn_Newton(nls, lsetupfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING 
integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: lsetupfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = c_loc(nls) farg2 = lsetupfn fresult = swigc_FSUNNonlinSolSetLSetupFn_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetLSolveFn_Newton(nls, lsolvefn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: lsolvefn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = c_loc(nls) farg2 = lsolvefn fresult = swigc_FSUNNonlinSolSetLSolveFn_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetConvTestFn_Newton(nls, ctestfn, ctest_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: ctestfn type(C_PTR) :: ctest_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(nls) farg2 = ctestfn farg3 = ctest_data fresult = swigc_FSUNNonlinSolSetConvTestFn_Newton(farg1, farg2, farg3) swig_result = fresult end function function FSUNNonlinSolSetMaxIters_Newton(nls, maxiters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT), intent(in) :: maxiters integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(nls) farg2 = maxiters fresult = swigc_FSUNNonlinSolSetMaxIters_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetNumIters_Newton(nls, niters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_LONG), dimension(*), target, intent(inout) :: 
niters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(niters(1)) fresult = swigc_FSUNNonlinSolGetNumIters_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetCurIter_Newton(nls, iter) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT), dimension(*), target, intent(inout) :: iter integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(iter(1)) fresult = swigc_FSUNNonlinSolGetCurIter_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetNumConvFails_Newton(nls, nconvfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_LONG), dimension(*), target, intent(inout) :: nconvfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(nconvfails(1)) fresult = swigc_FSUNNonlinSolGetNumConvFails_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetSysFn_Newton(nls, sysfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), target, intent(inout) :: sysfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(sysfn) fresult = swigc_FSUNNonlinSolGetSysFn_Newton(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetInfoFile_Newton(nls, info_file) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_PTR) :: info_file integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = info_file fresult = swigc_FSUNNonlinSolSetInfoFile_Newton(farg1, farg2) swig_result = 
fresult end function function FSUNNonlinSolSetPrintLevel_Newton(nls, print_level) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT), intent(in) :: print_level integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(nls) farg2 = print_level fresult = swigc_FSUNNonlinSolSetPrintLevel_Newton(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sunnonlinsol/newton/fmod/fsunnonlinsol_newton_mod.c0000644000176200001440000003215114645137106025557 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nonlinearsolver.h" #include "sunnonlinsol/sunnonlinsol_newton.h" SWIGEXPORT SUNNonlinearSolver _wrap_FSUNNonlinSol_Newton(N_Vector farg1, void *farg2) { SUNNonlinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; SUNContext arg2 = (SUNContext) 0 ; SUNNonlinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (SUNContext)(farg2); result = (SUNNonlinearSolver)SUNNonlinSol_Newton(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT SUNNonlinearSolver _wrap_FSUNNonlinSol_NewtonSens(int const *farg1, N_Vector farg2, void *farg3) { SUNNonlinearSolver fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; SUNContext arg3 = (SUNContext) 0 ; SUNNonlinearSolver result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (SUNContext)(farg3); result = 
(SUNNonlinearSolver)SUNNonlinSol_NewtonSens(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetType_Newton(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinearSolver_Type result; arg1 = (SUNNonlinearSolver)(farg1); result = (SUNNonlinearSolver_Type)SUNNonlinSolGetType_Newton(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolInitialize_Newton(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); result = (int)SUNNonlinSolInitialize_Newton(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSolve_Newton(SUNNonlinearSolver farg1, N_Vector farg2, N_Vector farg3, N_Vector farg4, double const *farg5, int const *farg6, void *farg7) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int arg6 ; void *arg7 = (void *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); arg6 = (int)(*farg6); arg7 = (void *)(farg7); result = (int)SUNNonlinSolSolve_Newton(arg1,arg2,arg3,arg4,arg5,arg6,arg7); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolFree_Newton(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); result = (int)SUNNonlinSolFree_Newton(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetSysFn_Newton(SUNNonlinearSolver farg1, SUNNonlinSolSysFn farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolSysFn arg2 = (SUNNonlinSolSysFn) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolSysFn)(farg2); result = 
(int)SUNNonlinSolSetSysFn_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetLSetupFn_Newton(SUNNonlinearSolver farg1, SUNNonlinSolLSetupFn farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolLSetupFn arg2 = (SUNNonlinSolLSetupFn) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolLSetupFn)(farg2); result = (int)SUNNonlinSolSetLSetupFn_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetLSolveFn_Newton(SUNNonlinearSolver farg1, SUNNonlinSolLSolveFn farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolLSolveFn arg2 = (SUNNonlinSolLSolveFn) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolLSolveFn)(farg2); result = (int)SUNNonlinSolSetLSolveFn_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetConvTestFn_Newton(SUNNonlinearSolver farg1, SUNNonlinSolConvTestFn farg2, void *farg3) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolConvTestFn arg2 = (SUNNonlinSolConvTestFn) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolConvTestFn)(farg2); arg3 = (void *)(farg3); result = (int)SUNNonlinSolSetConvTestFn_Newton(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetMaxIters_Newton(SUNNonlinearSolver farg1, int const *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNNonlinSolSetMaxIters_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetNumIters_Newton(SUNNonlinearSolver farg1, long *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = 
(long *)(farg2); result = (int)SUNNonlinSolGetNumIters_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetCurIter_Newton(SUNNonlinearSolver farg1, int *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int *)(farg2); result = (int)SUNNonlinSolGetCurIter_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetNumConvFails_Newton(SUNNonlinearSolver farg1, long *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (long *)(farg2); result = (int)SUNNonlinSolGetNumConvFails_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetSysFn_Newton(SUNNonlinearSolver farg1, void *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolSysFn *arg2 = (SUNNonlinSolSysFn *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolSysFn *)(farg2); result = (int)SUNNonlinSolGetSysFn_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetInfoFile_Newton(SUNNonlinearSolver farg1, void *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNNonlinSolSetInfoFile_Newton(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetPrintLevel_Newton(SUNNonlinearSolver farg1, int const *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNNonlinSolSetPrintLevel_Newton(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/idas/0000755000176200001440000000000014645137104014167 
5ustar liggesusersStanHeaders/src/idas/idaa_io.c0000644000176200001440000005262414645137106015733 0ustar liggesusers/* * ----------------------------------------------------------------- * $Revision$ * $Date$ * ----------------------------------------------------------------- * Programmer(s): Radu Serban and Cosmin Petra @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the optional input and output * functions for the adjoint module in the IDAS solver. * ----------------------------------------------------------------- */ /* * ================================================================= * IMPORTED HEADER FILES * ================================================================= */ #include #include #include "idas_impl.h" #include /* * ================================================================= * IDAA PRIVATE CONSTANTS * ================================================================= */ #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * Optional input functions for ASA * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * IDAAdjSetNoSensi * ----------------------------------------------------------------- * Disables the forward sensitivity analysis in IDASolveF. * ----------------------------------------------------------------- */ int IDAAdjSetNoSensi(void *ida_mem) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; /* Is ida_mem valid? 
*/ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAAdjSetNoSensi", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAAdjSetNoSensi", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; IDAADJ_mem->ia_storeSensi = SUNFALSE; return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Optional input functions for backward integration * ----------------------------------------------------------------- */ int IDASetNonlinearSolverB(void *ida_mem, int which, SUNNonlinearSolver NLS) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Check if ida_mem exists */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetNonlinearSolverB", MSGAM_NULL_IDAMEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Was ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetNonlinearSolverB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetNonlinearSolverB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which' */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if ( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) (IDAB_mem->IDA_mem); return(IDASetNonlinearSolver(ida_memB, NLS)); } int IDASetUserDataB(void *ida_mem, int which, void *user_dataB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; /* Is ida_mem valid? 
*/ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetUserDataB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetUserDataB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetUserDataB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } /* Set user data for this backward problem. */ IDAB_mem->ida_user_data = user_dataB; return(IDA_SUCCESS); } int IDASetMaxOrdB(void *ida_mem, int which, int maxordB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetMaxOrdB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetMaxOrdB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetMaxOrdB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetMaxOrd(ida_memB, maxordB); } int IDASetMaxNumStepsB(void *ida_mem, int which, long int mxstepsB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetMaxNumStepsB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetMaxNumStepsB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetMaxNumStepsB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetMaxNumSteps(ida_memB, mxstepsB); } int IDASetInitStepB(void *ida_mem, int which, realtype hinB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetInitStepB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? 
*/ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetInitStepB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetInitStepB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetInitStep(ida_memB, hinB); } int IDASetMaxStepB(void *ida_mem, int which, realtype hmaxB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetMaxStepB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetMaxStepB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetMaxStepB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetMaxStep(ida_memB, hmaxB); } int IDASetSuppressAlgB(void *ida_mem, int which, booleantype suppressalgB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? 
*/ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetSuppressAlgB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetSuppressAlgB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetSuppressAlgB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetSuppressAlg(ida_memB, suppressalgB); } int IDASetIdB(void *ida_mem, int which, N_Vector idB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetIdB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetIdB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetIdB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetId(ida_memB, idB); } int IDASetConstraintsB(void *ida_mem, int which, N_Vector constraintsB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetConstraintsB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetConstraintsB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetConstraintsB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetConstraints(ida_memB, constraintsB); } /* * ---------------------------------------------------------------- * Input quadrature functions for ASA * ---------------------------------------------------------------- */ int IDASetQuadErrConB(void *ida_mem, int which, int errconQB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASetQuadErrConB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? 
*/ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASetQuadErrConB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASetQuadErrConB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return IDASetQuadErrCon(ida_memB, errconQB); } /* * ----------------------------------------------------------------- * Optional output functions for backward integration * ----------------------------------------------------------------- */ /* * IDAGetAdjIDABmem * * This function returns a (void *) pointer to the IDAS * memory allocated for the backward problem. This pointer can * then be used to call any of the IDAGet* IDAS routines to * extract optional output for the backward integration phase. */ void *IDAGetAdjIDABmem(void *ida_mem, int which) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, 0, "IDAA", "IDAGetAdjIDABmem", MSGAM_NULL_IDAMEM); return(NULL); } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, 0, "IDAA", "IDAGetAdjIDABmem", MSGAM_NO_ADJ); return(NULL); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, 0, "IDAA", "IDAGetAdjIDABmem", MSGAM_BAD_WHICH); return(NULL); } /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; return(ida_memB); } /* * IDAGetAdjCheckPointsInfo * * Loads an array of nckpnts structures of type IDAadjCheckPointRec * defined below. * * The user must allocate space for ckpnt (ncheck+1). */ int IDAGetAdjCheckPointsInfo(void *ida_mem, IDAadjCheckPointRec *ckpnt) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; CkpntMem ck_mem; int i; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetAdjCheckPointsInfo", MSGAM_NULL_IDAMEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAGetAdjCheckPointsInfo", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; i=0; ck_mem = IDAADJ_mem->ck_mem; while (ck_mem != NULL) { ckpnt[i].my_addr = (void *) ck_mem; ckpnt[i].next_addr = (void *) ck_mem->ck_next; ckpnt[i].t0 = ck_mem->ck_t0; ckpnt[i].t1 = ck_mem->ck_t1; ckpnt[i].nstep = ck_mem->ck_nst; ckpnt[i].order = ck_mem->ck_kk; ckpnt[i].step = ck_mem->ck_hh; ck_mem = ck_mem->ck_next; i++; } return(IDA_SUCCESS); } /* IDAGetConsistentICB * * Returns the consistent initial conditions computed by IDACalcICB or * IDACalcICBS * * It must be preceded by a successful call to IDACalcICB or IDACalcICBS * for 'which' backward problem. */ int IDAGetConsistentICB(void *ida_mem, int which, N_Vector yyB0_mod, N_Vector ypB0_mod) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; int flag; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetConsistentICB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? 
*/ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAGetConsistentICB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAGetConsistentICB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; flag = IDAGetConsistentIC(ida_memB, yyB0_mod, ypB0_mod); return(flag); } /* * ----------------------------------------------------------------- * Undocumented development user-callable functions * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * IDAGetAdjDataPointHermite * ----------------------------------------------------------------- * Returns the 2 vectors stored for cubic Hermite interpolation at * the data point 'which'. The user must allocate space for yy and * yd. * * Returns IDA_MEM_NULL if ida_mem is NULL, IDA_ILL_INPUT if the * interpolation type previously specified is not IDA_HERMITE or * IDA_SUCCESS otherwise. * */ int IDAGetAdjDataPointHermite(void *ida_mem, int which, realtype *t, N_Vector yy, N_Vector yd) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; HermiteDataMem content; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetAdjDataPointHermite", MSGAM_NULL_IDAMEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? 
*/ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAGetAdjDataPointHermite", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; dt_mem = IDAADJ_mem->dt_mem; if (IDAADJ_mem->ia_interpType != IDA_HERMITE) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAGetAdjDataPointHermite", MSGAM_WRONG_INTERP); return(IDA_ILL_INPUT); } *t = dt_mem[which]->t; content = (HermiteDataMem) dt_mem[which]->content; if (yy != NULL) N_VScale(ONE, content->y, yy); if (yd != NULL) N_VScale(ONE, content->yd, yd); return(IDA_SUCCESS); } /* * IDAGetAdjDataPointPolynomial * * Returns the vector stored for polynomial interpolation at the * data point 'which'. The user must allocate space for y. * * Returns IDA_MEM_NULL if ida_mem is NULL, IDA_ILL_INPUT if the * interpolation type previously specified is not IDA_POLYNOMIAL or * IDA_SUCCESS otherwise. */ int IDAGetAdjDataPointPolynomial(void *ida_mem, int which, realtype *t, int *order, N_Vector y) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; PolynomialDataMem content; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetAdjDataPointPolynomial", MSGAM_NULL_IDAMEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAGetAdjDataPointPolynomial", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; dt_mem = IDAADJ_mem->dt_mem; if (IDAADJ_mem->ia_interpType != IDA_POLYNOMIAL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAGetAdjDataPointPolynomial", MSGAM_WRONG_INTERP); return(IDA_ILL_INPUT); } *t = dt_mem[which]->t; content = (PolynomialDataMem) dt_mem[which]->content; if (y != NULL) N_VScale(ONE, content->y, y); *order = content->order; return(IDA_SUCCESS); } /* * IDAGetAdjCurrentCheckPoint * * Returns the address of the 'active' check point. 
*/ int IDAGetAdjCurrentCheckPoint(void *ida_mem, void **addr) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetAdjCurrentCheckPoint", MSGAM_NULL_IDAMEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAGetAdjCurrentCheckPoint", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; *addr = (void *) IDAADJ_mem->ia_ckpntData; return(IDA_SUCCESS); } StanHeaders/src/idas/idas_direct.c0000644000176200001440000000452314645137106016613 0ustar liggesusers/*----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Implementation file for the deprecated direct linear solver interface in * IDA; these routines now just wrap the updated IDA generic * linear solver interface in idas_ls.h. 
*-----------------------------------------------------------------*/ #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= Exported Functions (wrappers for equivalent routines in idas_ls.h) =================================================================*/ int IDADlsSetLinearSolver(void *ida_mem, SUNLinearSolver LS, SUNMatrix A) { return(IDASetLinearSolver(ida_mem, LS, A)); } int IDADlsSetJacFn(void *ida_mem, IDADlsJacFn jac) { return(IDASetJacFn(ida_mem, jac)); } int IDADlsGetWorkSpace(void *ida_mem, long int *lenrwLS, long int *leniwLS) { return(IDAGetLinWorkSpace(ida_mem, lenrwLS, leniwLS)); } int IDADlsGetNumJacEvals(void *ida_mem, long int *njevals) { return(IDAGetNumJacEvals(ida_mem, njevals)); } int IDADlsGetNumResEvals(void *ida_mem, long int *nrevalsLS) { return(IDAGetNumLinResEvals(ida_mem, nrevalsLS)); } int IDADlsGetLastFlag(void *ida_mem, long int *flag) { return(IDAGetLastLinFlag(ida_mem, flag)); } char *IDADlsGetReturnFlagName(long int flag) { return(IDAGetLinReturnFlagName(flag)); } int IDADlsSetLinearSolverB(void *ida_mem, int which, SUNLinearSolver LS, SUNMatrix A) { return(IDASetLinearSolverB(ida_mem, which, LS, A)); } int IDADlsSetJacFnB(void *ida_mem, int which, IDADlsJacFnB jacB) { return(IDASetJacFnB(ida_mem, which, jacB)); } int IDADlsSetJacFnBS(void *ida_mem, int which, IDADlsJacFnBS jacBS) { return(IDASetJacFnBS(ida_mem, which, jacBS)); } #ifdef __cplusplus } #endif StanHeaders/src/idas/idas_nls.c0000644000176200001440000002575014645137106016142 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. 
* * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This the implementation file for the IDA nonlinear solver interface. * ---------------------------------------------------------------------------*/ #include "idas_impl.h" #include "sundials/sundials_math.h" /* constant macros */ #define PT0001 RCONST(0.0001) /* real 0.0001 */ #define ONE RCONST(1.0) /* real 1.0 */ #define TWENTY RCONST(20.0) /* real 20.0 */ /* nonlinear solver parameters */ #define MAXIT 4 /* default max number of nonlinear iterations */ #define RATEMAX RCONST(0.9) /* max convergence rate used in divergence check */ /* private functions passed to nonlinear solver */ static int idaNlsResidual(N_Vector ycor, N_Vector res, void* ida_mem); static int idaNlsLSetup(booleantype jbad, booleantype* jcur, void* ida_mem); static int idaNlsLSolve(N_Vector delta, void* ida_mem); static int idaNlsConvTest(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del, realtype tol, N_Vector ewt, void* ida_mem); /* ----------------------------------------------------------------------------- * Exported functions * ---------------------------------------------------------------------------*/ int IDASetNonlinearSolver(void *ida_mem, SUNNonlinearSolver NLS) { IDAMem IDA_mem; int retval; /* return immediately if IDA memory is NULL */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetNonlinearSolver", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* return immediately if NLS memory is NULL */ if (NLS == NULL) { IDAProcessError(NULL, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolver", "NLS must be non-NULL"); return(IDA_ILL_INPUT); } /* check for required nonlinear solver functions */ if ( NLS->ops->gettype == NULL || NLS->ops->solve == NULL || NLS->ops->setsysfn == NULL ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", 
"IDASetNonlinearSolver", "NLS does not support required operations"); return(IDA_ILL_INPUT); } /* check for allowed nonlinear solver types */ if (SUNNonlinSolGetType(NLS) != SUNNONLINEARSOLVER_ROOTFIND) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolver", "NLS type must be SUNNONLINEARSOLVER_ROOTFIND"); return(IDA_ILL_INPUT); } /* free any existing nonlinear solver */ if ((IDA_mem->NLS != NULL) && (IDA_mem->ownNLS)) retval = SUNNonlinSolFree(IDA_mem->NLS); /* set SUNNonlinearSolver pointer */ IDA_mem->NLS = NLS; /* Set NLS ownership flag. If this function was called to attach the default NLS, IDA will set the flag to SUNTRUE after this function returns. */ IDA_mem->ownNLS = SUNFALSE; /* set the nonlinear residual function */ retval = SUNNonlinSolSetSysFn(IDA_mem->NLS, idaNlsResidual); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolver", "Setting nonlinear system function failed"); return(IDA_ILL_INPUT); } /* set convergence test function */ retval = SUNNonlinSolSetConvTestFn(IDA_mem->NLS, idaNlsConvTest, ida_mem); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolver", "Setting convergence test function failed"); return(IDA_ILL_INPUT); } /* set max allowed nonlinear iterations */ retval = SUNNonlinSolSetMaxIters(IDA_mem->NLS, MAXIT); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolver", "Setting maximum number of nonlinear iterations failed"); return(IDA_ILL_INPUT); } /* Set the nonlinear system RES function */ if (!(IDA_mem->ida_res)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolver", "The DAE residual function is NULL"); return(IDA_ILL_INPUT); } IDA_mem->nls_res = IDA_mem->ida_res; return(IDA_SUCCESS); } /*--------------------------------------------------------------- IDASetNlsResFn: This routine sets an alternative user-supplied DAE residual function to use in the evaluation 
of nonlinear system functions. ---------------------------------------------------------------*/ int IDASetNlsResFn(void *ida_mem, IDAResFn res) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetNlsResFn", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (res) IDA_mem->nls_res = res; else IDA_mem->nls_res = IDA_mem->ida_res; return(IDA_SUCCESS); } /*--------------------------------------------------------------- IDAGetNonlinearSystemData: This routine provides access to the relevant data needed to compute the nonlinear system function. ---------------------------------------------------------------*/ int IDAGetNonlinearSystemData(void *ida_mem, realtype *tcur, N_Vector *yypred, N_Vector *yppred, N_Vector *yyn, N_Vector *ypn, N_Vector *res, realtype *cj, void **user_data) { IDAMem IDA_mem; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNonlinearSystemData", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *tcur = IDA_mem->ida_tn; *yypred = IDA_mem->ida_yypredict; *yppred = IDA_mem->ida_yppredict; *yyn = IDA_mem->ida_yy; *ypn = IDA_mem->ida_yp; *res = IDA_mem->ida_savres; *cj = IDA_mem->ida_cj; *user_data = IDA_mem->ida_user_data; return(IDA_SUCCESS); } /* ----------------------------------------------------------------------------- * Private functions * ---------------------------------------------------------------------------*/ int idaNlsInit(IDAMem IDA_mem) { int retval; /* set the linear solver setup wrapper function */ if (IDA_mem->ida_lsetup) retval = SUNNonlinSolSetLSetupFn(IDA_mem->NLS, idaNlsLSetup); else retval = SUNNonlinSolSetLSetupFn(IDA_mem->NLS, NULL); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInit", "Setting the linear solver setup function failed"); return(IDA_NLS_INIT_FAIL); } /* set the linear solver solve wrapper function */ if (IDA_mem->ida_lsolve) retval = SUNNonlinSolSetLSolveFn(IDA_mem->NLS, 
idaNlsLSolve); else retval = SUNNonlinSolSetLSolveFn(IDA_mem->NLS, NULL); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInit", "Setting linear solver solve function failed"); return(IDA_NLS_INIT_FAIL); } /* initialize nonlinear solver */ retval = SUNNonlinSolInitialize(IDA_mem->NLS); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInit", MSG_NLS_INIT_FAIL); return(IDA_NLS_INIT_FAIL); } return(IDA_SUCCESS); } static int idaNlsLSetup(booleantype jbad, booleantype* jcur, void* ida_mem) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsLSetup", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_nsetups++; IDA_mem->ida_forceSetup = SUNFALSE; retval = IDA_mem->ida_lsetup(IDA_mem, IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_savres, IDA_mem->ida_tempv1, IDA_mem->ida_tempv2, IDA_mem->ida_tempv3); /* update Jacobian status */ *jcur = SUNTRUE; /* update convergence test constants */ IDA_mem->ida_cjold = IDA_mem->ida_cj; IDA_mem->ida_cjratio = ONE; IDA_mem->ida_ss = TWENTY; IDA_mem->ida_ssS = TWENTY; if (retval < 0) return(IDA_LSETUP_FAIL); if (retval > 0) return(IDA_LSETUP_RECVR); return(IDA_SUCCESS); } static int idaNlsLSolve(N_Vector delta, void* ida_mem) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsLSolve", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; retval = IDA_mem->ida_lsolve(IDA_mem, delta, IDA_mem->ida_ewt, IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_savres); if (retval < 0) return(IDA_LSOLVE_FAIL); if (retval > 0) return(IDA_LSOLVE_RECVR); return(IDA_SUCCESS); } static int idaNlsResidual(N_Vector ycor, N_Vector res, void* ida_mem) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsResidual", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* update yy and yp 
based on the current correction */ N_VLinearSum(ONE, IDA_mem->ida_yypredict, ONE, ycor, IDA_mem->ida_yy); N_VLinearSum(ONE, IDA_mem->ida_yppredict, IDA_mem->ida_cj, ycor, IDA_mem->ida_yp); /* evaluate residual */ retval = IDA_mem->nls_res(IDA_mem->ida_tn, IDA_mem->ida_yy, IDA_mem->ida_yp, res, IDA_mem->ida_user_data); /* increment the number of residual evaluations */ IDA_mem->ida_nre++; /* save a copy of the residual vector in savres */ N_VScale(ONE, res, IDA_mem->ida_savres); if (retval < 0) return(IDA_RES_FAIL); if (retval > 0) return(IDA_RES_RECVR); return(IDA_SUCCESS); } static int idaNlsConvTest(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del, realtype tol, N_Vector ewt, void* ida_mem) { IDAMem IDA_mem; int m, retval; realtype delnrm; realtype rate; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsConvTest", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* compute the norm of the correction */ delnrm = N_VWrmsNorm(del, ewt); /* get the current nonlinear solver iteration count */ retval = SUNNonlinSolGetCurIter(NLS, &m); if (retval != IDA_SUCCESS) return(IDA_MEM_NULL); /* test for convergence, first directly, then with rate estimate. */ if (m == 0){ IDA_mem->ida_oldnrm = delnrm; if (delnrm <= PT0001 * IDA_mem->ida_toldel) return(SUN_NLS_SUCCESS); } else { rate = SUNRpowerR( delnrm/IDA_mem->ida_oldnrm, ONE/m ); if (rate > RATEMAX) return(SUN_NLS_CONV_RECVR); IDA_mem->ida_ss = rate/(ONE - rate); } if (IDA_mem->ida_ss*delnrm <= tol) return(SUN_NLS_SUCCESS); /* not yet converged */ return(SUN_NLS_CONTINUE); } StanHeaders/src/idas/idas_nls_stg.c0000644000176200001440000002726114645137106017016 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. 
Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This the implementation file for the IDA nonlinear solver interface. * ---------------------------------------------------------------------------*/ #include "idas_impl.h" #include "sundials/sundials_math.h" #include "sundials/sundials_nvector_senswrapper.h" /* constant macros */ #define PT0001 RCONST(0.0001) /* real 0.0001 */ #define ONE RCONST(1.0) /* real 1.0 */ #define TWENTY RCONST(20.0) /* real 20.0 */ /* nonlinear solver parameters */ #define MAXIT 4 /* default max number of nonlinear iterations */ #define RATEMAX RCONST(0.9) /* max convergence rate used in divergence check */ /* private functions passed to nonlinear solver */ static int idaNlsResidualSensStg(N_Vector ycor, N_Vector res, void* ida_mem); static int idaNlsLSetupSensStg(booleantype jbad, booleantype* jcur, void* ida_mem); static int idaNlsLSolveSensStg(N_Vector delta, void* ida_mem); static int idaNlsConvTestSensStg(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del, realtype tol, N_Vector ewt, void* ida_mem); /* ----------------------------------------------------------------------------- * Exported functions * ---------------------------------------------------------------------------*/ int IDASetNonlinearSolverSensStg(void *ida_mem, SUNNonlinearSolver NLS) { IDAMem IDA_mem; int retval, is; /* return immediately if IDA memory is NULL */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetNonlinearSolverSensStg", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* return immediately if NLS memory is NULL */ 
if (NLS == NULL) { IDAProcessError(NULL, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", "NLS must be non-NULL"); return(IDA_ILL_INPUT); } /* check for required nonlinear solver functions */ if ( NLS->ops->gettype == NULL || NLS->ops->solve == NULL || NLS->ops->setsysfn == NULL ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", "NLS does not support required operations"); return(IDA_ILL_INPUT); } /* check for allowed nonlinear solver types */ if (SUNNonlinSolGetType(NLS) != SUNNONLINEARSOLVER_ROOTFIND) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", "NLS type must be SUNNONLINEARSOLVER_ROOTFIND"); return(IDA_ILL_INPUT); } /* check that sensitivities were initialized */ if (!(IDA_mem->ida_sensi)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", MSG_NO_SENSI); return(IDA_ILL_INPUT); } /* check that the staggered corrector was selected */ if (IDA_mem->ida_ism != IDA_STAGGERED) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", "Sensitivity solution method is not IDA_STAGGERED"); return(IDA_ILL_INPUT); } /* free any existing nonlinear solver */ if ((IDA_mem->NLSstg != NULL) && (IDA_mem->ownNLSstg)) retval = SUNNonlinSolFree(IDA_mem->NLSstg); /* set SUNNonlinearSolver pointer */ IDA_mem->NLSstg = NLS; /* Set NLS ownership flag. If this function was called to attach the default NLS, IDA will set the flag to SUNTRUE after this function returns. 
*/ IDA_mem->ownNLSstg = SUNFALSE; /* set the nonlinear residual function */ retval = SUNNonlinSolSetSysFn(IDA_mem->NLSstg, idaNlsResidualSensStg); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", "Setting nonlinear system function failed"); return(IDA_ILL_INPUT); } /* set convergence test function */ retval = SUNNonlinSolSetConvTestFn(IDA_mem->NLSstg, idaNlsConvTestSensStg, ida_mem); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", "Setting convergence test function failed"); return(IDA_ILL_INPUT); } /* set max allowed nonlinear iterations */ retval = SUNNonlinSolSetMaxIters(IDA_mem->NLSstg, MAXIT); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensStg", "Setting maximum number of nonlinear iterations failed"); return(IDA_ILL_INPUT); } /* create vector wrappers if necessary */ if (IDA_mem->stgMallocDone == SUNFALSE) { IDA_mem->ypredictStg = N_VNewEmpty_SensWrapper(IDA_mem->ida_Ns,IDA_mem->ida_sunctx); if (IDA_mem->ypredictStg == NULL) { IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASetNonlinearSolverSensStg", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } IDA_mem->ycorStg = N_VNewEmpty_SensWrapper(IDA_mem->ida_Ns,IDA_mem->ida_sunctx); if (IDA_mem->ycorStg == NULL) { N_VDestroy(IDA_mem->ypredictStg); IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASetNonlinearSolverSensStg", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } IDA_mem->ewtStg = N_VNewEmpty_SensWrapper(IDA_mem->ida_Ns, IDA_mem->ida_sunctx); if (IDA_mem->ewtStg == NULL) { N_VDestroy(IDA_mem->ypredictStg); N_VDestroy(IDA_mem->ycorStg); IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASetNonlinearSolverSensStg", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } IDA_mem->stgMallocDone = SUNTRUE; } /* attach vectors to vector wrappers */ for (is=0; is < IDA_mem->ida_Ns; is++) { NV_VEC_SW(IDA_mem->ypredictStg, is) = IDA_mem->ida_yySpredict[is]; 
NV_VEC_SW(IDA_mem->ycorStg, is) = IDA_mem->ida_eeS[is]; NV_VEC_SW(IDA_mem->ewtStg, is) = IDA_mem->ida_ewtS[is]; } return(IDA_SUCCESS); } /* ----------------------------------------------------------------------------- * Private functions * ---------------------------------------------------------------------------*/ int idaNlsInitSensStg(IDAMem IDA_mem) { int retval; /* set the linear solver setup wrapper function */ if (IDA_mem->ida_lsetup) retval = SUNNonlinSolSetLSetupFn(IDA_mem->NLSstg, idaNlsLSetupSensStg); else retval = SUNNonlinSolSetLSetupFn(IDA_mem->NLSstg, NULL); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInitSensStg", "Setting the linear solver setup function failed"); return(IDA_NLS_INIT_FAIL); } /* set the linear solver solve wrapper function */ if (IDA_mem->ida_lsolve) retval = SUNNonlinSolSetLSolveFn(IDA_mem->NLSstg, idaNlsLSolveSensStg); else retval = SUNNonlinSolSetLSolveFn(IDA_mem->NLSstg, NULL); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInitSensStg", "Setting linear solver solve function failed"); return(IDA_NLS_INIT_FAIL); } /* initialize nonlinear solver */ retval = SUNNonlinSolInitialize(IDA_mem->NLSstg); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInitSensStg", MSG_NLS_INIT_FAIL); return(IDA_NLS_INIT_FAIL); } return(IDA_SUCCESS); } static int idaNlsLSetupSensStg(booleantype jbad, booleantype* jcur, void* ida_mem) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsLSetupSensStg", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_nsetupsS++; retval = IDA_mem->ida_lsetup(IDA_mem, IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_delta, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); /* update Jacobian status */ *jcur = SUNTRUE; /* update convergence test constants */ IDA_mem->ida_cjold = IDA_mem->ida_cj; IDA_mem->ida_cjratio = ONE; 
IDA_mem->ida_ss = TWENTY; IDA_mem->ida_ssS = TWENTY; if (retval < 0) return(IDA_LSETUP_FAIL); if (retval > 0) return(IDA_LSETUP_RECVR); return(IDA_SUCCESS); } static int idaNlsLSolveSensStg(N_Vector deltaStg, void* ida_mem) { IDAMem IDA_mem; int retval, is; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsLSolveSensStg", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; for(is=0;isida_Ns;is++) { retval = IDA_mem->ida_lsolve(IDA_mem, NV_VEC_SW(deltaStg,is), IDA_mem->ida_ewtS[is], IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_delta); if (retval < 0) return(IDA_LSOLVE_FAIL); if (retval > 0) return(IDA_LSOLVE_RECVR); } return(IDA_SUCCESS); } static int idaNlsResidualSensStg(N_Vector ycorStg, N_Vector resStg, void* ida_mem) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsResidualSensStg", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* update yS and ypS based on the current correction */ N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_yySpredict, ONE, NV_VECS_SW(ycorStg), IDA_mem->ida_yyS); N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_ypSpredict, IDA_mem->ida_cj, NV_VECS_SW(ycorStg), IDA_mem->ida_ypS); /* evaluate sens residual */ retval = IDA_mem->ida_resS(IDA_mem->ida_Ns, IDA_mem->ida_tn, IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_delta, IDA_mem->ida_yyS, IDA_mem->ida_ypS, NV_VECS_SW(resStg), IDA_mem->ida_user_dataS, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); /* increment the number of sens residual evaluations */ IDA_mem->ida_nrSe++; if (retval < 0) return(IDA_SRES_FAIL); if (retval > 0) return(IDA_SRES_RECVR); return(IDA_SUCCESS); } static int idaNlsConvTestSensStg(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del, realtype tol, N_Vector ewt, void* ida_mem) { IDAMem IDA_mem; int m, retval; realtype delnrm; realtype rate; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", 
"idaNlsConvTestSensStg", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* compute the norm of the correction */ delnrm = N_VWrmsNorm(del, ewt); /* get the current nonlinear solver iteration count */ retval = SUNNonlinSolGetCurIter(NLS, &m); if (retval != IDA_SUCCESS) return(IDA_MEM_NULL); /* test for convergence, first directly, then with rate estimate. */ if (m == 0){ IDA_mem->ida_oldnrm = delnrm; if (delnrm <= IDA_mem->ida_toldel) return(SUN_NLS_SUCCESS); } else { rate = SUNRpowerR( delnrm/IDA_mem->ida_oldnrm, ONE/m ); if (rate > RATEMAX) return(SUN_NLS_CONV_RECVR); IDA_mem->ida_ssS = rate/(ONE - rate); } if (IDA_mem->ida_ssS*delnrm <= tol) return(SUN_NLS_SUCCESS); /* not yet converged */ return(SUN_NLS_CONTINUE); } StanHeaders/src/idas/idas_ls.c0000644000176200001440000025424214645137106015764 0ustar liggesusers/*----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Alan C. Hindmarsh and Radu Serban @ LLNL *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Implementation file for IDAS' linear solver interface *-----------------------------------------------------------------*/ #include #include #include #include "idas_impl.h" #include "idas_ls_impl.h" #include #include #include #include #include /* constants */ #define MAX_ITERS 3 /* max. 
number of attempts to recover in DQ J*v */ #define ZERO RCONST(0.0) #define PT25 RCONST(0.25) #define PT05 RCONST(0.05) #define PT9 RCONST(0.9) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /*================================================================= PRIVATE FUNCTION PROTOTYPES =================================================================*/ static int idaLsJacBWrapper(realtype tt, realtype c_jB, N_Vector yyB, N_Vector ypB, N_Vector rBr, SUNMatrix JacB, void *ida_mem, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B); static int idaLsJacBSWrapper(realtype tt, realtype c_jB, N_Vector yyB, N_Vector ypB, N_Vector rBr, SUNMatrix JacB, void *ida_mem, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B); static int idaLsPrecSetupB(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *idaadj_mem); static int idaLsPrecSetupBS(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *idaadj_mem); static int idaLsPrecSolveB(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector rvecB, N_Vector zvecB, realtype c_jB, realtype deltaB, void *idaadj_mem); static int idaLsPrecSolveBS(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector rvecB, N_Vector zvecB, realtype c_jB, realtype deltaB, void *idaadj_mem); static int idaLsJacTimesSetupB(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *idaadj_mem); static int idaLsJacTimesSetupBS(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *idaadj_mem); static int idaLsJacTimesVecB(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector vB, N_Vector JvB, realtype c_jB, void *idaadj_mem, N_Vector tmp1B, N_Vector tmp2B); static int idaLsJacTimesVecBS(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector vB, N_Vector JvB, realtype c_jB, void *idaadj_mem, N_Vector tmp1B, N_Vector tmp2B); /*================================================================ PART I - forward problems 
================================================================*/ /*--------------------------------------------------------------- IDASetLinearSolver specifies the linear solver ---------------------------------------------------------------*/ int IDASetLinearSolver(void *ida_mem, SUNLinearSolver LS, SUNMatrix A) { IDAMem IDA_mem; IDALsMem idals_mem; int retval, LSType; booleantype iterative; /* is the solver iterative? */ booleantype matrixbased; /* is a matrix structure used? */ /* Return immediately if any input is NULL */ if (ida_mem == NULL) { IDAProcessError(NULL, IDALS_MEM_NULL, "IDASLS", "IDASetLinearSolver", MSG_LS_IDAMEM_NULL); return(IDALS_MEM_NULL); } if (LS == NULL) { IDAProcessError(NULL, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver", "LS must be non-NULL"); return(IDALS_ILL_INPUT); } IDA_mem = (IDAMem) ida_mem; /* Test if solver is compatible with LS interface */ if ( (LS->ops->gettype == NULL) || (LS->ops->solve == NULL) ) { IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver", "LS object is missing a required operation"); return(IDALS_ILL_INPUT); } /* Retrieve the LS type */ LSType = SUNLinSolGetType(LS); /* Set flags based on LS type */ iterative = (LSType != SUNLINEARSOLVER_DIRECT); matrixbased = ((LSType != SUNLINEARSOLVER_ITERATIVE) && (LSType != SUNLINEARSOLVER_MATRIX_EMBEDDED)); /* Test if vector is compatible with LS interface */ if (IDA_mem->ida_tempv1->ops->nvconst == NULL || IDA_mem->ida_tempv1->ops->nvwrmsnorm == NULL) { IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver", MSG_LS_BAD_NVECTOR); return(IDALS_ILL_INPUT); } /* Ensure that A is NULL when LS is matrix-embedded */ if ((LSType == SUNLINEARSOLVER_MATRIX_EMBEDDED) && (A != NULL)) { IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver", "Incompatible inputs: matrix-embedded LS requires NULL matrix"); return(IDALS_ILL_INPUT); } /* Check for compatible LS type, matrix and "atimes" support */ if (iterative) { if 
(IDA_mem->ida_tempv1->ops->nvgetlength == NULL) {
    /* NOTE(review): this chunk opens mid-condition; the start of the
       enclosing 'if' (iterative-LS N_Vector capability checks) lies
       before this excerpt. */
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS",
                    "IDASetLinearSolver", MSG_LS_BAD_NVECTOR);
    return(IDALS_ILL_INPUT);
  }

  /* non-embedded iterative solvers must expose residual/iteration queries */
  if (LSType != SUNLINEARSOLVER_MATRIX_EMBEDDED) {
    if (LS->ops->resid == NULL || LS->ops->numiters == NULL) {
      IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver",
                      "Iterative LS object requires 'resid' and 'numiters' routines");
      return(IDALS_ILL_INPUT);
    }
  }

  /* matrix-free iterative solvers must support the ATimes operation */
  if (!matrixbased && (LSType != SUNLINEARSOLVER_MATRIX_EMBEDDED) &&
      (LS->ops->setatimes == NULL)) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver",
                    "Incompatible inputs: iterative LS must support ATimes routine");
    return(IDALS_ILL_INPUT);
  }

  /* matrix-based iterative solvers need a template Jacobian matrix */
  if (matrixbased && (A == NULL)) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver",
                    "Incompatible inputs: matrix-iterative LS requires non-NULL matrix");
    return(IDALS_ILL_INPUT);
  }

 } else if (A == NULL) {
  /* direct solvers always require a template Jacobian matrix */
  IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetLinearSolver",
                  "Incompatible inputs: direct LS requires non-NULL matrix");
  return(IDALS_ILL_INPUT);
 }

  /* free any existing system solver attached to IDA */
  if (IDA_mem->ida_lfree)  IDA_mem->ida_lfree(IDA_mem);

  /* Set four main system linear solver function fields in IDA_mem */
  IDA_mem->ida_linit  = idaLsInitialize;
  IDA_mem->ida_lsetup = idaLsSetup;
  IDA_mem->ida_lsolve = idaLsSolve;
  IDA_mem->ida_lfree  = idaLsFree;

  /* Set ida_lperf if using an iterative SUNLinearSolver object */
  IDA_mem->ida_lperf = (iterative) ? idaLsPerf : NULL;

  /* Allocate memory for IDALsMemRec */
  idals_mem = NULL;
  idals_mem = (IDALsMem) malloc(sizeof(struct IDALsMemRec));
  if (idals_mem == NULL) {
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASLS",
                    "IDASetLinearSolver", MSG_LS_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }
  memset(idals_mem, 0, sizeof(struct IDALsMemRec));

  /* set SUNLinearSolver pointer */
  idals_mem->LS = LS;

  /* Linear solver type information */
  idals_mem->iterative   = iterative;
  idals_mem->matrixbased = matrixbased;

  /* Set defaults for Jacobian-related fields: when a matrix template is
     supplied, start with the internal difference-quotient Jacobian */
  idals_mem->J = A;
  if (A != NULL) {
    idals_mem->jacDQ  = SUNTRUE;
    idals_mem->jac    = idaLsDQJac;
    idals_mem->J_data = IDA_mem;
  } else {
    idals_mem->jacDQ  = SUNFALSE;
    idals_mem->jac    = NULL;
    idals_mem->J_data = NULL;
  }
  idals_mem->jtimesDQ = SUNTRUE;
  idals_mem->jtsetup  = NULL;
  idals_mem->jtimes   = idaLsDQJtimes;
  idals_mem->jt_res   = IDA_mem->ida_res;
  idals_mem->jt_data  = IDA_mem;

  /* Set defaults for preconditioner-related fields */
  idals_mem->pset   = NULL;
  idals_mem->psolve = NULL;
  idals_mem->pfree  = NULL;
  idals_mem->pdata  = IDA_mem->ida_user_data;

  /* Initialize counters */
  idaLsInitializeCounters(idals_mem);

  /* Set default values for the rest of the Ls parameters */
  idals_mem->eplifac   = PT05;
  idals_mem->dqincfac  = ONE;
  idals_mem->last_flag = IDALS_SUCCESS;

  /* If LS supports ATimes, attach IDALs routine */
  if (LS->ops->setatimes) {
    retval = SUNLinSolSetATimes(LS, IDA_mem, idaLsATimes);
    if (retval != SUNLS_SUCCESS) {
      IDAProcessError(IDA_mem, IDALS_SUNLS_FAIL, "IDASLS", "IDASetLinearSolver",
                      "Error in calling SUNLinSolSetATimes");
      free(idals_mem); idals_mem = NULL;
      return(IDALS_SUNLS_FAIL);
    }
  }

  /* If LS supports preconditioning, initialize pset/psol to NULL */
  if (LS->ops->setpreconditioner) {
    retval = SUNLinSolSetPreconditioner(LS, IDA_mem, NULL, NULL);
    if (retval != SUNLS_SUCCESS) {
      IDAProcessError(IDA_mem, IDALS_SUNLS_FAIL, "IDASLS", "IDASetLinearSolver",
                      "Error in calling SUNLinSolSetPreconditioner");
      free(idals_mem); idals_mem = NULL;
      return(IDALS_SUNLS_FAIL);
    }
  }

  /* Allocate memory for ytemp, yptemp and x; on failure, unwind any
     vectors already cloned before freeing the interface memory */
  idals_mem->ytemp = N_VClone(IDA_mem->ida_tempv1);
  if (idals_mem->ytemp == NULL) {
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASLS",
                    "IDASetLinearSolver", MSG_LS_MEM_FAIL);
    free(idals_mem); idals_mem = NULL;
    return(IDALS_MEM_FAIL);
  }

  idals_mem->yptemp = N_VClone(IDA_mem->ida_tempv1);
  if (idals_mem->yptemp == NULL) {
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASLS",
                    "IDASetLinearSolver", MSG_LS_MEM_FAIL);
    N_VDestroy(idals_mem->ytemp);
    free(idals_mem); idals_mem = NULL;
    return(IDALS_MEM_FAIL);
  }

  idals_mem->x = N_VClone(IDA_mem->ida_tempv1);
  if (idals_mem->x == NULL) {
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASLS",
                    "IDASetLinearSolver", MSG_LS_MEM_FAIL);
    N_VDestroy(idals_mem->ytemp);
    N_VDestroy(idals_mem->yptemp);
    free(idals_mem); idals_mem = NULL;
    return(IDALS_MEM_FAIL);
  }

  /* For iterative LS, compute default norm conversion factor sqrtN */
  if (iterative)
    idals_mem->nrmfac = SUNRsqrt( N_VGetLength(idals_mem->ytemp) );

  /* For matrix-based LS, enable solution scaling */
  if (matrixbased)
    idals_mem->scalesol = SUNTRUE;
  else
    idals_mem->scalesol = SUNFALSE;

  /* Attach linear solver memory to integrator memory */
  IDA_mem->ida_lmem = idals_mem;

  return(IDALS_SUCCESS);
}


/*===============================================================
  Optional input/output routines
  ===============================================================*/


/* IDASetJacFn specifies the Jacobian function */
int IDASetJacFn(void *ida_mem, IDALsJacFn jac)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "IDALsSetJacFn",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* return with failure if jac cannot be used (no template matrix) */
  if ((jac != NULL) && (idals_mem->J == NULL)) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetJacFn",
                    "Jacobian routine cannot be supplied for NULL SUNMatrix");
    return(IDALS_ILL_INPUT);
  }

  /* set Jacobian routine pointer, and update relevant flags */
if (jac != NULL) {
    idals_mem->jacDQ  = SUNFALSE;
    idals_mem->jac    = jac;
    idals_mem->J_data = IDA_mem->ida_user_data;
  } else {
    /* NULL jac reverts to the internal difference-quotient Jacobian */
    idals_mem->jacDQ  = SUNTRUE;
    idals_mem->jac    = idaLsDQJac;
    idals_mem->J_data = IDA_mem;
  }

  return(IDALS_SUCCESS);
}


/* IDASetEpsLin specifies the nonlinear -> linear tolerance scale factor */
int IDASetEpsLin(void *ida_mem, realtype eplifac)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "IDASetEpsLin",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* Check for legal eplifac */
  if (eplifac < ZERO) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS",
                    "IDASetEpsLin", MSG_LS_NEG_EPLIFAC);
    return(IDALS_ILL_INPUT);
  }

  /* zero requests the default factor PT05 */
  idals_mem->eplifac = (eplifac == ZERO) ? PT05 : eplifac;

  return(IDALS_SUCCESS);
}


/* IDASetWRMSNormFactor sets or computes the factor to use when
   converting from the integrator tolerance to the linear solver
   tolerance (WRMS to L2 norm). */
int IDASetLSNormFactor(void *ida_mem, realtype nrmfac)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "IDASetLSNormFactor",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  if (nrmfac > ZERO) {
    /* user-provided factor */
    idals_mem->nrmfac = nrmfac;
  } else if (nrmfac < ZERO) {
    /* compute factor for WRMS norm with dot product */
    N_VConst(ONE, idals_mem->ytemp);
    idals_mem->nrmfac = SUNRsqrt(N_VDotProd(idals_mem->ytemp, idals_mem->ytemp));
  } else {
    /* compute default factor for WRMS norm from vector length */
    idals_mem->nrmfac = SUNRsqrt(N_VGetLength(idals_mem->ytemp));
  }

  return(IDALS_SUCCESS);
}


/* IDASetLinearSolutionScaling enables or disables scaling the
   linear solver solution to account for changes in cj. */
int IDASetLinearSolutionScaling(void *ida_mem, booleantype onoff)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "IDASetLinearSolutionScaling",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* check for valid solver type (scaling only applies to matrix-based LS) */
  if (!(idals_mem->matrixbased))  return(IDALS_ILL_INPUT);

  /* set solution scaling flag */
  idals_mem->scalesol = onoff;

  return(IDALS_SUCCESS);
}


/* IDASetIncrementFactor specifies increment factor for DQ approximations to Jv */
int IDASetIncrementFactor(void *ida_mem, realtype dqincfac)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "IDASetIncrementFactor",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* Check for legal dqincfac */
  if (dqincfac <= ZERO) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS",
                    "IDASetIncrementFactor", MSG_LS_NEG_DQINCFAC);
    return(IDALS_ILL_INPUT);
  }

  idals_mem->dqincfac = dqincfac;

  return(IDALS_SUCCESS);
}


/* IDASetPreconditioner specifies the user-supplied psetup and psolve routines */
int IDASetPreconditioner(void *ida_mem, IDALsPrecSetupFn psetup,
                         IDALsPrecSolveFn psolve)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  SUNPSetupFn idals_psetup;
  SUNPSolveFn idals_psolve;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "IDASetPreconditioner",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* store function pointers for user-supplied routines in IDALs interface */
  idals_mem->pset   = psetup;
  idals_mem->psolve = psolve;

  /* issue error if LS object does not allow user-supplied preconditioning */
  if (idals_mem->LS->ops->setpreconditioner == NULL) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetPreconditioner",
                    "SUNLinearSolver object does not support user-supplied preconditioning");
    return(IDALS_ILL_INPUT);
  }

  /* notify iterative linear solver to call IDALs interface routines */
internal finite difference approximation */
  if (!(idals_mem->jtimesDQ)) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "IDASetJacTimesResFn",
                    "Internal finite-difference Jacobian-vector product is disabled.");
    return(IDALS_ILL_INPUT);
  }

  /* store function pointers for Res function (NULL implies use DAE Res) */
  if (jtimesResFn != NULL)
    idals_mem->jt_res = jtimesResFn;
  else
    idals_mem->jt_res = IDA_mem->ida_res;

  return(IDALS_SUCCESS);
}


/* IDAGetLinWorkSpace returns the length of workspace allocated
   for the IDALS linear solver interface */
int IDAGetLinWorkSpace(void *ida_mem, long int *lenrwLS, long int *leniwLS)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  sunindextype lrw1, liw1;
  long int lrw, liw;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetLinWorkSpace",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* start with fixed sizes plus vector/matrix pointers */
  *lenrwLS = 3;
  *leniwLS = 34;

  /* add N_Vector sizes (ytemp, yptemp, x are clones of tempv1) */
  if (IDA_mem->ida_tempv1->ops->nvspace) {
    N_VSpace(IDA_mem->ida_tempv1, &lrw1, &liw1);
    *lenrwLS += 3*lrw1;
    *leniwLS += 3*liw1;
  }

  /* add LS sizes */
  if (idals_mem->LS->ops->space) {
    retval = SUNLinSolSpace(idals_mem->LS, &lrw, &liw);
    if (retval == 0) {
      *lenrwLS += lrw;
      *leniwLS += liw;
    }
  }

  return(IDALS_SUCCESS);
}


/* IDAGetNumJacEvals returns the number of Jacobian evaluations */
int IDAGetNumJacEvals(void *ida_mem, long int *njevals)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumJacEvals",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *njevals = idals_mem->nje;
  return(IDALS_SUCCESS);
}


/* IDAGetNumPrecEvals returns the number of preconditioner evaluations */
int IDAGetNumPrecEvals(void *ida_mem, long int *npevals)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumPrecEvals",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *npevals = idals_mem->npe;
  return(IDALS_SUCCESS);
}


/* IDAGetNumPrecSolves returns the number of preconditioner solves */
int IDAGetNumPrecSolves(void *ida_mem, long int *npsolves)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumPrecSolves",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *npsolves = idals_mem->nps;
  return(IDALS_SUCCESS);
}


/* IDAGetNumLinIters returns the number of linear iterations */
int IDAGetNumLinIters(void *ida_mem, long int *nliters)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumLinIters",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *nliters = idals_mem->nli;
  return(IDALS_SUCCESS);
}


/* IDAGetNumLinConvFails returns the number of linear convergence failures */
int IDAGetNumLinConvFails(void *ida_mem, long int *nlcfails)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumLinConvFails",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *nlcfails = idals_mem->ncfl;
  return(IDALS_SUCCESS);
}


/* IDAGetNumJTSetupEvals returns the number of calls to the
   user-supplied Jacobian-vector product setup routine */
int IDAGetNumJTSetupEvals(void *ida_mem, long int *njtsetups)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumJTSetupEvals",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *njtsetups = idals_mem->njtsetup;
  return(IDALS_SUCCESS);
}


/* IDAGetNumJtimesEvals returns the number of calls to the
   Jacobian-vector product multiply routine */
int IDAGetNumJtimesEvals(void *ida_mem, long int *njvevals)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumJtimesEvals",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *njvevals = idals_mem->njtimes;
  return(IDALS_SUCCESS);
}


/* IDAGetNumLinResEvals returns the number of calls to the DAE
   residual needed for the DQ Jacobian approximation or J*v
   product approximation */
int IDAGetNumLinResEvals(void *ida_mem, long int *nrevalsLS)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetNumLinResEvals",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *nrevalsLS = idals_mem->nreDQ;
  return(IDALS_SUCCESS);
}


/* IDAGetLastLinFlag returns the last flag set in a IDALS function */
int IDAGetLastLinFlag(void *ida_mem, long int *flag)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure; store output and return */
  retval = idaLs_AccessLMem(ida_mem, "IDAGetLastLinFlag",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);
  *flag = idals_mem->last_flag;
  return(IDALS_SUCCESS);
}


/* IDAGetLinReturnFlagName translates from the integer error code
   returned by an IDALs routine to the corresponding string
   equivalent for that flag.
   NOTE(review): the returned buffer is heap-allocated (malloc'd)
   here; the caller owns it and must free it. */
char *IDAGetLinReturnFlagName(long int flag)
{
  char *name = (char *)malloc(30*sizeof(char));

  switch(flag) {
  case IDALS_SUCCESS:
    sprintf(name,"IDALS_SUCCESS");
    break;
  case IDALS_MEM_NULL:
    sprintf(name,"IDALS_MEM_NULL");
    break;
  case IDALS_LMEM_NULL:
    sprintf(name,"IDALS_LMEM_NULL");
    break;
  case IDALS_ILL_INPUT:
    sprintf(name,"IDALS_ILL_INPUT");
    break;
  case IDALS_MEM_FAIL:
    sprintf(name,"IDALS_MEM_FAIL");
    break;
  case IDALS_PMEM_NULL:
    sprintf(name,"IDALS_PMEM_NULL");
    break;
  case IDALS_JACFUNC_UNRECVR:
    sprintf(name,"IDALS_JACFUNC_UNRECVR");
    break;
  case IDALS_JACFUNC_RECVR:
    sprintf(name,"IDALS_JACFUNC_RECVR");
    break;
  case
IDALS_SUNMAT_FAIL:
    sprintf(name,"IDALS_SUNMAT_FAIL");
    break;
  case IDALS_SUNLS_FAIL:
    sprintf(name,"IDALS_SUNLS_FAIL");
    break;
  default:
    sprintf(name,"NONE");
  }

  return(name);
}


/*===============================================================
  IDASLS Private functions
  ===============================================================*/

/*---------------------------------------------------------------
  idaLsATimes:

  This routine generates the matrix-vector product z = Jv, where
  J is the system Jacobian, by calling either the user provided
  routine or the internal DQ routine.  The return value is the
  same as the value returned by jtimes --
  0 if successful, nonzero otherwise.
  ---------------------------------------------------------------*/
int idaLsATimes(void *ida_mem, N_Vector v, N_Vector z)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "idaLsATimes",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* call Jacobian-times-vector product routine
     (either user-supplied or internal DQ) */
  retval = idals_mem->jtimes(IDA_mem->ida_tn, idals_mem->ycur,
                             idals_mem->ypcur, idals_mem->rcur,
                             v, z, IDA_mem->ida_cj,
                             idals_mem->jt_data, idals_mem->ytemp,
                             idals_mem->yptemp);
  idals_mem->njtimes++;
  return(retval);
}


/*---------------------------------------------------------------
  idaLsPSetup:

  This routine interfaces between the generic iterative linear
  solvers and the user's psetup routine. It passes to psetup all
  required state information from ida_mem. Its return value is
  the same as that returned by psetup. Note that the generic
  iterative linear solvers guarantee that idaLsPSetup will only
  be called in the case that the user's psetup routine is non-NULL.
  ---------------------------------------------------------------*/
int idaLsPSetup(void *ida_mem)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "idaLsPSetup",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* Call user pset routine to update preconditioner and possibly
     reset jcur (pass !jbad as update suggestion) */
  retval = idals_mem->pset(IDA_mem->ida_tn, idals_mem->ycur,
                           idals_mem->ypcur, idals_mem->rcur,
                           IDA_mem->ida_cj, idals_mem->pdata);
  idals_mem->npe++;
  return(retval);
}


/*---------------------------------------------------------------
  idaLsPSolve:

  This routine interfaces between the generic SUNLinSolSolve
  routine and the user's psolve routine.  It passes to psolve all
  required state information from ida_mem.  Its return value is
  the same as that returned by psolve.  Note that the generic
  SUNLinSol solver guarantees that IDASilsPSolve will not be
  called in the case in which preconditioning is not done. This
  is the only case in which the user's psolve routine is allowed
  to be NULL.
  ---------------------------------------------------------------*/
int idaLsPSolve(void *ida_mem, N_Vector r, N_Vector z, realtype tol, int lr)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  retval = idaLs_AccessLMem(ida_mem, "idaLsPSolve",
                            &IDA_mem, &idals_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* call the user-supplied psolve routine, and accumulate count */
  retval = idals_mem->psolve(IDA_mem->ida_tn, idals_mem->ycur,
                             idals_mem->ypcur, idals_mem->rcur, r, z,
                             IDA_mem->ida_cj, tol, idals_mem->pdata);
  idals_mem->nps++;
  return(retval);
}


/*---------------------------------------------------------------
  idaLsDQJac:

  This routine is a wrapper for the Dense and Band
  implementations of the difference quotient Jacobian
  approximation routines.
---------------------------------------------------------------*/
int idaLsDQJac(realtype t, realtype c_j, N_Vector y, N_Vector yp,
               N_Vector r, SUNMatrix Jac, void *ida_mem,
               N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
  int retval;
  IDAMem IDA_mem;
  IDA_mem = (IDAMem) ida_mem;

  /* access IDAMem structure */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASLS",
                    "idaLsDQJac", MSG_LS_IDAMEM_NULL);
    return(IDALS_MEM_NULL);
  }

  /* verify that Jac is non-NULL */
  if (Jac == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASLS",
                    "idaLsDQJac", MSG_LS_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }

  /* Verify that N_Vector supports required operations */
  if (IDA_mem->ida_tempv1->ops->nvcloneempty == NULL ||
      IDA_mem->ida_tempv1->ops->nvlinearsum == NULL ||
      IDA_mem->ida_tempv1->ops->nvdestroy == NULL ||
      IDA_mem->ida_tempv1->ops->nvscale == NULL ||
      IDA_mem->ida_tempv1->ops->nvgetarraypointer == NULL ||
      IDA_mem->ida_tempv1->ops->nvsetarraypointer == NULL) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS",
                    "idaLsDQJac", MSG_LS_BAD_NVECTOR);
    return(IDALS_ILL_INPUT);
  }

  /* Call the matrix-structure-specific DQ approximation routine */
  if (SUNMatGetID(Jac) == SUNMATRIX_DENSE) {
    retval = idaLsDenseDQJac(t, c_j, y, yp, r, Jac, IDA_mem, tmp1);
  } else if (SUNMatGetID(Jac) == SUNMATRIX_BAND) {
    retval = idaLsBandDQJac(t, c_j, y, yp, r, Jac, IDA_mem,
                            tmp1, tmp2, tmp3);
  } else {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDASLS", "idaLsDQJac",
                    "unrecognized matrix type for idaLsDQJac");
    retval = IDA_ILL_INPUT;
  }
  return(retval);
}


/*---------------------------------------------------------------
  idaLsDenseDQJac

  This routine generates a dense difference quotient approximation
  to the Jacobian F_y + c_j*F_y'. It assumes a dense SUNmatrix
  input (stored column-wise, and that elements within each column
  are contiguous).
The address of the jth column of J is obtained via the function
  SUNDenseMatrix_Column() and this pointer is associated with an
  N_Vector using the N_VGetArrayPointer/N_VSetArrayPointer
  functions.  Finally, the actual computation of the jth column
  of the Jacobian is done with a call to N_VLinearSum.
  ---------------------------------------------------------------*/
int idaLsDenseDQJac(realtype tt, realtype c_j, N_Vector yy,
                    N_Vector yp, N_Vector rr, SUNMatrix Jac,
                    IDAMem IDA_mem, N_Vector tmp1)
{
  realtype inc, inc_inv, yj, ypj, srur, conj;
  realtype *y_data, *yp_data, *ewt_data, *cns_data = NULL;
  N_Vector rtemp, jthCol;
  sunindextype j, N;
  IDALsMem idals_mem;
  int retval = 0;

  /* access LsMem interface structure */
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  /* access matrix dimension */
  N = SUNDenseMatrix_Columns(Jac);

  /* Rename work vectors for readability */
  rtemp = tmp1;

  /* Create an empty vector for matrix column calculations */
  jthCol = N_VCloneEmpty(tmp1);

  /* Obtain pointers to the data for ewt, yy, yp. */
  ewt_data = N_VGetArrayPointer(IDA_mem->ida_ewt);
  y_data   = N_VGetArrayPointer(yy);
  yp_data  = N_VGetArrayPointer(yp);
  if(IDA_mem->ida_constraintsSet)
    cns_data = N_VGetArrayPointer(IDA_mem->ida_constraints);

  srur = SUNRsqrt(IDA_mem->ida_uround);

  for (j=0; j < N; j++) {

    /* Generate the jth col of J(tt,yy,yp) as delta(F)/delta(y_j). */

    /* Set data address of jthCol, and save y_j and yp_j values. */
    N_VSetArrayPointer(SUNDenseMatrix_Column(Jac,j), jthCol);
    yj = y_data[j];
    ypj = yp_data[j];

    /* Set increment inc to y_j based on sqrt(uround)*abs(y_j), with
    adjustments using yp_j and ewt_j if this is small, and a further
    adjustment to give it the same sign as hh*yp_j. */
    inc = SUNMAX( srur * SUNMAX( SUNRabs(yj), SUNRabs(IDA_mem->ida_hh*ypj) ),
                  ONE/ewt_data[j] );
    if (IDA_mem->ida_hh*ypj < ZERO)  inc = -inc;
    inc = (yj + inc) - yj;

    /* Adjust sign(inc) again if y_j has an inequality constraint. */
    if (IDA_mem->ida_constraintsSet) {
      conj = cns_data[j];
      if (SUNRabs(conj) == ONE)      {if((yj+inc)*conj <  ZERO) inc = -inc;}
      else if (SUNRabs(conj) == TWO) {if((yj+inc)*conj <= ZERO) inc = -inc;}
    }

    /* Increment y_j and yp_j, call res, and break on error return. */
    y_data[j] += inc;
    yp_data[j] += c_j*inc;
    retval = IDA_mem->ida_res(tt, yy, yp, rtemp, IDA_mem->ida_user_data);
    idals_mem->nreDQ++;
    if (retval != 0) break;

    /* Construct difference quotient in jthCol */
    inc_inv = ONE/inc;
    N_VLinearSum(inc_inv, rtemp, -inc_inv, rr, jthCol);

    /* reset y_j, yp_j */
    y_data[j] = yj;
    yp_data[j] = ypj;
  }

  /* Destroy jthCol vector */
  N_VSetArrayPointer(NULL, jthCol);  /* SHOULDN'T BE NEEDED */
  N_VDestroy(jthCol);

  return(retval);
}


/*---------------------------------------------------------------
  idaLsBandDQJac

  This routine generates a banded difference quotient approximation
  JJ to the DAE system Jacobian J.  It assumes a band SUNMatrix
  input (stored column-wise, and that elements within each column
  are contiguous).  This makes it possible to get the address
  of a column of JJ via the function SUNBandMatrix_Column(). The
  columns of the Jacobian are constructed using mupper + mlower + 1
  calls to the res routine, and appropriate differencing.
  The return value is either IDABAND_SUCCESS = 0, or the nonzero
  value returned by the res routine, if any.
---------------------------------------------------------------*/ int idaLsBandDQJac(realtype tt, realtype c_j, N_Vector yy, N_Vector yp, N_Vector rr, SUNMatrix Jac, IDAMem IDA_mem, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { realtype inc, inc_inv, yj, ypj, srur, conj, ewtj; realtype *y_data, *yp_data, *ewt_data, *cns_data = NULL; realtype *ytemp_data, *yptemp_data, *rtemp_data, *r_data, *col_j; N_Vector rtemp, ytemp, yptemp; sunindextype i, j, i1, i2, width, ngroups, group; sunindextype N, mupper, mlower; IDALsMem idals_mem; int retval = 0; /* access LsMem interface structure */ idals_mem = (IDALsMem) IDA_mem->ida_lmem; /* access matrix dimensions */ N = SUNBandMatrix_Columns(Jac); mupper = SUNBandMatrix_UpperBandwidth(Jac); mlower = SUNBandMatrix_LowerBandwidth(Jac); /* Rename work vectors for use as temporary values of r, y and yp */ rtemp = tmp1; ytemp = tmp2; yptemp= tmp3; /* Obtain pointers to the data for all eight vectors used. */ ewt_data = N_VGetArrayPointer(IDA_mem->ida_ewt); r_data = N_VGetArrayPointer(rr); y_data = N_VGetArrayPointer(yy); yp_data = N_VGetArrayPointer(yp); rtemp_data = N_VGetArrayPointer(rtemp); ytemp_data = N_VGetArrayPointer(ytemp); yptemp_data = N_VGetArrayPointer(yptemp); if (IDA_mem->ida_constraintsSet) cns_data = N_VGetArrayPointer(IDA_mem->ida_constraints); /* Initialize ytemp and yptemp. */ N_VScale(ONE, yy, ytemp); N_VScale(ONE, yp, yptemp); /* Compute miscellaneous values for the Jacobian computation. */ srur = SUNRsqrt(IDA_mem->ida_uround); width = mlower + mupper + 1; ngroups = SUNMIN(width, N); /* Loop over column groups. */ for (group=1; group <= ngroups; group++) { /* Increment all yy[j] and yp[j] for j in this group. */ for (j=group-1; jida_hh*ypj) ), ONE/ewtj ); if (IDA_mem->ida_hh*ypj < ZERO) inc = -inc; inc = (yj + inc) - yj; /* Adjust sign(inc) again if yj has an inequality constraint. 
*/ if (IDA_mem->ida_constraintsSet) { conj = cns_data[j]; if (SUNRabs(conj) == ONE) {if((yj+inc)*conj < ZERO) inc = -inc;} else if (SUNRabs(conj) == TWO) {if((yj+inc)*conj <= ZERO) inc = -inc;} } /* Increment yj and ypj. */ ytemp_data[j] += inc; yptemp_data[j] += IDA_mem->ida_cj*inc; } /* Call res routine with incremented arguments. */ retval = IDA_mem->ida_res(tt, ytemp, yptemp, rtemp, IDA_mem->ida_user_data); idals_mem->nreDQ++; if (retval != 0) break; /* Loop over the indices j in this group again. */ for (j=group-1; jida_hh*ypj) ), ONE/ewtj ); if (IDA_mem->ida_hh*ypj < ZERO) inc = -inc; inc = (yj + inc) - yj; if (IDA_mem->ida_constraintsSet) { conj = cns_data[j]; if (SUNRabs(conj) == ONE) {if((yj+inc)*conj < ZERO) inc = -inc;} else if (SUNRabs(conj) == TWO) {if((yj+inc)*conj <= ZERO) inc = -inc;} } /* Load the difference quotient Jacobian elements for column j */ inc_inv = ONE/inc; i1 = SUNMAX(0, j-mupper); i2 = SUNMIN(j+mlower,N-1); for (i=i1; i<=i2; i++) SM_COLUMN_ELEMENT_B(col_j,i,j) = inc_inv * (rtemp_data[i]-r_data[i]); } } return(retval); } /*--------------------------------------------------------------- idaLsDQJtimes This routine generates a difference quotient approximation to the matrix-vector product z = Jv, where J is the system Jacobian. The approximation is Jv = [F(t,y1,yp1) - F(t,y,yp)]/sigma, where y1 = y + sigma*v, yp1 = yp + cj*sigma*v, sigma = sqrt(Neq)*dqincfac. The return value from the call to res is saved in order to set the return flag from idaLsSolve. 
---------------------------------------------------------------*/ int idaLsDQJtimes(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, N_Vector v, N_Vector Jv, realtype c_j, void *ida_mem, N_Vector work1, N_Vector work2) { IDAMem IDA_mem; IDALsMem idals_mem; N_Vector y_tmp, yp_tmp; realtype sig, siginv; int iter, retval; SUNLinearSolver_ID LSID; /* access IDALsMem structure */ retval = idaLs_AccessLMem(ida_mem, "idaLsDQJtimes", &IDA_mem, &idals_mem); if (retval != IDALS_SUCCESS) return(retval); LSID = SUNLinSolGetID(idals_mem->LS); if (LSID == SUNLINEARSOLVER_SPGMR || LSID == SUNLINEARSOLVER_SPFGMR) sig = idals_mem->nrmfac * idals_mem->dqincfac; else sig = idals_mem->dqincfac / N_VWrmsNorm(v, IDA_mem->ida_ewt); /* Rename work1 and work2 for readibility */ y_tmp = work1; yp_tmp = work2; for (iter=0; iterjt_res(tt, y_tmp, yp_tmp, Jv, IDA_mem->ida_user_data); idals_mem->nreDQ++; if (retval == 0) break; if (retval < 0) return(-1); sig *= PT25; } if (retval > 0) return(+1); /* Set Jv to [Jv - rr]/sig and return. 
*/ siginv = ONE/sig; N_VLinearSum(siginv, Jv, -siginv, rr, Jv); return(0); } /*--------------------------------------------------------------- idaLsInitialize This routine performs remaining initializations specific to the iterative linear solver interface (and solver itself) ---------------------------------------------------------------*/ int idaLsInitialize(IDAMem IDA_mem) { IDALsMem idals_mem; int retval; /* access IDALsMem structure */ if (IDA_mem->ida_lmem == NULL) { IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASLS", "idaLsInitialize", MSG_LS_LMEM_NULL); return(IDALS_LMEM_NULL); } idals_mem = (IDALsMem) IDA_mem->ida_lmem; /* Test for valid combinations of matrix & Jacobian routines: */ if (idals_mem->J == NULL) { /* If SUNMatrix A is NULL: ensure 'jac' function pointer is NULL */ idals_mem->jacDQ = SUNFALSE; idals_mem->jac = NULL; idals_mem->J_data = NULL; } else if (idals_mem->jacDQ) { /* If J is non-NULL, and 'jac' is not user-supplied: - if J is dense or band, ensure that our DQ approx. 
is used
       - otherwise => error */
    retval = 0;
    if (idals_mem->J->ops->getid) {

      if ( (SUNMatGetID(idals_mem->J) == SUNMATRIX_DENSE) ||
           (SUNMatGetID(idals_mem->J) == SUNMATRIX_BAND) ) {
        idals_mem->jac    = idaLsDQJac;
        idals_mem->J_data = IDA_mem;
      } else {
        retval++;
      }

    } else {
      retval++;
    }
    if (retval) {
      IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS", "idaLsInitialize",
                      "No Jacobian constructor available for SUNMatrix type");
      idals_mem->last_flag = IDALS_ILL_INPUT;
      return(IDALS_ILL_INPUT);
    }

  } else {

    /* If J is non-NULL, and 'jac' is user-supplied,
       reset J_data pointer (just in case) */
    idals_mem->J_data = IDA_mem->ida_user_data;
  }

  /* reset counters */
  idaLsInitializeCounters(idals_mem);

  /* Set Jacobian-related fields, based on jtimesDQ */
  if (idals_mem->jtimesDQ) {
    idals_mem->jtsetup = NULL;
    idals_mem->jtimes  = idaLsDQJtimes;
    idals_mem->jt_data = IDA_mem;
  } else {
    idals_mem->jt_data = IDA_mem->ida_user_data;
  }

  /* if J is NULL and psetup is not present, then idaLsSetup does
     not need to be called, so set the lsetup function to NULL */
  if ( (idals_mem->J == NULL) && (idals_mem->pset == NULL) )
    IDA_mem->ida_lsetup = NULL;

  /* When using a matrix-embedded linear solver disable lsetup call */
  if (SUNLinSolGetType(idals_mem->LS) == SUNLINEARSOLVER_MATRIX_EMBEDDED) {
    IDA_mem->ida_lsetup = NULL;
    idals_mem->scalesol = SUNFALSE;
  }

  /* Call LS initialize routine */
  idals_mem->last_flag = SUNLinSolInitialize(idals_mem->LS);
  return(idals_mem->last_flag);
}


/*---------------------------------------------------------------
  idaLsSetup

  This calls the Jacobian evaluation routine (if using a SUNMatrix
  object), updates counters, and calls the LS 'setup' routine to
  prepare for subsequent calls to the LS 'solve' routine.
---------------------------------------------------------------*/
int idaLsSetup(IDAMem IDA_mem, N_Vector y, N_Vector yp, N_Vector r,
               N_Vector vt1, N_Vector vt2, N_Vector vt3)
{
  IDALsMem idals_mem;
  int retval;

  /* access IDALsMem structure */
  if (IDA_mem->ida_lmem == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASLS",
                    "idaLsSetup", MSG_LS_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  /* Immediately return when using matrix-embedded linear solver */
  if (SUNLinSolGetType(idals_mem->LS) == SUNLINEARSOLVER_MATRIX_EMBEDDED) {
    idals_mem->last_flag = IDALS_SUCCESS;
    return(idals_mem->last_flag);
  }

  /* Set IDALs N_Vector pointers to inputs */
  idals_mem->ycur  = y;
  idals_mem->ypcur = yp;
  idals_mem->rcur  = r;

  /* recompute J if it is non-NULL */
  if (idals_mem->J) {

    /* Increment nje counter. */
    idals_mem->nje++;

    /* Clear the linear system matrix if necessary */
    if (SUNLinSolGetType(idals_mem->LS) == SUNLINEARSOLVER_DIRECT) {
      retval = SUNMatZero(idals_mem->J);
      if (retval != 0) {
        IDAProcessError(IDA_mem, IDALS_SUNMAT_FAIL, "IDASLS",
                        "idaLsSetup", MSG_LS_MATZERO_FAILED);
        idals_mem->last_flag = IDALS_SUNMAT_FAIL;
        return(idals_mem->last_flag);
      }
    }

    /* Call Jacobian routine */
    retval = idals_mem->jac(IDA_mem->ida_tn, IDA_mem->ida_cj, y,
                            yp, r, idals_mem->J,
                            idals_mem->J_data, vt1, vt2, vt3);
    if (retval < 0) {
      IDAProcessError(IDA_mem, IDALS_JACFUNC_UNRECVR, "IDASLS",
                      "idaLsSetup", MSG_LS_JACFUNC_FAILED);
      idals_mem->last_flag = IDALS_JACFUNC_UNRECVR;
      return(-1);
    }
    if (retval > 0) {
      /* recoverable Jacobian failure: signal caller to retry */
      idals_mem->last_flag = IDALS_JACFUNC_RECVR;
      return(1);
    }

  }

  /* Call LS setup routine -- the LS will call idaLsPSetup if applicable */
  idals_mem->last_flag = SUNLinSolSetup(idals_mem->LS, idals_mem->J);
  return(idals_mem->last_flag);
}


/*---------------------------------------------------------------
  idaLsSolve

  This routine interfaces between IDA and the generic
  SUNLinearSolver object LS, by setting the appropriate tolerance
  and scaling vectors, calling the solver,
accumulating statistics from the solve for use/reporting by IDA,
  and scaling the result if using a non-NULL SUNMatrix and cjratio
  does not equal one.
  ---------------------------------------------------------------*/
int idaLsSolve(IDAMem IDA_mem, N_Vector b, N_Vector weight,
               N_Vector ycur, N_Vector ypcur, N_Vector rescur)
{
  IDALsMem idals_mem;
  int nli_inc, retval;
  realtype tol, w_mean;

  /* access IDALsMem structure */
  if (IDA_mem->ida_lmem == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASLS",
                    "idaLsSolve", MSG_LS_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  /* If the linear solver is iterative: set convergence test constant tol,
     in terms of the Newton convergence test constant epsNewt and safety
     factors. The factor nrmfac assures that the convergence test is
     applied to the WRMS norm of the residual vector, rather than the
     weighted L2 norm. */
  if (idals_mem->iterative) {
    tol = idals_mem->nrmfac * idals_mem->eplifac * IDA_mem->ida_epsNewt;
  } else {
    tol = ZERO;
  }

  /* Set vectors ycur, ypcur and rcur for use by the Atimes and
     Psolve interface routines */
  idals_mem->ycur  = ycur;
  idals_mem->ypcur = ypcur;
  idals_mem->rcur  = rescur;

  /* Set scaling vectors for LS to use (if applicable) */
  if (idals_mem->LS->ops->setscalingvectors) {
    retval = SUNLinSolSetScalingVectors(idals_mem->LS, weight, weight);
    if (retval != SUNLS_SUCCESS) {
      IDAProcessError(IDA_mem, IDALS_SUNLS_FAIL, "IDASLS", "idaLsSolve",
                      "Error in calling SUNLinSolSetScalingVectors");
      idals_mem->last_flag = IDALS_SUNLS_FAIL;
      return(idals_mem->last_flag);
    }

  /* If solver is iterative and does not support scaling vectors, update the
     tolerance in an attempt to account for weight vector.  We make the
     following assumptions:
       1. w_i = w_mean, for i=0,...,n-1 (i.e. the weights are homogeneous)
       2. the linear solver uses a basic 2-norm to measure convergence
     Hence (using the notation from sunlinsol_spgmr.h, with S = diag(w)),
           || bbar - Abar xbar ||_2 < tol
       <=> || S b - S A x ||_2 < tol
       <=> || S (b - A x) ||_2 < tol
       <=> \sum_{i=0}^{n-1} (w_i (b - A x)_i)^2 < tol^2
       <=> w_mean^2 \sum_{i=0}^{n-1} (b - A x_i)^2 < tol^2
       <=> \sum_{i=0}^{n-1} (b - A x_i)^2 < tol^2 / w_mean^2
       <=> || b - A x ||_2 < tol / w_mean
     So we compute w_mean = ||w||_RMS and scale the desired tolerance
     accordingly. */
  } else if (idals_mem->iterative) {

    N_VConst(ONE, idals_mem->x);
    w_mean = N_VWrmsNorm(weight, idals_mem->x);
    tol /= w_mean;

  }

  /* Set initial guess x = 0 to LS */
  N_VConst(ZERO, idals_mem->x);

  /* Set zero initial guess flag */
  retval = SUNLinSolSetZeroGuess(idals_mem->LS, SUNTRUE);
  if (retval != SUNLS_SUCCESS) return(-1);

  /* If a user-provided jtsetup routine is supplied, call that here */
  if (idals_mem->jtsetup) {
    idals_mem->last_flag = idals_mem->jtsetup(IDA_mem->ida_tn, ycur, ypcur,
                                              rescur, IDA_mem->ida_cj,
                                              idals_mem->jt_data);
    idals_mem->njtsetup++;
    if (idals_mem->last_flag != 0) {
      IDAProcessError(IDA_mem, retval, "IDASLS",
                      "idaLsSolve", MSG_LS_JTSETUP_FAILED);
      return(idals_mem->last_flag);
    }
  }

  /* Call solver */
  retval = SUNLinSolSolve(idals_mem->LS, idals_mem->J,
                          idals_mem->x, b, tol);

  /* Copy appropriate result to b (depending on solver type) */
  if (idals_mem->iterative) {

    /* Retrieve solver statistics */
    nli_inc = SUNLinSolNumIters(idals_mem->LS);

    /* Copy x (or preconditioned residual vector if no iterations
       required) to b */
    if ((nli_inc == 0) &&
        (SUNLinSolGetType(idals_mem->LS) != SUNLINEARSOLVER_MATRIX_EMBEDDED))
      N_VScale(ONE, SUNLinSolResid(idals_mem->LS), b);
    else
      N_VScale(ONE, idals_mem->x, b);

    /* Increment nli counter */
    idals_mem->nli += nli_inc;

  } else {

    /* Copy x to b */
    N_VScale(ONE, idals_mem->x, b);

  }

  /* If using a direct or matrix-iterative solver, scale the
     correction to account for change in cj */
  if (idals_mem->scalesol && (IDA_mem->ida_cjratio != ONE))
    N_VScale(TWO/(ONE + IDA_mem->ida_cjratio), b, b);

  /* Increment ncfl counter */
  if (retval != SUNLS_SUCCESS) idals_mem->ncfl++;

  /* Interpret solver return value */
  idals_mem->last_flag = retval;

  switch(retval) {

  case SUNLS_SUCCESS:
    return(0);
    break;
  case SUNLS_RES_REDUCED:
  case SUNLS_CONV_FAIL:
  case SUNLS_PSOLVE_FAIL_REC:
  case SUNLS_PACKAGE_FAIL_REC:
  case SUNLS_QRFACT_FAIL:
  case SUNLS_LUFACT_FAIL:
    /* recoverable failures: caller may retry the step */
    return(1);
    break;
  case SUNLS_MEM_NULL:
  case SUNLS_ILL_INPUT:
  case SUNLS_MEM_FAIL:
  case SUNLS_GS_FAIL:
  case SUNLS_QRSOL_FAIL:
    return(-1);
    break;
  case SUNLS_PACKAGE_FAIL_UNREC:
    IDAProcessError(IDA_mem, SUNLS_PACKAGE_FAIL_UNREC, "IDASLS",
                    "idaLsSolve",
                    "Failure in SUNLinSol external package");
    return(-1);
    break;
  case SUNLS_PSOLVE_FAIL_UNREC:
    IDAProcessError(IDA_mem, SUNLS_PSOLVE_FAIL_UNREC, "IDASLS",
                    "idaLsSolve", MSG_LS_PSOLVE_FAILED);
    return(-1);
    break;
  }

  return(0);
}


/*---------------------------------------------------------------
  idaLsPerf: accumulates performance statistics information
  for IDA
  ---------------------------------------------------------------*/
int idaLsPerf(IDAMem IDA_mem, int perftask)
{
  IDALsMem idals_mem;
  realtype rcfn, rcfl;
  long int nstd, nnid;
  booleantype lcfn, lcfl;

  /* access IDALsMem structure */
  if (IDA_mem->ida_lmem == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASLS",
                    "idaLsPerf", MSG_LS_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  /* when perftask == 0, store current performance statistics */
  if (perftask == 0) {
    idals_mem->nst0  = IDA_mem->ida_nst;
    idals_mem->nni0  = IDA_mem->ida_nni;
    idals_mem->ncfn0 = IDA_mem->ida_ncfn;
    idals_mem->ncfl0 = idals_mem->ncfl;
    idals_mem->nwarn = 0;
    return(0);
  }

  /* Compute statistics since last call

     Note: the performance monitor that checked whether the average
     number of linear iterations was too close to maxl has been
     removed, since the 'maxl' value is no longer owned by the
     IDALs interface.
*/ nstd = IDA_mem->ida_nst - idals_mem->nst0; nnid = IDA_mem->ida_nni - idals_mem->nni0; if (nstd == 0 || nnid == 0) return(0); rcfn = ((realtype) (IDA_mem->ida_ncfn - idals_mem->ncfn0)) / ((realtype) nstd); rcfl = ((realtype) (idals_mem->ncfl - idals_mem->ncfl0)) / ((realtype) nnid); lcfn = (rcfn > PT9); lcfl = (rcfl > PT9); if (!(lcfn || lcfl)) return(0); idals_mem->nwarn++; if (idals_mem->nwarn > 10) return(1); if (lcfn) IDAProcessError(IDA_mem, IDA_WARNING, "IDASLS", "idaLsPerf", MSG_LS_CFN_WARN, IDA_mem->ida_tn, rcfn); if (lcfl) IDAProcessError(IDA_mem, IDA_WARNING, "IDASLS", "idaLsPerf", MSG_LS_CFL_WARN, IDA_mem->ida_tn, rcfl); return(0); } /*--------------------------------------------------------------- idaLsFree frees memory associates with the IDALs system solver interface. ---------------------------------------------------------------*/ int idaLsFree(IDAMem IDA_mem) { IDALsMem idals_mem; /* Return immediately if IDA_mem or IDA_mem->ida_lmem are NULL */ if (IDA_mem == NULL) return (IDALS_SUCCESS); if (IDA_mem->ida_lmem == NULL) return(IDALS_SUCCESS); idals_mem = (IDALsMem) IDA_mem->ida_lmem; /* Free N_Vector memory */ if (idals_mem->ytemp) { N_VDestroy(idals_mem->ytemp); idals_mem->ytemp = NULL; } if (idals_mem->yptemp) { N_VDestroy(idals_mem->yptemp); idals_mem->yptemp = NULL; } if (idals_mem->x) { N_VDestroy(idals_mem->x); idals_mem->x = NULL; } /* Nullify other N_Vector pointers */ idals_mem->ycur = NULL; idals_mem->ypcur = NULL; idals_mem->rcur = NULL; /* Nullify SUNMatrix pointer */ idals_mem->J = NULL; /* Free preconditioner memory (if applicable) */ if (idals_mem->pfree) idals_mem->pfree(IDA_mem); /* free IDALs interface structure */ free(IDA_mem->ida_lmem); return(IDALS_SUCCESS); } /*--------------------------------------------------------------- idaLsInitializeCounters resets all counters from an IDALsMem structure. 
  ---------------------------------------------------------------*/
int idaLsInitializeCounters(IDALsMem idals_mem)
{
  idals_mem->nje      = 0;
  idals_mem->nreDQ    = 0;
  idals_mem->npe      = 0;
  idals_mem->nli      = 0;
  idals_mem->nps      = 0;
  idals_mem->ncfl     = 0;
  idals_mem->njtsetup = 0;
  idals_mem->njtimes  = 0;
  return(0);
}


/*---------------------------------------------------------------
  idaLs_AccessLMem

  This routine unpacks the IDA_mem and idals_mem structures from
  the void* ida_mem pointer.  If either is missing it returns
  IDALS_MEM_NULL or IDALS_LMEM_NULL.
  ---------------------------------------------------------------*/
int idaLs_AccessLMem(void* ida_mem, const char* fname,
                     IDAMem* IDA_mem, IDALsMem* idals_mem)
{
  if (ida_mem==NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASLS",
                    fname, MSG_LS_IDAMEM_NULL);
    return(IDALS_MEM_NULL);
  }
  *IDA_mem = (IDAMem) ida_mem;
  if ((*IDA_mem)->ida_lmem==NULL) {
    IDAProcessError(*IDA_mem, IDALS_LMEM_NULL, "IDASLS",
                    fname, MSG_LS_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  *idals_mem = (IDALsMem) (*IDA_mem)->ida_lmem;
  return(IDALS_SUCCESS);
}


/*================================================================
  PART II - backward problems
  ================================================================*/


/*---------------------------------------------------------------
  IDASLS Exported functions -- Required
  ---------------------------------------------------------------*/

/* IDASetLinearSolverB specifies the iterative linear solver
   for backward integration */
int IDASetLinearSolverB(void *ida_mem, int which,
                        SUNLinearSolver LS, SUNMatrix A)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* Check if ida_mem exists */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASLS",
                    "IDASetLinearSolverB", MSG_LS_IDAMEM_NULL);
    return(IDALS_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Was ASA initialized? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDALS_NO_ADJ, "IDASLS",
                    "IDASetLinearSolverB", MSG_LS_NO_ADJ);
    return(IDALS_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASLS",
                    "IDASetLinearSolverB", MSG_LS_BAD_WHICH);
    return(IDALS_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }
  /* NOTE(review): if no entry matches 'which', IDAB_mem is NULL here and
     the dereferences below would fault; the range check above is assumed
     to guarantee a match -- confirm against list construction. */

  /* Get memory for IDALsMemRecB */
  idalsB_mem = NULL;
  idalsB_mem = (IDALsMemB) malloc(sizeof(struct IDALsMemRecB));
  if (idalsB_mem == NULL) {
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASLS",
                    "IDASetLinearSolverB", MSG_LS_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }

  /* initialize Jacobian and preconditioner functions */
  idalsB_mem->jacB      = NULL;
  idalsB_mem->jacBS     = NULL;
  idalsB_mem->jtsetupB  = NULL;
  idalsB_mem->jtsetupBS = NULL;
  idalsB_mem->jtimesB   = NULL;
  idalsB_mem->jtimesBS  = NULL;
  idalsB_mem->psetB     = NULL;
  idalsB_mem->psetBS    = NULL;
  idalsB_mem->psolveB   = NULL;
  idalsB_mem->psolveBS  = NULL;
  idalsB_mem->P_dataB   = NULL;

  /* free any existing system solver attached to IDAB */
  if (IDAB_mem->ida_lfree)  IDAB_mem->ida_lfree(IDAB_mem);

  /* Attach lmemB data and lfreeB function. */
  IDAB_mem->ida_lmem  = idalsB_mem;
  IDAB_mem->ida_lfree = idaLsFreeB;

  /* set the linear solver for this backward problem */
  ida_memB = (void *)IDAB_mem->IDA_mem;
  retval = IDASetLinearSolver(ida_memB, LS, A);
  if (retval != IDALS_SUCCESS) {
    free(idalsB_mem);
    idalsB_mem = NULL;
  }

  return(retval);
}


/*---------------------------------------------------------------
  IDASLS Exported functions -- Optional input/output
  ---------------------------------------------------------------*/

/* Attaches a user Jacobian routine for the 'which' backward
   problem; a NULL jacB resets the forward module to its default */
int IDASetJacFnB(void *ida_mem, int which, IDALsJacFnB jacB)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetJacFnB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* set jacB function pointer */
  idalsB_mem->jacB = jacB;

  /* call corresponding routine for IDAB_mem structure */
  ida_memB = (void*) IDAB_mem->IDA_mem;
  if (jacB != NULL) {
    retval = IDASetJacFn(ida_memB, idaLsJacBWrapper);
  } else {
    retval = IDASetJacFn(ida_memB, NULL);
  }

  return(retval);
}


/* As IDASetJacFnB, but for a Jacobian routine that also depends
   on the forward sensitivities */
int IDASetJacFnBS(void *ida_mem, int which, IDALsJacFnBS jacBS)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetJacFnBS",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* set jacBS function pointer */
  idalsB_mem->jacBS = jacBS;

  /* call corresponding routine for IDAB_mem structure */
  ida_memB = (void*) IDAB_mem->IDA_mem;
  if (jacBS != NULL) {
    retval = IDASetJacFn(ida_memB, idaLsJacBSWrapper);
  } else {
    retval = IDASetJacFn(ida_memB, NULL);
  }

  return(retval);
}


/* Forwards the linear convergence safety factor to the backward
   problem's forward module */
int IDASetEpsLinB(void *ida_mem, int which, realtype eplifacB)
{
  IDAadjMem  IDAADJ_mem;
  IDAMem     IDA_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetEpsLinB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* call corresponding routine for IDAB_mem structure */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  return(IDASetEpsLin(ida_memB, eplifacB));
}


/* Forwards the residual-norm conversion factor to the backward
   problem's forward module */
int IDASetLSNormFactorB(void *ida_mem, int which, realtype nrmfacB)
{
  IDAadjMem  IDAADJ_mem;
  IDAMem     IDA_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetLSNormFactorB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* call corresponding routine for IDAB_mem structure */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  return(IDASetLSNormFactor(ida_memB, nrmfacB));
}


/* Enables/disables cj-change rescaling of linear solves for the
   backward problem */
int IDASetLinearSolutionScalingB(void *ida_mem, int which,
                                 booleantype onoffB)
{
  IDAadjMem  IDAADJ_mem;
  IDAMem     IDA_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetLinearSolutionScalingB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* call corresponding routine for IDAB_mem structure */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  return(IDASetLinearSolutionScaling(ida_memB, onoffB));
}


/* Forwards the difference-quotient increment factor to the
   backward problem's forward module */
int IDASetIncrementFactorB(void *ida_mem, int which, realtype dqincfacB)
{
  IDAadjMem  IDAADJ_mem;
  IDAMem     IDA_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetIncrementFactorB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* call corresponding routine for IDAB_mem structure */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  return(IDASetIncrementFactor(ida_memB, dqincfacB));
}


/* Records user preconditioner routines for the backward problem
   and installs the matching wrappers in the forward module */
int IDASetPreconditionerB(void *ida_mem, int which,
                          IDALsPrecSetupFnB psetupB,
                          IDALsPrecSolveFnB psolveB)
{
  IDAadjMem          IDAADJ_mem;
  IDAMem             IDA_mem;
  IDABMem            IDAB_mem;
  void              *ida_memB;
  IDALsMemB          idalsB_mem;
  IDALsPrecSetupFn   idals_psetup;
  IDALsPrecSolveFn   idals_psolve;
  int                retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetPreconditionerB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* Set preconditioners for the backward problem. */
  idalsB_mem->psetB   = psetupB;
  idalsB_mem->psolveB = psolveB;

  /* Call the corresponding "set" routine for the backward problem */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  idals_psetup = (psetupB == NULL) ? NULL : idaLsPrecSetupB;
  idals_psolve = (psolveB == NULL) ? NULL : idaLsPrecSolveB;
  return(IDASetPreconditioner(ida_memB, idals_psetup, idals_psolve));
}


/* As IDASetPreconditionerB, but for sensitivity-dependent
   preconditioner routines */
int IDASetPreconditionerBS(void *ida_mem, int which,
                           IDALsPrecSetupFnBS psetupBS,
                           IDALsPrecSolveFnBS psolveBS)
{
  IDAadjMem          IDAADJ_mem;
  IDAMem             IDA_mem;
  IDABMem            IDAB_mem;
  void              *ida_memB;
  IDALsMemB          idalsB_mem;
  IDALsPrecSetupFn   idals_psetup;
  IDALsPrecSolveFn   idals_psolve;
  int                retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetPreconditionerBS",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* Set preconditioners for the backward problem. */
  idalsB_mem->psetBS   = psetupBS;
  idalsB_mem->psolveBS = psolveBS;

  /* Call the corresponding "set" routine for the backward problem */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  idals_psetup = (psetupBS == NULL) ? NULL : idaLsPrecSetupBS;
  idals_psolve = (psolveBS == NULL) ? NULL : idaLsPrecSolveBS;
  return(IDASetPreconditioner(ida_memB, idals_psetup, idals_psolve));
}


/* Records user Jacobian-times-vector routines for the backward
   problem and installs the matching wrappers in the forward
   module */
int IDASetJacTimesB(void *ida_mem, int which,
                    IDALsJacTimesSetupFnB jtsetupB,
                    IDALsJacTimesVecFnB jtimesB)
{
  IDAadjMem             IDAADJ_mem;
  IDAMem                IDA_mem;
  IDABMem               IDAB_mem;
  void                 *ida_memB;
  IDALsMemB             idalsB_mem;
  IDALsJacTimesSetupFn  idals_jtsetup;
  IDALsJacTimesVecFn    idals_jtimes;
  int                   retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetJacTimesB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* Set jacobian routines for the backward problem. */
  idalsB_mem->jtsetupB = jtsetupB;
  idalsB_mem->jtimesB  = jtimesB;

  /* Call the corresponding "set" routine for the backward problem */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  idals_jtsetup = (jtsetupB == NULL) ? NULL : idaLsJacTimesSetupB;
  idals_jtimes  = (jtimesB == NULL)  ? NULL : idaLsJacTimesVecB;
  return(IDASetJacTimes(ida_memB, idals_jtsetup, idals_jtimes));
}


/* As IDASetJacTimesB, but for sensitivity-dependent
   Jacobian-times-vector routines */
int IDASetJacTimesBS(void *ida_mem, int which,
                     IDALsJacTimesSetupFnBS jtsetupBS,
                     IDALsJacTimesVecFnBS jtimesBS)
{
  IDAadjMem             IDAADJ_mem;
  IDAMem                IDA_mem;
  IDABMem               IDAB_mem;
  void                 *ida_memB;
  IDALsMemB             idalsB_mem;
  IDALsJacTimesSetupFn  idals_jtsetup;
  IDALsJacTimesVecFn    idals_jtimes;
  int                   retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetJacTimesBS",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* Set jacobian routines for the backward problem. */
  idalsB_mem->jtsetupBS = jtsetupBS;
  idalsB_mem->jtimesBS  = jtimesBS;

  /* Call the corresponding "set" routine for the backward problem */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  idals_jtsetup = (jtsetupBS == NULL) ? NULL : idaLsJacTimesSetupBS;
  idals_jtimes  = (jtimesBS == NULL)  ? NULL : idaLsJacTimesVecBS;
  return(IDASetJacTimes(ida_memB, idals_jtsetup, idals_jtimes));
}


/* Forwards an alternative residual function for internal
   Jacobian-vector products to the backward problem's forward
   module */
int IDASetJacTimesResFnB(void *ida_mem, int which, IDAResFn jtimesResFn)
{
  IDAadjMem  IDAADJ_mem;
  IDAMem     IDA_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  void      *ida_memB;
  int        retval;

  /* access relevant memory structures */
  retval = idaLs_AccessLMemB(ida_mem, which, "IDASetJacTimesResFnB",
                             &IDA_mem, &IDAADJ_mem, &IDAB_mem, &idalsB_mem);
  if (retval != IDALS_SUCCESS)  return(retval);

  /* call corresponding routine for IDAB_mem structure */
  ida_memB = (void *) IDAB_mem->IDA_mem;
  return(IDASetJacTimesResFn(ida_memB, jtimesResFn));
}


/*-----------------------------------------------------------------
  IDASLS Private functions for backwards problems
  -----------------------------------------------------------------*/

/* idaLsJacBWrapper interfaces to the IDAJacFnB routine provided
   by the user. idaLsJacBWrapper is of type IDALsJacFn. */
static int idaLsJacBWrapper(realtype tt, realtype c_jB, N_Vector yyB,
                            N_Vector ypB, N_Vector rrB, SUNMatrix JacB,
                            void *ida_mem, N_Vector tmp1B,
                            N_Vector tmp2B, N_Vector tmp3B)
{
  IDAadjMem  IDAADJ_mem;
  IDAMem     IDA_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsJacBWrapper",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);
  /* NOTE(review): retval is not checked before IDAADJ_mem is
     dereferenced below -- confirm the access cannot fail here. */

  /* Forward solution from interpolation */
  if (IDAADJ_mem->ia_noInterp == SUNFALSE) {
    retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                 IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsJacBWrapper", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint jacB routine */
  return(idalsB_mem->jacB(tt, c_jB, IDAADJ_mem->ia_yyTmp,
                          IDAADJ_mem->ia_ypTmp, yyB, ypB, rrB,
                          JacB, IDAB_mem->ida_user_data,
                          tmp1B, tmp2B, tmp3B));
}

/* idaLsJacBSWrapper interfaces to the IDAJacFnBS routine provided
   by the user.
   idaLsJacBSWrapper is of type IDALsJacFn. */
static int idaLsJacBSWrapper(realtype tt, realtype c_jB, N_Vector yyB,
                             N_Vector ypB, N_Vector rrB, SUNMatrix JacB,
                             void *ida_mem, N_Vector tmp1B,
                             N_Vector tmp2B, N_Vector tmp3B)
{
  IDAadjMem  IDAADJ_mem;
  IDAMem     IDA_mem;
  IDABMem    IDAB_mem;
  IDALsMemB  idalsB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsJacBSWrapper",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);
  /* NOTE(review): retval is not checked before IDAADJ_mem is
     dereferenced below -- confirm the access cannot fail here
     (same pattern in all wrappers in this section). */

  /* Get forward solution from interpolation. */
  if(IDAADJ_mem->ia_noInterp == SUNFALSE) {
    if (IDAADJ_mem->ia_interpSensi)
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp,
                                   IDAADJ_mem->ia_yySTmp,
                                   IDAADJ_mem->ia_ypSTmp);
    else
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsJacBSWrapper", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint jacBS routine */
  return(idalsB_mem->jacBS(tt, c_jB, IDAADJ_mem->ia_yyTmp,
                           IDAADJ_mem->ia_ypTmp, IDAADJ_mem->ia_yySTmp,
                           IDAADJ_mem->ia_ypSTmp, yyB, ypB, rrB,
                           JacB, IDAB_mem->ida_user_data,
                           tmp1B, tmp2B, tmp3B));
}


/* idaLsPrecSetupB interfaces to the IDALsPrecSetupFnB routine
   provided by the user */
static int idaLsPrecSetupB(realtype tt, N_Vector yyB, N_Vector ypB,
                           N_Vector rrB, realtype c_jB, void *ida_mem)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsPrecSetupB",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if (IDAADJ_mem->ia_noInterp==SUNFALSE) {
    retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                 IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsPrecSetupB", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint precondB routine */
  return(idalsB_mem->psetB(tt, IDAADJ_mem->ia_yyTmp,
                           IDAADJ_mem->ia_ypTmp, yyB, ypB, rrB,
                           c_jB, IDAB_mem->ida_user_data));
}


/* idaLsPrecSetupBS interfaces to the IDALsPrecSetupFnBS routine
   provided by the user */
static int idaLsPrecSetupBS(realtype tt, N_Vector yyB, N_Vector ypB,
                            N_Vector rrB, realtype c_jB, void *ida_mem)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsPrecSetupBS",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if(IDAADJ_mem->ia_noInterp == SUNFALSE) {
    if (IDAADJ_mem->ia_interpSensi)
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp,
                                   IDAADJ_mem->ia_yySTmp,
                                   IDAADJ_mem->ia_ypSTmp);
    else
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsPrecSetupBS", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint precondBS routine */
  return(idalsB_mem->psetBS(tt, IDAADJ_mem->ia_yyTmp,
                            IDAADJ_mem->ia_ypTmp,
                            IDAADJ_mem->ia_yySTmp,
                            IDAADJ_mem->ia_ypSTmp,
                            yyB, ypB, rrB, c_jB,
                            IDAB_mem->ida_user_data));
}


/* idaLsPrecSolveB interfaces to the IDALsPrecSolveFnB routine
   provided by the user */
static int idaLsPrecSolveB(realtype tt, N_Vector yyB, N_Vector ypB,
                           N_Vector rrB, N_Vector rvecB, N_Vector zvecB,
                           realtype c_jB, realtype deltaB, void *ida_mem)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsPrecSolveB",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if (IDAADJ_mem->ia_noInterp==SUNFALSE) {
    retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                 IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsPrecSolveB", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint psolveB routine */
  return(idalsB_mem->psolveB(tt, IDAADJ_mem->ia_yyTmp,
                             IDAADJ_mem->ia_ypTmp, yyB, ypB, rrB,
                             rvecB, zvecB, c_jB, deltaB,
                             IDAB_mem->ida_user_data));
}


/* idaLsPrecSolveBS interfaces to the IDALsPrecSolveFnBS routine
   provided by the user */
static int idaLsPrecSolveBS(realtype tt, N_Vector yyB, N_Vector ypB,
                            N_Vector rrB, N_Vector rvecB, N_Vector zvecB,
                            realtype c_jB, realtype deltaB, void *ida_mem)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsPrecSolveBS",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if(IDAADJ_mem->ia_noInterp == SUNFALSE) {
    if (IDAADJ_mem->ia_interpSensi)
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp,
                                   IDAADJ_mem->ia_yySTmp,
                                   IDAADJ_mem->ia_ypSTmp);
    else
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsPrecSolveBS", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint psolveBS routine */
  return(idalsB_mem->psolveBS(tt, IDAADJ_mem->ia_yyTmp,
                              IDAADJ_mem->ia_ypTmp,
                              IDAADJ_mem->ia_yySTmp,
                              IDAADJ_mem->ia_ypSTmp,
                              yyB, ypB, rrB, rvecB, zvecB,
                              c_jB, deltaB,
                              IDAB_mem->ida_user_data));
}


/* idaLsJacTimesSetupB interfaces to the IDALsJacTimesSetupFnB
   routine provided by the user */
static int idaLsJacTimesSetupB(realtype tt, N_Vector yyB, N_Vector ypB,
                               N_Vector rrB, realtype c_jB, void *ida_mem)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsJacTimesSetupB",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if (IDAADJ_mem->ia_noInterp==SUNFALSE) {
    retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                 IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsJacTimesSetupB", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint jtsetupB routine */
  return(idalsB_mem->jtsetupB(tt, IDAADJ_mem->ia_yyTmp,
                              IDAADJ_mem->ia_ypTmp, yyB, ypB, rrB,
                              c_jB, IDAB_mem->ida_user_data));
}


/* idaLsJacTimesSetupBS interfaces to the IDALsJacTimesSetupFnBS
   routine provided by the user */
static int idaLsJacTimesSetupBS(realtype tt, N_Vector yyB, N_Vector ypB,
                                N_Vector rrB, realtype c_jB, void *ida_mem)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsJacTimesSetupBS",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if(IDAADJ_mem->ia_noInterp == SUNFALSE) {
    if (IDAADJ_mem->ia_interpSensi)
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp,
                                   IDAADJ_mem->ia_yySTmp,
                                   IDAADJ_mem->ia_ypSTmp);
    else
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsJacTimesSetupBS", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint jtsetupBS routine */
  return(idalsB_mem->jtsetupBS(tt, IDAADJ_mem->ia_yyTmp,
                               IDAADJ_mem->ia_ypTmp,
                               IDAADJ_mem->ia_yySTmp,
                               IDAADJ_mem->ia_ypSTmp,
                               yyB, ypB, rrB, c_jB,
                               IDAB_mem->ida_user_data));
}


/* idaLsJacTimesVecB interfaces to the IDALsJacTimesVecFnB routine
   provided by the user */
static int idaLsJacTimesVecB(realtype tt, N_Vector yyB, N_Vector ypB,
                             N_Vector rrB, N_Vector vB, N_Vector JvB,
                             realtype c_jB, void *ida_mem,
                             N_Vector tmp1B, N_Vector tmp2B)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsJacTimesVecB",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if (IDAADJ_mem->ia_noInterp==SUNFALSE) {
    retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                 IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsJacTimesVecB", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint jtimesB routine */
  return(idalsB_mem->jtimesB(tt, IDAADJ_mem->ia_yyTmp,
                             IDAADJ_mem->ia_ypTmp, yyB, ypB, rrB,
                             vB, JvB, c_jB, IDAB_mem->ida_user_data,
                             tmp1B, tmp2B));
}


/* idaLsJacTimesVecBS interfaces to the IDALsJacTimesVecFnBS
   routine provided by the user */
static int idaLsJacTimesVecBS(realtype tt, N_Vector yyB, N_Vector ypB,
                              N_Vector rrB, N_Vector vB, N_Vector JvB,
                              realtype c_jB, void *ida_mem,
                              N_Vector tmp1B, N_Vector tmp2B)
{
  IDAMem     IDA_mem;
  IDAadjMem  IDAADJ_mem;
  IDALsMemB  idalsB_mem;
  IDABMem    IDAB_mem;
  int        retval;

  /* access relevant memory structures */
  IDA_mem = NULL;
  IDAADJ_mem = NULL;
  idalsB_mem = NULL;
  IDAB_mem = NULL;
  retval = idaLs_AccessLMemBCur(ida_mem, "idaLsJacTimesVecBS",
                                &IDA_mem, &IDAADJ_mem, &IDAB_mem,
                                &idalsB_mem);

  /* Get forward solution from interpolation. */
  if(IDAADJ_mem->ia_noInterp == SUNFALSE) {
    if (IDAADJ_mem->ia_interpSensi)
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp,
                                   IDAADJ_mem->ia_yySTmp,
                                   IDAADJ_mem->ia_ypSTmp);
    else
      retval = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                   IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDAB_mem->IDA_mem, -1, "IDASLS",
                      "idaLsJacTimesVecBS", MSG_LS_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint jtimesBS routine */
  return(idalsB_mem->jtimesBS(tt, IDAADJ_mem->ia_yyTmp,
                              IDAADJ_mem->ia_ypTmp,
                              IDAADJ_mem->ia_yySTmp,
                              IDAADJ_mem->ia_ypSTmp,
                              yyB, ypB, rrB, vB, JvB, c_jB,
                              IDAB_mem->ida_user_data,
                              tmp1B, tmp2B));
}


/* idaLsFreeB frees memory associated with the IDASLS wrapper */
int idaLsFreeB(IDABMem IDAB_mem)
{
  IDALsMemB idalsB_mem;

  /* Return immediately if IDAB_mem or IDAB_mem->ida_lmem are NULL */
  if (IDAB_mem == NULL)  return(IDALS_SUCCESS);
  if (IDAB_mem->ida_lmem == NULL)  return(IDALS_SUCCESS);
  idalsB_mem = (IDALsMemB) IDAB_mem->ida_lmem;

  /* free IDALsMemB interface structure
     NOTE(review): IDAB_mem->ida_lmem is not nulled after the free --
     callers are assumed not to touch it again; verify. */
  free(idalsB_mem);

  return(IDALS_SUCCESS);
}


/* idaLs_AccessLMemB unpacks the IDA_mem, IDAADJ_mem, IDAB_mem and
   idalsB_mem structures from the void* ida_mem pointer.
   If any are missing it returns IDALS_MEM_NULL, IDALS_NO_ADJ,
   IDALS_ILL_INPUT, or IDALS_LMEMB_NULL.
*/
int idaLs_AccessLMemB(void *ida_mem, int which, const char *fname,
                      IDAMem *IDA_mem, IDAadjMem *IDAADJ_mem,
                      IDABMem *IDAB_mem, IDALsMemB *idalsB_mem)
{

  /* access IDAMem structure */
  if (ida_mem==NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASLS",
                    fname, MSG_LS_IDAMEM_NULL);
    return(IDALS_MEM_NULL);
  }
  *IDA_mem = (IDAMem) ida_mem;

  /* access IDAadjMem structure */
  if ((*IDA_mem)->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(*IDA_mem, IDALS_NO_ADJ, "IDASLS",
                    fname, MSG_LS_NO_ADJ);
    return(IDALS_NO_ADJ);
  }
  *IDAADJ_mem = (*IDA_mem)->ida_adj_mem;

  /* Check the value of which */
  if ( which >= (*IDAADJ_mem)->ia_nbckpbs ) {
    IDAProcessError(*IDA_mem, IDALS_ILL_INPUT, "IDASLS",
                    fname, MSG_LS_BAD_WHICH);
    return(IDALS_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to which */
  *IDAB_mem = (*IDAADJ_mem)->IDAB_mem;
  while ((*IDAB_mem) != NULL) {
    if ( which == (*IDAB_mem)->ida_index ) break;
    *IDAB_mem = (*IDAB_mem)->ida_next;
  }

  /* FIX: guard against an exhausted search -- previously a 'which'
     value absent from the list led to a NULL dereference below */
  if (*IDAB_mem == NULL) {
    IDAProcessError(*IDA_mem, IDALS_ILL_INPUT, "IDASLS",
                    fname, MSG_LS_BAD_WHICH);
    return(IDALS_ILL_INPUT);
  }

  /* access IDALsMemB structure */
  if ((*IDAB_mem)->ida_lmem == NULL) {
    IDAProcessError(*IDA_mem, IDALS_LMEMB_NULL, "IDASLS",
                    fname, MSG_LS_LMEMB_NULL);
    return(IDALS_LMEMB_NULL);
  }
  *idalsB_mem = (IDALsMemB) ((*IDAB_mem)->ida_lmem);

  return(IDALS_SUCCESS);
}


/* idaLs_AccessLMemBCur unpacks the ida_mem, ca_mem, idaB_mem and
   idalsB_mem structures from the void* ida_mem pointer.
   If any are missing it returns IDALS_MEM_NULL, IDALS_NO_ADJ,
   or IDALS_LMEMB_NULL.
*/
int idaLs_AccessLMemBCur(void *ida_mem, const char *fname,
                         IDAMem *IDA_mem, IDAadjMem *IDAADJ_mem,
                         IDABMem *IDAB_mem, IDALsMemB *idalsB_mem)
{

  /* access IDAMem structure */
  if (ida_mem==NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASLS",
                    fname, MSG_LS_IDAMEM_NULL);
    return(IDALS_MEM_NULL);
  }
  *IDA_mem = (IDAMem) ida_mem;

  /* access IDAadjMem structure */
  if ((*IDA_mem)->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(*IDA_mem, IDALS_NO_ADJ, "IDASLS",
                    fname, MSG_LS_NO_ADJ);
    return(IDALS_NO_ADJ);
  }
  *IDAADJ_mem = (*IDA_mem)->ida_adj_mem;

  /* get current backward problem (set by the adjoint module while
     integrating backward problem 'ia_bckpbCrt') */
  if ((*IDAADJ_mem)->ia_bckpbCrt == NULL) {
    IDAProcessError(*IDA_mem, IDALS_LMEMB_NULL, "IDASLS",
                    fname, MSG_LS_LMEMB_NULL);
    return(IDALS_LMEMB_NULL);
  }
  *IDAB_mem = (*IDAADJ_mem)->ia_bckpbCrt;

  /* access IDALsMemB structure */
  if ((*IDAB_mem)->ida_lmem == NULL) {
    IDAProcessError(*IDA_mem, IDALS_LMEMB_NULL, "IDASLS",
                    fname, MSG_LS_LMEMB_NULL);
    return(IDALS_LMEMB_NULL);
  }
  *idalsB_mem = (IDALsMemB) ((*IDAB_mem)->ida_lmem);

  return(IDALS_SUCCESS);
}

/*---------------------------------------------------------------
  EOF
  ---------------------------------------------------------------*/
StanHeaders/src/idas/idas_bbdpre.c0000644000176200001440000007212414645137106016601 0ustar liggesusers/*
 * -----------------------------------------------------------------
 * Programmer(s): Daniel R. Reynolds @ SMU
 *                Alan C. Hindmarsh and Radu Serban @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * This file contains implementations of routines for a
 * band-block-diagonal preconditioner, i.e.
a block-diagonal * matrix with banded blocks, for use with IDA, the IDASLS * linear solver interface. * * NOTE: With only one processor in use, a banded matrix results * rather than a block-diagonal matrix with banded blocks. * Diagonal blocking occurs at the processor level. * ----------------------------------------------------------------- */ #include #include #include "idas_impl.h" #include "idas_ls_impl.h" #include "idas_bbdpre_impl.h" #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /* Prototypes of IDABBDPrecSetup and IDABBDPrecSolve */ static int IDABBDPrecSetup(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, realtype c_j, void *prec_data); static int IDABBDPrecSolve(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, N_Vector rvec, N_Vector zvec, realtype c_j, realtype delta, void *prec_data); /* Prototype for IDABBDPrecFree */ static int IDABBDPrecFree(IDAMem ida_mem); /* Prototype for difference quotient Jacobian calculation routine */ static int IBBDDQJac(IBBDPrecData pdata, realtype tt, realtype cj, N_Vector yy, N_Vector yp, N_Vector gref, N_Vector ytemp, N_Vector yptemp, N_Vector gtemp); /* Wrapper functions for adjoint code */ static int IDAAglocal(sunindextype NlocalB, realtype tt, N_Vector yyB, N_Vector ypB, N_Vector gvalB, void *user_dataB); static int IDAAgcomm(sunindextype NlocalB, realtype tt, N_Vector yyB, N_Vector ypB, void *user_dataB); /* Prototype for the pfree routine for backward problems. 
*/
static int IDABBDPrecFreeB(IDABMem IDAB_mem);

/*================================================================
  PART I - forward problems
  ================================================================*/

/*---------------------------------------------------------------
  User-Callable Functions: initialization, reinit and free
  ---------------------------------------------------------------*/

/* IDABBDPrecInit:

   Allocates and initializes the BBD (band-block-diagonal)
   preconditioner data for the forward problem: builds the band
   matrix PP (with extended storage bandwidth for pivoting), the
   serial work vectors, and a band SUNLinearSolver, then attaches
   IDABBDPrecSetup/IDABBDPrecSolve to the IDALS interface via
   IDASetPreconditioner.  Gres/Gcomm are the user's local residual
   and communication routines; dq_rel_yy <= 0 selects the default
   sqrt(unit roundoff) increment.  Returns an IDALS_* flag. */
int IDABBDPrecInit(void *ida_mem, sunindextype Nlocal,
                   sunindextype mudq, sunindextype mldq,
                   sunindextype mukeep, sunindextype mlkeep,
                   realtype dq_rel_yy,
                   IDABBDLocalFn Gres, IDABBDCommFn Gcomm)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  IBBDPrecData pdata;
  sunindextype muk, mlk, storage_mu, lrw1, liw1;
  long int lrw, liw;
  int flag;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_NULL);
    return(IDALS_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Test if the LS linear solver interface has been created */
  if (IDA_mem->ida_lmem == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  /* Test compatibility of NVECTOR package with the BBD preconditioner */
  if(IDA_mem->ida_tempv1->ops->nvgetarraypointer == NULL) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_BAD_NVECTOR);
    return(IDALS_ILL_INPUT);
  }

  /* Allocate data memory. */
  pdata = NULL;
  pdata = (IBBDPrecData) malloc(sizeof *pdata);
  if (pdata == NULL) {
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }

  /* Set pointers to glocal and gcomm; load half-bandwidths
     (all clipped to the legal range [0, Nlocal-1]). */
  pdata->ida_mem = IDA_mem;
  pdata->glocal = Gres;
  pdata->gcomm = Gcomm;
  pdata->mudq = SUNMIN(Nlocal-1, SUNMAX(0, mudq));
  pdata->mldq = SUNMIN(Nlocal-1, SUNMAX(0, mldq));
  muk = SUNMIN(Nlocal-1, SUNMAX(0, mukeep));
  mlk = SUNMIN(Nlocal-1, SUNMAX(0, mlkeep));
  pdata->mukeep = muk;
  pdata->mlkeep = mlk;

  /* Set extended upper half-bandwidth for PP (required for pivoting). */
  storage_mu = SUNMIN(Nlocal-1, muk+mlk);

  /* Allocate memory for preconditioner matrix. */
  pdata->PP = NULL;
  pdata->PP = SUNBandMatrixStorage(Nlocal, muk, mlk, storage_mu,
                                   IDA_mem->ida_sunctx);
  if (pdata->PP == NULL) {
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }

  /* Allocate memory for temporary N_Vectors.  On every failure path
     below, everything allocated so far is released (in order) before
     returning -- keep these cleanup ladders in sync if a new
     allocation is added. */
  pdata->zlocal = NULL;
  pdata->zlocal = N_VNewEmpty_Serial(Nlocal, IDA_mem->ida_sunctx);
  if (pdata->zlocal == NULL) {
    SUNMatDestroy(pdata->PP);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }
  pdata->rlocal = NULL;
  pdata->rlocal = N_VNewEmpty_Serial(Nlocal, IDA_mem->ida_sunctx);
  if (pdata->rlocal == NULL) {
    N_VDestroy(pdata->zlocal);
    SUNMatDestroy(pdata->PP);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }
  pdata->tempv1 = NULL;
  pdata->tempv1 = N_VClone(IDA_mem->ida_tempv1);
  if (pdata->tempv1 == NULL){
    N_VDestroy(pdata->rlocal);
    N_VDestroy(pdata->zlocal);
    SUNMatDestroy(pdata->PP);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }
  pdata->tempv2 = NULL;
  pdata->tempv2 = N_VClone(IDA_mem->ida_tempv1);
  if (pdata->tempv2 == NULL){
    N_VDestroy(pdata->rlocal);
    N_VDestroy(pdata->zlocal);
    N_VDestroy(pdata->tempv1);
    SUNMatDestroy(pdata->PP);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }
  pdata->tempv3 = NULL;
  pdata->tempv3 = N_VClone(IDA_mem->ida_tempv1);
  if (pdata->tempv3 == NULL){
    N_VDestroy(pdata->rlocal);
    N_VDestroy(pdata->zlocal);
    N_VDestroy(pdata->tempv1);
    N_VDestroy(pdata->tempv2);
    SUNMatDestroy(pdata->PP);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }
  pdata->tempv4 = NULL;
  pdata->tempv4 = N_VClone(IDA_mem->ida_tempv1);
  if (pdata->tempv4 == NULL){
    N_VDestroy(pdata->rlocal);
    N_VDestroy(pdata->zlocal);
    N_VDestroy(pdata->tempv1);
    N_VDestroy(pdata->tempv2);
    N_VDestroy(pdata->tempv3);
    SUNMatDestroy(pdata->PP);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }

  /* Allocate memory for banded linear solver */
  pdata->LS = NULL;
  pdata->LS = SUNLinSol_Band(pdata->rlocal, pdata->PP, IDA_mem->ida_sunctx);
  if (pdata->LS == NULL) {
    N_VDestroy(pdata->zlocal);
    N_VDestroy(pdata->rlocal);
    N_VDestroy(pdata->tempv1);
    N_VDestroy(pdata->tempv2);
    N_VDestroy(pdata->tempv3);
    N_VDestroy(pdata->tempv4);
    SUNMatDestroy(pdata->PP);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }

  /* initialize band linear solver object */
  flag = SUNLinSolInitialize(pdata->LS);
  if (flag != SUNLS_SUCCESS) {
    N_VDestroy(pdata->zlocal);
    N_VDestroy(pdata->rlocal);
    N_VDestroy(pdata->tempv1);
    N_VDestroy(pdata->tempv2);
    N_VDestroy(pdata->tempv3);
    N_VDestroy(pdata->tempv4);
    SUNMatDestroy(pdata->PP);
    SUNLinSolFree(pdata->LS);
    free(pdata); pdata = NULL;
    IDAProcessError(IDA_mem, IDALS_SUNLS_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInit", MSGBBD_SUNLS_FAIL);
    return(IDALS_SUNLS_FAIL);
  }

  /* Set rel_yy based on input value dq_rel_yy (0 implies default). */
  pdata->rel_yy = (dq_rel_yy > ZERO) ?
    dq_rel_yy : SUNRsqrt(IDA_mem->ida_uround);

  /* Store Nlocal to be used in IDABBDPrecSetup */
  pdata->n_local = Nlocal;

  /* Set work space sizes and initialize nge. */
  pdata->rpwsize = 0;
  pdata->ipwsize = 0;
  if (IDA_mem->ida_tempv1->ops->nvspace) {
    N_VSpace(IDA_mem->ida_tempv1, &lrw1, &liw1);
    pdata->rpwsize += 4*lrw1;   /* tempv1..tempv4 */
    pdata->ipwsize += 4*liw1;
  }
  if (pdata->rlocal->ops->nvspace) {
    N_VSpace(pdata->rlocal, &lrw1, &liw1);
    pdata->rpwsize += 2*lrw1;   /* rlocal and zlocal */
    pdata->ipwsize += 2*liw1;
  }
  if (pdata->PP->ops->space) {
    flag = SUNMatSpace(pdata->PP, &lrw, &liw);
    pdata->rpwsize += lrw;
    pdata->ipwsize += liw;
  }
  if (pdata->LS->ops->space) {
    flag = SUNLinSolSpace(pdata->LS, &lrw, &liw);
    pdata->rpwsize += lrw;
    pdata->ipwsize += liw;
  }
  pdata->nge = 0;

  /* make sure pdata is free from any previous allocations */
  if (idals_mem->pfree)
    idals_mem->pfree(IDA_mem);

  /* Point to the new pdata field in the LS memory */
  idals_mem->pdata = pdata;

  /* Attach the pfree function */
  idals_mem->pfree = IDABBDPrecFree;

  /* Attach preconditioner solve and setup functions */
  flag = IDASetPreconditioner(ida_mem, IDABBDPrecSetup,
                              IDABBDPrecSolve);

  return(flag);
}

/*-------------------------------------------------------------*/

/* IDABBDPrecReInit:

   Re-initializes an existing BBD preconditioner (created earlier by
   IDABBDPrecInit) with new difference-quotient half-bandwidths and a
   new DQ relative increment; resets the g-evaluation counter. */
int IDABBDPrecReInit(void *ida_mem, sunindextype mudq,
                     sunindextype mldq, realtype dq_rel_yy)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  IBBDPrecData pdata;
  sunindextype Nlocal;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecReInit", MSGBBD_MEM_NULL);
    return(IDALS_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Test if the LS linear solver interface has been created */
  if (IDA_mem->ida_lmem == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecReInit", MSGBBD_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  /* Test if the preconditioner data is non-NULL */
  if (idals_mem->pdata == NULL) {
    IDAProcessError(IDA_mem, IDALS_PMEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecReInit", MSGBBD_PMEM_NULL);
    return(IDALS_PMEM_NULL);
  }
pdata = (IBBDPrecData) idals_mem->pdata;

  /* Load half-bandwidths (clipped to [0, Nlocal-1]). */
  Nlocal = pdata->n_local;
  pdata->mudq = SUNMIN(Nlocal-1, SUNMAX(0, mudq));
  pdata->mldq = SUNMIN(Nlocal-1, SUNMAX(0, mldq));

  /* Set rel_yy based on input value dq_rel_yy (0 implies default). */
  pdata->rel_yy = (dq_rel_yy > ZERO) ?
    dq_rel_yy : SUNRsqrt(IDA_mem->ida_uround);

  /* Re-initialize nge */
  pdata->nge = 0;

  return(IDALS_SUCCESS);
}

/*-------------------------------------------------------------*/

/* IDABBDPrecGetWorkSpace:

   Returns the real/integer work space sizes accumulated for the BBD
   preconditioner data in IDABBDPrecInit. */
int IDABBDPrecGetWorkSpace(void *ida_mem,
                           long int *lenrwBBDP, long int *leniwBBDP)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  IBBDPrecData pdata;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecGetWorkSpace", MSGBBD_MEM_NULL);
    return(IDALS_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  if (IDA_mem->ida_lmem == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecGetWorkSpace", MSGBBD_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  if (idals_mem->pdata == NULL) {
    IDAProcessError(IDA_mem, IDALS_PMEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecGetWorkSpace", MSGBBD_PMEM_NULL);
    return(IDALS_PMEM_NULL);
  }
  pdata = (IBBDPrecData) idals_mem->pdata;

  *lenrwBBDP = pdata->rpwsize;
  *leniwBBDP = pdata->ipwsize;

  return(IDALS_SUCCESS);
}

/*-------------------------------------------------------------*/

/* IDABBDPrecGetNumGfnEvals:

   Returns the cumulative number of calls to the user's glocal
   routine (pdata->nge), incremented in IBBDDQJac. */
int IDABBDPrecGetNumGfnEvals(void *ida_mem, long int *ngevalsBBDP)
{
  IDAMem IDA_mem;
  IDALsMem idals_mem;
  IBBDPrecData pdata;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecGetNumGfnEvals", MSGBBD_MEM_NULL);
    return(IDALS_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  if (IDA_mem->ida_lmem == NULL) {
    IDAProcessError(IDA_mem, IDALS_LMEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecGetNumGfnEvals", MSGBBD_LMEM_NULL);
    return(IDALS_LMEM_NULL);
  }
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;

  if (idals_mem->pdata == NULL) {
    IDAProcessError(IDA_mem, IDALS_PMEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecGetNumGfnEvals", MSGBBD_PMEM_NULL);
    return(IDALS_PMEM_NULL);
  }
  pdata = (IBBDPrecData) idals_mem->pdata;

  *ngevalsBBDP = pdata->nge;

  return(IDALS_SUCCESS);
}

/*---------------------------------------------------------------
  IDABBDPrecSetup:

  IDABBDPrecSetup generates a band-block-diagonal preconditioner
  matrix, where the local block (on this processor) is a band
  matrix.  Each local block is computed by a difference quotient
  scheme via calls to the user-supplied routines glocal, gcomm.
  After generating the block in the band matrix PP, this routine
  does an LU factorization in place in PP.

  The IDABBDPrecSetup parameters used here are as follows:

  tt is the current value of the independent variable t.

  yy is the current value of the dependent variable vector,
  namely the predicted value of y(t).

  yp is the current value of the derivative vector y',
  namely the predicted value of y'(t).

  c_j is the scalar in the system Jacobian, proportional to 1/hh.

  bbd_data is the pointer to BBD memory set by IDABBDInit.

  The argument rr is not used.

  Return value:
  The value returned by this IDABBDPrecSetup function is a int
  flag indicating whether it was successful.  This value is
     0    if successful,
   > 0    for a recoverable error (step will be retried), or
   < 0    for a nonrecoverable error (step fails).
  ----------------------------------------------------------------*/
static int IDABBDPrecSetup(realtype tt,
                           N_Vector yy, N_Vector yp, N_Vector rr,
                           realtype c_j, void *bbd_data)
{
  IBBDPrecData pdata;
  IDAMem IDA_mem;
  int retval;

  pdata =(IBBDPrecData) bbd_data;
  IDA_mem = (IDAMem) pdata->ida_mem;

  /* Call IBBDDQJac for a new Jacobian calculation and store in PP.
*/
  /* NOTE(review): the SUNMatZero return value is immediately
     overwritten by the IBBDDQJac result, so a matrix-zeroing failure
     is not detected -- presumably intentional best-effort; confirm. */
  retval = SUNMatZero(pdata->PP);
  retval = IBBDDQJac(pdata, tt, c_j, yy, yp,
                     pdata->tempv1, pdata->tempv2,
                     pdata->tempv3, pdata->tempv4);
  if (retval < 0) {
    IDAProcessError(IDA_mem, -1, "IDASBBDPRE",
                    "IDABBDPrecSetup", MSGBBD_FUNC_FAILED);
    return(-1);
  }
  if (retval > 0) {
    return(+1);
  }

  /* Do LU factorization of matrix and return error flag */
  retval = SUNLinSolSetup_Band(pdata->LS, pdata->PP);
  return(retval);
}

/*---------------------------------------------------------------
  IDABBDPrecSolve

  The function IDABBDPrecSolve computes a solution to the linear
  system P z = r, where P is the left preconditioner defined by
  the routine IDABBDPrecSetup.

  The IDABBDPrecSolve parameters used here are as follows:

  rvec is the input right-hand side vector r.

  zvec is the computed solution vector z.

  bbd_data is the pointer to BBD data set by IDABBDInit.

  The arguments tt, yy, yp, rr, c_j and delta are NOT used.

  IDABBDPrecSolve returns the value returned from the linear
  solver object.
  ---------------------------------------------------------------*/
static int IDABBDPrecSolve(realtype tt,
                           N_Vector yy, N_Vector yp, N_Vector rr,
                           N_Vector rvec, N_Vector zvec,
                           realtype c_j, realtype delta, void *bbd_data)
{
  IBBDPrecData pdata;
  int retval;

  pdata = (IBBDPrecData) bbd_data;

  /* Attach local data arrays for rvec and zvec to rlocal and zlocal
     (the serial vectors were created empty, so no copy is needed) */
  N_VSetArrayPointer(N_VGetArrayPointer(rvec), pdata->rlocal);
  N_VSetArrayPointer(N_VGetArrayPointer(zvec), pdata->zlocal);

  /* Call banded solver object to do the work */
  retval = SUNLinSolSolve(pdata->LS, pdata->PP, pdata->zlocal,
                          pdata->rlocal, ZERO);

  /* Detach local data arrays from rlocal and zlocal */
  N_VSetArrayPointer(NULL, pdata->rlocal);
  N_VSetArrayPointer(NULL, pdata->zlocal);

  return(retval);
}

/*-------------------------------------------------------------*/

/* IDABBDPrecFree:

   Releases all memory owned by the BBD preconditioner data (linear
   solver, work vectors, band matrix, and the data block itself).
   Attached to the IDALS interface as the pfree routine. */
static int IDABBDPrecFree(IDAMem IDA_mem)
{
  IDALsMem idals_mem;
  IBBDPrecData pdata;

  if (IDA_mem->ida_lmem == NULL) return(0);
  idals_mem = (IDALsMem) IDA_mem->ida_lmem;
  if (idals_mem->pdata ==
NULL) return(0);
  pdata = (IBBDPrecData) idals_mem->pdata;

  SUNLinSolFree(pdata->LS);
  N_VDestroy(pdata->rlocal);
  N_VDestroy(pdata->zlocal);
  N_VDestroy(pdata->tempv1);
  N_VDestroy(pdata->tempv2);
  N_VDestroy(pdata->tempv3);
  N_VDestroy(pdata->tempv4);
  SUNMatDestroy(pdata->PP);

  free(pdata);
  pdata = NULL;

  return(0);
}

/*---------------------------------------------------------------
  IBBDDQJac

  This routine generates a banded difference quotient approximation
  to the local block of the Jacobian of G(t,y,y').  It assumes that
  a band matrix of type SUNMatrix is stored column-wise, and that
  elements within each column are contiguous.

  All matrix elements are generated as difference quotients, by way
  of calls to the user routine glocal.  By virtue of the band
  structure, the number of these calls is bandwidth + 1, where
  bandwidth = mldq + mudq + 1.  But the band matrix kept has
  bandwidth = mlkeep + mukeep + 1.

  This routine also assumes that the local elements of a vector are
  stored contiguously.

  Return values are: 0 (success), > 0 (recoverable error),
  or < 0 (nonrecoverable error).
  ----------------------------------------------------------------*/
static int IBBDDQJac(IBBDPrecData pdata, realtype tt, realtype cj,
                     N_Vector yy, N_Vector yp, N_Vector gref,
                     N_Vector ytemp, N_Vector yptemp, N_Vector gtemp)
{
  IDAMem IDA_mem;
  realtype inc, inc_inv;
  int retval;
  sunindextype group, i, j, width, ngroups, i1, i2;
  realtype *ydata, *ypdata, *ytempdata, *yptempdata, *grefdata, *gtempdata;
  realtype *cnsdata = NULL, *ewtdata;
  realtype *col_j, conj, yj, ypj, ewtj;

  IDA_mem = (IDAMem) pdata->ida_mem;

  /* Initialize ytemp and yptemp (working copies of yy and yp). */
  N_VScale(ONE, yy, ytemp);
  N_VScale(ONE, yp, yptemp);

  /* Obtain pointers as required to the data array of vectors.
*/
  ydata     = N_VGetArrayPointer(yy);
  ypdata    = N_VGetArrayPointer(yp);
  gtempdata = N_VGetArrayPointer(gtemp);
  ewtdata   = N_VGetArrayPointer(IDA_mem->ida_ewt);
  if (IDA_mem->ida_constraintsSet)
    cnsdata = N_VGetArrayPointer(IDA_mem->ida_constraints);
  ytempdata = N_VGetArrayPointer(ytemp);
  yptempdata= N_VGetArrayPointer(yptemp);
  grefdata = N_VGetArrayPointer(gref);

  /* Call gcomm and glocal to get base value of G(t,y,y'). */
  if (pdata->gcomm != NULL) {
    retval = pdata->gcomm(pdata->n_local, tt, yy, yp, IDA_mem->ida_user_data);
    if (retval != 0) return(retval);
  }
  retval = pdata->glocal(pdata->n_local, tt, yy, yp, gref,
                         IDA_mem->ida_user_data);
  pdata->nge++;
  if (retval != 0) return(retval);

  /* Set bandwidth and number of column groups for band differencing:
     columns a full DQ bandwidth apart can be perturbed together. */
  width = pdata->mldq + pdata->mudq + 1;
  ngroups = SUNMIN(width, pdata->n_local);

  /* Loop over groups. */
  for(group = 1; group <= ngroups; group++) {

    /* Loop over the components in this group. */
    for(j = group-1; j < pdata->n_local; j += width) {
      yj = ydata[j];
      ypj = ypdata[j];
      ewtj = ewtdata[j];

      /* Set increment inc to yj based on rel_yy*abs(yj), with
         adjustments using ypj and ewtj if this is small, and a
         further adjustment to give it the same sign as hh*ypj. */
      inc = pdata->rel_yy *
        SUNMAX(SUNRabs(yj), SUNMAX( SUNRabs(IDA_mem->ida_hh*ypj), ONE/ewtj));
      if (IDA_mem->ida_hh*ypj < ZERO) inc = -inc;
      /* force inc to a representable difference */
      inc = (yj + inc) - yj;

      /* Adjust sign(inc) again if yj has an inequality constraint. */
      if (IDA_mem->ida_constraintsSet) {
        conj = cnsdata[j];
        if (SUNRabs(conj) == ONE)      {if ((yj+inc)*conj <  ZERO) inc = -inc;}
        else if (SUNRabs(conj) == TWO) {if ((yj+inc)*conj <= ZERO) inc = -inc;}
      }

      /* Increment yj and ypj. */
      ytempdata[j] += inc;
      yptempdata[j] += cj*inc;
    }

    /* Evaluate G with incremented y and yp arguments. */
    retval = pdata->glocal(pdata->n_local, tt, ytemp, yptemp, gtemp,
                           IDA_mem->ida_user_data);
    pdata->nge++;
    if (retval != 0) return(retval);

    /* Loop over components of the group again; restore ytemp and yptemp.
*/
    for(j = group-1; j < pdata->n_local; j += width) {
      /* restore the perturbed entries while re-deriving the increment */
      yj = ytempdata[j] = ydata[j];
      ypj = yptempdata[j] = ypdata[j];
      ewtj = ewtdata[j];

      /* Set increment inc as before .*/
      inc = pdata->rel_yy *
        SUNMAX(SUNRabs(yj), SUNMAX( SUNRabs(IDA_mem->ida_hh*ypj), ONE/ewtj));
      if (IDA_mem->ida_hh*ypj < ZERO) inc = -inc;
      inc = (yj + inc) - yj;
      if (IDA_mem->ida_constraintsSet) {
        conj = cnsdata[j];
        if (SUNRabs(conj) == ONE)      {if ((yj+inc)*conj <  ZERO) inc = -inc;}
        else if (SUNRabs(conj) == TWO) {if ((yj+inc)*conj <= ZERO) inc = -inc;}
      }

      /* Form difference quotients and load into PP (only rows within
         the kept bandwidth mukeep/mlkeep are stored). */
      inc_inv = ONE/inc;
      col_j = SUNBandMatrix_Column(pdata->PP,j);
      i1 = SUNMAX(0, j-pdata->mukeep);
      i2 = SUNMIN(j + pdata->mlkeep, pdata->n_local-1);
      for(i = i1; i <= i2; i++)
        SM_COLUMN_ELEMENT_B(col_j,i,j) =
          inc_inv * (gtempdata[i] - grefdata[i]);
    }
  }

  return(0);
}

/*================================================================
  PART II - backward problems
  ================================================================*/

/*---------------------------------------------------------------
  User-Callable Functions: initialization, reinit and free
  ---------------------------------------------------------------*/

/* IDABBDPrecInitB:

   Initializes a BBD preconditioner for the backward problem with
   index 'which': calls IDABBDPrecInit on that problem's own IDA
   memory with the IDAAglocal/IDAAgcomm adjoint wrappers, then stores
   the user's glocalB/gcommB so the wrappers can forward to them. */
int IDABBDPrecInitB(void *ida_mem, int which, sunindextype NlocalB,
                    sunindextype mudqB, sunindextype mldqB,
                    sunindextype mukeepB, sunindextype mlkeepB,
                    realtype dq_rel_yyB,
                    IDABBDLocalFnB glocalB, IDABBDCommFnB gcommB)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  IDABBDPrecDataB idabbdB_mem;
  void *ida_memB;
  int flag;

  /* Check if ida_mem is allright. */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecInitB", MSG_LS_IDAMEM_NULL);
    return(IDALS_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Is ASA initialized?
*/
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDALS_NO_ADJ, "IDASBBDPRE",
                    "IDABBDPrecInitB", MSG_LS_NO_ADJ);
    return(IDALS_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASBBDPRE",
                    "IDABBDPrecInitB", MSG_LS_BAD_WHICH);
    return(IDALS_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'.
     NOTE(review): assumes every which < ia_nbckpbs has a matching
     entry; if not, IDAB_mem is NULL here and the dereference below
     would crash -- confirm list invariant. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  /* ida_mem corresponding to 'which' problem. */
  ida_memB = (void *) IDAB_mem->IDA_mem;

  /* Initialize the BBD preconditioner for this backward problem. */
  flag = IDABBDPrecInit(ida_memB, NlocalB, mudqB, mldqB,
                        mukeepB, mlkeepB, dq_rel_yyB,
                        IDAAglocal, IDAAgcomm);
  if (flag != IDA_SUCCESS) return(flag);

  /* Allocate memory for IDABBDPrecDataB to store the user-provided
     functions which will be called from the wrappers */
  idabbdB_mem = NULL;
  idabbdB_mem = (IDABBDPrecDataB) malloc(sizeof(* idabbdB_mem));
  if (idabbdB_mem == NULL) {
    IDAProcessError(IDA_mem, IDALS_MEM_FAIL, "IDASBBDPRE",
                    "IDABBDPrecInitB", MSGBBD_MEM_FAIL);
    return(IDALS_MEM_FAIL);
  }

  /* set pointers to user-provided functions */
  idabbdB_mem->glocalB = glocalB;
  idabbdB_mem->gcommB = gcommB;

  /* Attach pmem and pfree */
  IDAB_mem->ida_pmem = idabbdB_mem;
  IDAB_mem->ida_pfree = IDABBDPrecFreeB;

  return(IDALS_SUCCESS);
}

/*-------------------------------------------------------------*/

/* IDABBDPrecReInitB:

   Re-initializes the BBD preconditioner of backward problem 'which'
   by forwarding to IDABBDPrecReInit on that problem's IDA memory. */
int IDABBDPrecReInitB(void *ida_mem, int which,
                      sunindextype mudqB, sunindextype mldqB,
                      realtype dq_rel_yyB)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  void *ida_memB;
  int flag;

  /* Check if ida_mem is allright. */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDALS_MEM_NULL, "IDASBBDPRE",
                    "IDABBDPrecReInitB", MSG_LS_IDAMEM_NULL);
    return(IDALS_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Is ASA initialized?
*/
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDALS_NO_ADJ, "IDASBBDPRE",
                    "IDABBDPrecReInitB", MSG_LS_NO_ADJ);
    return(IDALS_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDALS_ILL_INPUT, "IDASBBDPRE",
                    "IDABBDPrecReInitB", MSG_LS_BAD_WHICH);
    return(IDALS_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'.
     NOTE(review): assumes the entry exists (see IDABBDPrecInitB). */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  /* ida_mem corresponding to 'which' backward problem. */
  ida_memB = (void *) IDAB_mem->IDA_mem;

  /* ReInitialize the BBD preconditioner for this backward problem. */
  flag = IDABBDPrecReInit(ida_memB, mudqB, mldqB, dq_rel_yyB);

  return(flag);
}

/*-------------------------------------------------------------*/

/* IDABBDPrecFreeB: releases the backward-problem wrapper data
   attached by IDABBDPrecInitB. */
static int IDABBDPrecFreeB(IDABMem IDAB_mem)
{
  free(IDAB_mem->ida_pmem);
  IDAB_mem->ida_pmem = NULL;
  return(0);
}

/*----------------------------------------------------------------
  Wrapper functions
  ----------------------------------------------------------------*/

/*----------------------------------------------------------------
  IDAAglocal

  This routine interfaces to the IDALocalFnB routine provided by
  the user.
  ----------------------------------------------------------------*/
static int IDAAglocal(sunindextype NlocalB, realtype tt,
                      N_Vector yyB, N_Vector ypB, N_Vector gvalB,
                      void *ida_mem)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  IDABBDPrecDataB idabbdB_mem;
  int flag;

  IDA_mem = (IDAMem) ida_mem;
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Get current backward problem. */
  IDAB_mem = IDAADJ_mem->ia_bckpbCrt;

  /* Get the preconditioner's memory. */
  idabbdB_mem = (IDABBDPrecDataB) IDAB_mem->ida_pmem;

  /* Get forward solution from interpolation.
*/
  if (IDAADJ_mem->ia_noInterp == SUNFALSE) {
    flag = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                               IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (flag != IDA_SUCCESS) {
      IDAProcessError(IDA_mem, -1, "IDASBBDPRE",
                      "IDAAglocal", MSGBBD_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint LocalFnB function. */
  return idabbdB_mem->glocalB(NlocalB, tt, IDAADJ_mem->ia_yyTmp,
                              IDAADJ_mem->ia_ypTmp, yyB, ypB, gvalB,
                              IDAB_mem->ida_user_data);
}

/*----------------------------------------------------------------
  IDAAgcomm

  This routine interfaces to the IDACommFnB routine provided by
  the user.
  ----------------------------------------------------------------*/
static int IDAAgcomm(sunindextype NlocalB, realtype tt,
                     N_Vector yyB, N_Vector ypB, void *ida_mem)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  IDABBDPrecDataB idabbdB_mem;
  int flag;

  IDA_mem = (IDAMem) ida_mem;
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Get current backward problem. */
  IDAB_mem = IDAADJ_mem->ia_bckpbCrt;

  /* Get the preconditioner's memory; the comm routine is optional. */
  idabbdB_mem = (IDABBDPrecDataB) IDAB_mem->ida_pmem;
  if (idabbdB_mem->gcommB == NULL) return(0);

  /* Get forward solution from interpolation. */
  if (IDAADJ_mem->ia_noInterp == SUNFALSE) {
    flag = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                               IDAADJ_mem->ia_ypTmp, NULL, NULL);
    if (flag != IDA_SUCCESS) {
      IDAProcessError(IDA_mem, -1, "IDASBBDPRE",
                      "IDAAgcomm", MSGBBD_BAD_T);
      return(-1);
    }
  }

  /* Call user's adjoint CommFnB routine */
  return idabbdB_mem->gcommB(NlocalB, tt, IDAADJ_mem->ia_yyTmp,
                             IDAADJ_mem->ia_ypTmp, yyB, ypB,
                             IDAB_mem->ida_user_data);
}
StanHeaders/src/idas/idas_ls_impl.h0000644000176200001440000002427714645137106017005 0ustar liggesusers/*-----------------------------------------------------------------
 * Programmer(s): Daniel R. Reynolds @ SMU
 *                Alan C.
Hindmarsh and Radu Serban @ LLNL *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Implementation header file for IDAS's linear solver interface. *-----------------------------------------------------------------*/ #ifndef _IDASLS_IMPL_H #define _IDASLS_IMPL_H #include #include "idas_impl.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*----------------------------------------------------------------- Types : struct IDALsMemRec, struct *IDALsMem The type IDALsMem is a pointer to a IDALsMemRec, which is a structure containing fields that must be accessible by LS module routines. -----------------------------------------------------------------*/ typedef struct IDALsMemRec { /* Linear solver type information */ booleantype iterative; /* is the solver iterative? */ booleantype matrixbased; /* is a matrix structure used? */ /* Jacobian construction & storage */ booleantype jacDQ; /* SUNTRUE if using internal DQ Jacobian approx. 
*/ IDALsJacFn jac; /* Jacobian routine to be called */ void *J_data; /* J_data is passed to jac */ /* Linear solver, matrix and vector objects/pointers */ SUNLinearSolver LS; /* generic linear solver object */ SUNMatrix J; /* J = dF/dy + cj*dF/dy' */ N_Vector ytemp; /* temp vector used by IDAAtimesDQ */ N_Vector yptemp; /* temp vector used by IDAAtimesDQ */ N_Vector x; /* temp vector used by the solve function */ N_Vector ycur; /* current y vector in Newton iteration */ N_Vector ypcur; /* current yp vector in Newton iteration */ N_Vector rcur; /* rcur = F(tn, ycur, ypcur) */ /* Matrix-based solver, scale solution to account for change in cj */ booleantype scalesol; /* Iterative solver tolerance */ realtype eplifac; /* nonlinear -> linear tol scaling factor */ realtype nrmfac; /* integrator -> LS norm conversion factor */ /* Statistics and associated parameters */ realtype dqincfac; /* dqincfac = optional increment factor in Jv */ long int nje; /* nje = no. of calls to jac */ long int npe; /* npe = total number of precond calls */ long int nli; /* nli = total number of linear iterations */ long int nps; /* nps = total number of psolve calls */ long int ncfl; /* ncfl = total number of convergence failures */ long int nreDQ; /* nreDQ = total number of calls to res */ long int njtsetup; /* njtsetup = total number of calls to jtsetup */ long int njtimes; /* njtimes = total number of calls to jtimes */ long int nst0; /* nst0 = saved nst (for performance monitor) */ long int nni0; /* nni0 = saved nni (for performance monitor) */ long int ncfn0; /* ncfn0 = saved ncfn (for performance monitor) */ long int ncfl0; /* ncfl0 = saved ncfl (for performance monitor) */ long int nwarn; /* nwarn = no. of warnings (for perf. monitor) */ int last_flag; /* last error return flag */ /* Preconditioner computation (a) user-provided: - pdata == user_data - pfree == NULL (the user dealocates memory) (b) internal preconditioner module - pdata == ida_mem - pfree == set by the prec. 
module and called in idaLsFree */ IDALsPrecSetupFn pset; IDALsPrecSolveFn psolve; int (*pfree)(IDAMem IDA_mem); void *pdata; /* Jacobian times vector compuation (a) jtimes function provided by the user: - jt_data == user_data - jtimesDQ == SUNFALSE (b) internal jtimes - jt_data == ida_mem - jtimesDQ == SUNTRUE */ booleantype jtimesDQ; IDALsJacTimesSetupFn jtsetup; IDALsJacTimesVecFn jtimes; IDAResFn jt_res; void *jt_data; } *IDALsMem; /*----------------------------------------------------------------- Prototypes of internal functions -----------------------------------------------------------------*/ /* Interface routines called by system SUNLinearSolver */ int idaLsATimes(void *ida_mem, N_Vector v, N_Vector z); int idaLsPSetup(void *ida_mem); int idaLsPSolve(void *ida_mem, N_Vector r, N_Vector z, realtype tol, int lr); /* Difference quotient approximation for Jac times vector */ int idaLsDQJtimes(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, N_Vector v, N_Vector Jv, realtype c_j, void *data, N_Vector work1, N_Vector work2); /* Difference-quotient Jacobian approximation routines */ int idaLsDQJac(realtype tt, realtype c_j, N_Vector yy, N_Vector yp, N_Vector rr, SUNMatrix Jac, void *data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); int idaLsDenseDQJac(realtype tt, realtype c_j, N_Vector yy, N_Vector yp, N_Vector rr, SUNMatrix Jac, IDAMem IDA_mem, N_Vector tmp1); int idaLsBandDQJac(realtype tt, realtype c_j, N_Vector yy, N_Vector yp, N_Vector rr, SUNMatrix Jac, IDAMem IDA_mem, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); /* Generic linit/lsetup/lsolve/lperf/lfree interface routines for IDA to call */ int idaLsInitialize(IDAMem IDA_mem); int idaLsSetup(IDAMem IDA_mem, N_Vector y, N_Vector yp, N_Vector r, N_Vector vt1, N_Vector vt2, N_Vector vt3); int idaLsSolve(IDAMem IDA_mem, N_Vector b, N_Vector weight, N_Vector ycur, N_Vector ypcur, N_Vector rescur); int idaLsPerf(IDAMem IDA_mem, int perftask); int idaLsFree(IDAMem IDA_mem); /* Auxilliary functions */ int 
idaLsInitializeCounters(IDALsMem idals_mem); int idaLs_AccessLMem(void* ida_mem, const char* fname, IDAMem* IDA_mem, IDALsMem* idals_mem); /*--------------------------------------------------------------- Error and Warning Messages ---------------------------------------------------------------*/ #if defined(SUNDIALS_EXTENDED_PRECISION) #define MSG_LS_TIME "at t = %Lg, " #define MSG_LS_FRMT "%Le." #elif defined(SUNDIALS_DOUBLE_PRECISION) #define MSG_LS_TIME "at t = %lg, " #define MSG_LS_FRMT "%le." #else #define MSG_LS_TIME "at t = %g, " #define MSG_LS_FRMT "%e." #endif /* Error Messages */ #define MSG_LS_IDAMEM_NULL "Integrator memory is NULL." #define MSG_LS_MEM_FAIL "A memory request failed." #define MSG_LS_BAD_NVECTOR "A required vector operation is not implemented." #define MSG_LS_BAD_SIZES "Illegal bandwidth parameter(s). Must have 0 <= ml, mu <= N-1." #define MSG_LS_BAD_LSTYPE "Incompatible linear solver type." #define MSG_LS_LMEM_NULL "Linear solver memory is NULL." #define MSG_LS_BAD_GSTYPE "gstype has an illegal value." #define MSG_LS_NEG_MAXRS "maxrs < 0 illegal." #define MSG_LS_NEG_EPLIFAC "eplifac < 0.0 illegal." #define MSG_LS_NEG_DQINCFAC "dqincfac < 0.0 illegal." #define MSG_LS_PSET_FAILED "The preconditioner setup routine failed in an unrecoverable manner." #define MSG_LS_PSOLVE_FAILED "The preconditioner solve routine failed in an unrecoverable manner." #define MSG_LS_JTSETUP_FAILED "The Jacobian x vector setup routine failed in an unrecoverable manner." #define MSG_LS_JTIMES_FAILED "The Jacobian x vector routine failed in an unrecoverable manner." #define MSG_LS_JACFUNC_FAILED "The Jacobian routine failed in an unrecoverable manner." #define MSG_LS_MATZERO_FAILED "The SUNMatZero routine failed in an unrecoverable manner." /* Warning Messages */ #define MSG_LS_WARN "Warning: " MSG_LS_TIME "poor iterative algorithm performance. 
" #define MSG_LS_CFN_WARN MSG_LS_WARN "Nonlinear convergence failure rate is " MSG_LS_FRMT #define MSG_LS_CFL_WARN MSG_LS_WARN "Linear convergence failure rate is " MSG_LS_FRMT /*----------------------------------------------------------------- PART II - backward problems -----------------------------------------------------------------*/ /*----------------------------------------------------------------- Types : IDALsMemRecB, IDALsMemB IDASetLinearSolverB attaches such a structure to the lmemB field of IDAadjMem -----------------------------------------------------------------*/ typedef struct IDALsMemRecB { IDALsJacFnB jacB; IDALsJacFnBS jacBS; IDALsJacTimesSetupFnB jtsetupB; IDALsJacTimesSetupFnBS jtsetupBS; IDALsJacTimesVecFnB jtimesB; IDALsJacTimesVecFnBS jtimesBS; IDALsPrecSetupFnB psetB; IDALsPrecSetupFnBS psetBS; IDALsPrecSolveFnB psolveB; IDALsPrecSolveFnBS psolveBS; void *P_dataB; } *IDALsMemB; /*----------------------------------------------------------------- Prototypes of internal functions -----------------------------------------------------------------*/ int idaLsFreeB(IDABMem IDAB_mem); int idaLs_AccessLMemB(void *ida_mem, int which, const char *fname, IDAMem *IDA_mem, IDAadjMem *IDAADJ_mem, IDABMem *IDAB_mem, IDALsMemB *idalsB_mem); int idaLs_AccessLMemBCur(void *ida_mem, const char *fname, IDAMem *IDA_mem, IDAadjMem *IDAADJ_mem, IDABMem *IDAB_mem, IDALsMemB *idalsB_mem); /*----------------------------------------------------------------- Error Messages -----------------------------------------------------------------*/ #define MSG_LS_CAMEM_NULL "idaadj_mem = NULL illegal." #define MSG_LS_LMEMB_NULL "Linear solver memory is NULL for the backward integration." #define MSG_LS_BAD_T "Bad t for interpolation." #define MSG_LS_BAD_WHICH "Illegal value for which." #define MSG_LS_NO_ADJ "Illegal attempt to call before calling IDAAdjInit." 
#ifdef __cplusplus
}
#endif

#endif
StanHeaders/src/idas/idaa.c0000644000176200001440000030535314645137106015244 0ustar liggesusers/*
 * -----------------------------------------------------------------
 * Programmer(s): Radu Serban @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * This is the implementation file for the IDAA adjoint integrator.
 * -----------------------------------------------------------------
 */

/*=================================================================*/
/*                   Import Header Files                           */
/*=================================================================*/

/* NOTE(review): the angle-bracketed header names appear to have been
   lost in text extraction -- the bare '#include' directives below are
   not compilable as-is; recover the original header list from the
   upstream SUNDIALS idaa.c. */
#include
#include

#include "idas_impl.h"

#include

/*=================================================================*/
/*                 IDAA Private Constants                          */
/*=================================================================*/

#define ZERO        RCONST(0.0)       /* real 0.0   */
#define ONE         RCONST(1.0)       /* real 1.0   */
#define TWO         RCONST(2.0)       /* real 2.0   */
#define HUNDRED     RCONST(100.0)     /* real 100.0 */
#define FUZZ_FACTOR RCONST(1000000.0) /* fuzz factor for IDAAgetY */

/*=================================================================*/
/*                       Shortcuts                                 */
/*=================================================================*/

#define IDA_PROFILER IDA_mem->ida_sunctx->profiler

/*=================================================================*/
/*               Private Functions Prototypes                      */
/*=================================================================*/

/* Checkpoint list management */
static CkpntMem IDAAckpntInit(IDAMem IDA_mem);
static CkpntMem IDAAckpntNew(IDAMem IDA_mem);
static void IDAAckpntCopyVectors(IDAMem IDA_mem, CkpntMem ck_mem);
static booleantype IDAAckpntAllocVectors(IDAMem IDA_mem, CkpntMem ck_mem);
static void IDAAckpntDelete(CkpntMem *ck_memPtr);

/* Backward-problem node deletion */
static void IDAAbckpbDelete(IDABMem *IDAB_memPtr);

/* Data-point storage between checkpoints */
static booleantype IDAAdataMalloc(IDAMem IDA_mem);
static void IDAAdataFree(IDAMem IDA_mem);
static int  IDAAdataStore(IDAMem IDA_mem, CkpntMem ck_mem);
static int  IDAAckpntGet(IDAMem IDA_mem, CkpntMem ck_mem);

/* Hermite interpolation module */
static booleantype IDAAhermiteMalloc(IDAMem IDA_mem);
static void IDAAhermiteFree(IDAMem IDA_mem);
static int IDAAhermiteStorePnt(IDAMem IDA_mem, DtpntMem d);
static int IDAAhermiteGetY(IDAMem IDA_mem, realtype t,
                           N_Vector yy, N_Vector yp,
                           N_Vector *yyS, N_Vector *ypS);

/* Polynomial interpolation module */
static booleantype IDAApolynomialMalloc(IDAMem IDA_mem);
static void IDAApolynomialFree(IDAMem IDA_mem);
static int IDAApolynomialStorePnt(IDAMem IDA_mem, DtpntMem d);
static int IDAApolynomialGetY(IDAMem IDA_mem, realtype t,
                              N_Vector yy, N_Vector yp,
                              N_Vector *yyS, N_Vector *ypS);

static int IDAAfindIndex(IDAMem ida_mem, realtype t,
                         long int *indx, booleantype *newpoint);

/* Wrappers presented to the backward IDAS instance as its residual
   and quadrature right-hand-side functions */
static int IDAAres(realtype tt,
                   N_Vector yyB, N_Vector ypB,
                   N_Vector resvalB, void *ida_mem);

static int IDAArhsQ(realtype tt,
                    N_Vector yyB, N_Vector ypB,
                    N_Vector rrQB, void *ida_mem);

static int IDAAGettnSolutionYp(IDAMem IDA_mem, N_Vector yp);
static int IDAAGettnSolutionYpS(IDAMem IDA_mem, N_Vector *ypS);

extern int IDAGetSolution(void *ida_mem, realtype t,
                          N_Vector yret, N_Vector ypret);

/*=================================================================*/
/*                  Exported Functions                             */
/*=================================================================*/

/*
 * IDAAdjInit
 *
 * This routine allocates space for the global IDAA memory
 * structure.
*/
int IDAAdjInit(void *ida_mem, long int steps, int interp)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;

  /* Check arguments */

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAAdjInit", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem)ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* 'steps' is the checkpointing interval; must be positive */
  if (steps <= 0) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAAdjInit", MSGAM_BAD_STEPS);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* only the Hermite and polynomial interpolation modules exist */
  if ( (interp != IDA_HERMITE) && (interp != IDA_POLYNOMIAL) ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAAdjInit", MSGAM_BAD_INTERP);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Allocate memory block for IDAadjMem. */
  IDAADJ_mem = (IDAadjMem) malloc(sizeof(struct IDAadjMemRec));
  if (IDAADJ_mem == NULL) {
    IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAA", "IDAAdjInit", MSGAM_MEM_FAIL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_MEM_FAIL);
  }

  /* Attach IDAS memory for forward runs */
  IDA_mem->ida_adj_mem = IDAADJ_mem;

  /* Initialization of check points. */
  IDAADJ_mem->ck_mem = NULL;
  IDAADJ_mem->ia_nckpnts = 0;
  IDAADJ_mem->ia_ckpntData = NULL;

  /* Initialization of interpolation data. */
  IDAADJ_mem->ia_interpType = interp;
  IDAADJ_mem->ia_nsteps = steps;

  /* Last index used in IDAAfindIndex, initialize to invalid value */
  IDAADJ_mem->ia_ilast = -1;

  /* Allocate space for the array of Data Point structures. */
  if (IDAAdataMalloc(IDA_mem) == SUNFALSE) {
    free(IDAADJ_mem); IDAADJ_mem = NULL;
    IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAA", "IDAAdjInit", MSGAM_MEM_FAIL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_MEM_FAIL);
  }

  /* Attach functions for the appropriate interpolation module */
  switch(interp) {

  case IDA_HERMITE:
    IDAADJ_mem->ia_malloc   = IDAAhermiteMalloc;
    IDAADJ_mem->ia_free     = IDAAhermiteFree;
    IDAADJ_mem->ia_getY     = IDAAhermiteGetY;
    IDAADJ_mem->ia_storePnt = IDAAhermiteStorePnt;
    break;

  case IDA_POLYNOMIAL:
    IDAADJ_mem->ia_malloc   = IDAApolynomialMalloc;
    IDAADJ_mem->ia_free     = IDAApolynomialFree;
    IDAADJ_mem->ia_getY     = IDAApolynomialGetY;
    IDAADJ_mem->ia_storePnt = IDAApolynomialStorePnt;
    break;
  }

  /* The interpolation module has not been initialized yet */
  IDAADJ_mem->ia_mallocDone = SUNFALSE;

  /* By default we will store but not interpolate sensitivities
   *  - storeSensi will be set in IDASolveF to SUNFALSE if FSA is not enabled
   *    or if the user forced this through IDAAdjSetNoSensi
   *  - interpSensi will be set in IDASolveB to SUNTRUE if storeSensi is
   *    SUNTRUE and if at least one backward problem requires sensitivities
   *  - noInterp will be set in IDACalcICB to SUNTRUE before the call to
   *    IDACalcIC and SUNFALSE after. */
  IDAADJ_mem->ia_storeSensi  = SUNTRUE;
  IDAADJ_mem->ia_interpSensi = SUNFALSE;
  IDAADJ_mem->ia_noInterp    = SUNFALSE;

  /* Initialize backward problems. */
  IDAADJ_mem->IDAB_mem    = NULL;
  IDAADJ_mem->ia_bckpbCrt = NULL;
  IDAADJ_mem->ia_nbckpbs  = 0;

  /* IDASolveF and IDASolveB not called yet. */
  IDAADJ_mem->ia_firstIDAFcall = SUNTRUE;
  IDAADJ_mem->ia_tstopIDAFcall = SUNFALSE;
  IDAADJ_mem->ia_firstIDABcall = SUNTRUE;
  IDAADJ_mem->ia_rootret = SUNFALSE;

  /* Adjoint module initialized and allocated. */
  IDA_mem->ida_adj = SUNTRUE;
  IDA_mem->ida_adjMallocDone = SUNTRUE;

  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(IDA_SUCCESS);
}

/*
 * IDAAdjReInit
 *
 * IDAAdjReInit reinitializes the IDAS memory structure for ASA:
 * it drops all stored checkpoints but keeps the interpolation type
 * and checkpointing interval chosen at IDAAdjInit time.
 */
int IDAAdjReInit(void *ida_mem)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;

  /* Check arguments */

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAAdjReInit", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem)ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Was ASA previously initialized? */
  if(IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAAdjReInit", MSGAM_NO_ADJ);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_ADJ);
  }

  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Free all stored checkpoints. */
  while (IDAADJ_mem->ck_mem != NULL)
    IDAAckpntDelete(&(IDAADJ_mem->ck_mem));

  IDAADJ_mem->ck_mem = NULL;
  IDAADJ_mem->ia_nckpnts = 0;
  IDAADJ_mem->ia_ckpntData = NULL;

  /* Flags for tracking the first calls to IDASolveF and IDASolveB. */
  IDAADJ_mem->ia_firstIDAFcall = SUNTRUE;
  IDAADJ_mem->ia_tstopIDAFcall = SUNFALSE;
  IDAADJ_mem->ia_firstIDABcall = SUNTRUE;

  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(IDA_SUCCESS);
}

/*
 * IDAAdjFree
 *
 * IDAAdjFree routine frees the memory allocated by IDAAdjInit:
 * all checkpoints, the interpolation data, every backward problem,
 * and finally the adjoint memory block itself.
 */
void IDAAdjFree(void *ida_mem)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;

  if (ida_mem == NULL) return;
  IDA_mem = (IDAMem) ida_mem;

  if(IDA_mem->ida_adjMallocDone) {

    /* Data for adjoint. */
    IDAADJ_mem = IDA_mem->ida_adj_mem;

    /* Delete check points one by one */
    while (IDAADJ_mem->ck_mem != NULL) {
      IDAAckpntDelete(&(IDAADJ_mem->ck_mem));
    }

    IDAAdataFree(IDA_mem);

    /* Free all backward problems. */
    while (IDAADJ_mem->IDAB_mem != NULL)
      IDAAbckpbDelete( &(IDAADJ_mem->IDAB_mem) );

    /* Free IDAA memory. 
*/
    free(IDAADJ_mem);
    IDA_mem->ida_adj_mem = NULL;
  }
}

/*
 * =================================================================
 * PRIVATE FUNCTIONS FOR BACKWARD PROBLEMS
 * =================================================================
 */

/*
 * IDAAbckpbDelete
 *
 * Unlinks the head node of the backward-problem list and releases,
 * in order: the node's IDAS memory, its linear solver memory, its
 * preconditioner memory, its workspace vectors, and the node itself.
 */
static void IDAAbckpbDelete(IDABMem *IDAB_memPtr)
{
  IDABMem IDAB_mem = (*IDAB_memPtr);
  void * ida_mem;

  if (IDAB_mem == NULL) return;

  /* Move head to the next element in list. */
  *IDAB_memPtr = IDAB_mem->ida_next;

  /* IDAB_mem is going to be deallocated. */

  /* Free IDAS memory for this backward problem. */
  ida_mem = (void *)IDAB_mem->IDA_mem;
  IDAFree(&ida_mem);

  /* Free linear solver memory. */
  if (IDAB_mem->ida_lfree != NULL) IDAB_mem->ida_lfree(IDAB_mem);

  /* Free preconditioner memory. */
  if (IDAB_mem->ida_pfree != NULL) IDAB_mem->ida_pfree(IDAB_mem);

  /* Free any workspace vectors. */
  N_VDestroy(IDAB_mem->ida_yy);
  N_VDestroy(IDAB_mem->ida_yp);

  /* Free the node itself. */
  free(IDAB_mem);
  IDAB_mem = NULL;
}

/*=================================================================*/
/*                     Wrappers for IDAA                           */
/*=================================================================*/

/*
 * IDASolveF
 *
 * This routine integrates to tout and returns solution into yret.
 * At the same time, it stores check point data every 'steps' steps.
 *
 * IDASolveF can be called repeatedly by the user. The last tout
 * will be used as the starting time for the backward integration.
 *
 * ncheckPtr points to the number of check points stored so far.
 */
int IDASolveF(void *ida_mem, realtype tout, realtype *tret,
              N_Vector yret, N_Vector ypret, int itask, int *ncheckPtr)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;
  CkpntMem tmp;
  DtpntMem *dt_mem;
  long int nstloc;
  int flag, i;
  booleantype allocOK, earlyret;
  realtype ttest;

  /* Is the mem OK? */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASolveF", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Is ASA initialized ? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASolveF", MSGAM_NO_ADJ);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check for yret != NULL */
  if (yret == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveF", MSG_YRET_NULL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Check for ypret != NULL */
  if (ypret == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveF", MSG_YPRET_NULL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Check for tret != NULL */
  if (tret == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveF", MSG_TRET_NULL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Check for valid itask */
  if ( (itask != IDA_NORMAL) && (itask != IDA_ONE_STEP) ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveF", MSG_BAD_ITASK);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* All memory checks done, proceed ... */
  dt_mem = IDAADJ_mem->dt_mem;

  /* If tstop is enabled, store some info */
  if (IDA_mem->ida_tstopset) {
    IDAADJ_mem->ia_tstopIDAFcall = SUNTRUE;
    IDAADJ_mem->ia_tstopIDAF = IDA_mem->ida_tstop;
  }

  /* On the first step:
   *   - set tinitial
   *   - initialize list of check points
   *   - if needed, initialize the interpolation module
   *   - load dt_mem[0]
   * On subsequent steps, test if taking a new step is necessary. */
  if ( IDAADJ_mem->ia_firstIDAFcall ) {

    IDAADJ_mem->ia_tinitial = IDA_mem->ida_tn;

    IDAADJ_mem->ck_mem = IDAAckpntInit(IDA_mem);
    if (IDAADJ_mem->ck_mem == NULL) {
      IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAA", "IDASolveF", MSG_MEM_FAIL);
      SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
      return(IDA_MEM_FAIL);
    }

    if (!IDAADJ_mem->ia_mallocDone) {

      /* Do we need to store sensitivities? */
      if (!IDA_mem->ida_sensi) IDAADJ_mem->ia_storeSensi = SUNFALSE;

      /* Allocate space for interpolation data */
      allocOK = IDAADJ_mem->ia_malloc(IDA_mem);
      if (!allocOK) {
        IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAA", "IDASolveF", MSG_MEM_FAIL);
        SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
        return(IDA_MEM_FAIL);
      }

      /* Rename phi and, if needed, phiS for use in interpolation */
      /* NOTE(review): the two 'for' loops below are garbled -- the text
         between '<' and '>' (the loop bound and the 'IDAADJ_mem->' target
         prefix) appears to have been stripped during extraction.  Restore
         these lines from the upstream SUNDIALS idaa.c before compiling. */
      for (i=0;iia_Y[i] = IDA_mem->ida_phi[i];
      if (IDAADJ_mem->ia_storeSensi) {
        for (i=0;iia_YS[i] = IDA_mem->ida_phiS[i];
      }

      IDAADJ_mem->ia_mallocDone = SUNTRUE;
    }

    dt_mem[0]->t = IDAADJ_mem->ck_mem->ck_t0;
    IDAADJ_mem->ia_storePnt(IDA_mem, dt_mem[0]);

    IDAADJ_mem->ia_firstIDAFcall = SUNFALSE;

  } else if ( itask == IDA_NORMAL ) {

    /* When in normal mode, check if tout was passed or if a previous root
       was not reported and return an interpolated solution. No changes to
       ck_mem or dt_mem are needed. */

    /* flag to signal if an early return is needed */
    earlyret = SUNFALSE;

    /* if a root needs to be reported compare tout to troot otherwise
       compare to the current time tn */
    ttest = (IDAADJ_mem->ia_rootret) ? IDAADJ_mem->ia_troot : IDA_mem->ida_tn;

    if ((ttest - tout)*IDA_mem->ida_hh >= ZERO) {
      /* ttest is after tout, interpolate to tout */
      *tret = tout;
      flag = IDAGetSolution(IDA_mem, tout, yret, ypret);
      earlyret = SUNTRUE;
    } else if (IDAADJ_mem->ia_rootret) {
      /* tout is after troot, interpolate to troot */
      *tret = IDAADJ_mem->ia_troot;
      flag = IDAGetSolution(IDA_mem, IDAADJ_mem->ia_troot, yret, ypret);
      flag = IDA_ROOT_RETURN;
      IDAADJ_mem->ia_rootret = SUNFALSE;
      earlyret = SUNTRUE;
    }

    /* return if necessary */
    if (earlyret) {
      *ncheckPtr = IDAADJ_mem->ia_nckpnts;
      IDAADJ_mem->ia_newData = SUNTRUE;
      IDAADJ_mem->ia_ckpntData = IDAADJ_mem->ck_mem;
      IDAADJ_mem->ia_np = IDA_mem->ida_nst % IDAADJ_mem->ia_nsteps + 1;
      SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
      return(flag);
    }

  }

  /* Integrate to tout (in IDA_ONE_STEP mode) while loading check points */
  nstloc = 0;
  for(;;) {

    /* Check for too many steps */
    if ( (IDA_mem->ida_mxstep>0) && (nstloc >= IDA_mem->ida_mxstep) ) {
      IDAProcessError(IDA_mem, IDA_TOO_MUCH_WORK, "IDAA", "IDASolveF",
                      MSG_MAX_STEPS, IDA_mem->ida_tn);
      flag = IDA_TOO_MUCH_WORK;
      break;
    }

    /* Perform one step of the integration */

    flag = IDASolve(IDA_mem, tout, tret, yret, ypret, IDA_ONE_STEP);
    if (flag < 0) break;

    nstloc++;

    /* Test if a new check point is needed */

    if ( IDA_mem->ida_nst % IDAADJ_mem->ia_nsteps == 0 ) {

      IDAADJ_mem->ck_mem->ck_t1 = IDA_mem->ida_tn;

      /* Create a new check point, load it, and append it to the list */
      tmp = IDAAckpntNew(IDA_mem);
      if (tmp == NULL) {
        flag = IDA_MEM_FAIL;
        break;
      }
      tmp->ck_next = IDAADJ_mem->ck_mem;
      IDAADJ_mem->ck_mem = tmp;
      IDAADJ_mem->ia_nckpnts++;

      IDA_mem->ida_forceSetup = SUNTRUE;

      /* Reset i=0 and load dt_mem[0] */
      dt_mem[0]->t = IDAADJ_mem->ck_mem->ck_t0;
      IDAADJ_mem->ia_storePnt(IDA_mem, dt_mem[0]);

    } else {

      /* Load next point in dt_mem */
      dt_mem[IDA_mem->ida_nst%IDAADJ_mem->ia_nsteps]->t = IDA_mem->ida_tn;
      IDAADJ_mem->ia_storePnt(IDA_mem,
                              dt_mem[IDA_mem->ida_nst % IDAADJ_mem->ia_nsteps]);
    }

    /* Set t1 field of the current check point structure for the case in
       which there will be no future check points */
    IDAADJ_mem->ck_mem->ck_t1 = IDA_mem->ida_tn;

    /* tfinal is now set to tn */
    IDAADJ_mem->ia_tfinal = IDA_mem->ida_tn;

    /* Return if in IDA_ONE_STEP mode */
    if (itask == IDA_ONE_STEP) break;

    /* IDA_NORMAL_STEP returns */

    /* Return if tout reached */
    if ( (*tret - tout)*IDA_mem->ida_hh >= ZERO ) {

      /* If this was a root return, save the root time to return later */
      if (flag == IDA_ROOT_RETURN) {
        IDAADJ_mem->ia_rootret = SUNTRUE;
        IDAADJ_mem->ia_troot   = *tret;
      }

      /* Get solution value at tout to return now */
      *tret = tout;
      flag = IDAGetSolution(IDA_mem, tout, yret, ypret);

      /* Reset tretlast in IDA_mem so that IDAGetQuad and IDAGetSens
       * evaluate quadratures and/or sensitivities at the proper time */
      IDA_mem->ida_tretlast = tout;
      break;
    }

    /* Return if tstop or a root was found */
    if ((flag == IDA_TSTOP_RETURN) || (flag == IDA_ROOT_RETURN)) break;

  } /* end of for(;;) */

  /* Get ncheck from IDAADJ_mem */
  *ncheckPtr = IDAADJ_mem->ia_nckpnts;

  /* Data is available for the last interval */
  IDAADJ_mem->ia_newData = SUNTRUE;
  IDAADJ_mem->ia_ckpntData = IDAADJ_mem->ck_mem;
  IDAADJ_mem->ia_np = IDA_mem->ida_nst % IDAADJ_mem->ia_nsteps + 1;

  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(flag);
}

/*
 * =================================================================
 * FUNCTIONS FOR BACKWARD PROBLEMS
 * =================================================================
 */

/*
 * IDACreateB
 *
 * Creates a new backward problem, prepends it to the linked list kept
 * in the adjoint memory, and returns its identifying index in *which.
 */
int IDACreateB(void *ida_mem, int *which)
{
  IDAMem IDA_mem;
  void* ida_memB;
  IDABMem new_IDAB_mem;
  IDAadjMem IDAADJ_mem;

  /* Is the mem OK? */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDACreateB", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Is ASA initialized ? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDACreateB", MSGAM_NO_ADJ);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Allocate a new IDABMem struct. 
*/
  new_IDAB_mem = (IDABMem) malloc( sizeof( struct IDABMemRec ) );
  if (new_IDAB_mem == NULL) {
    IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAA", "IDACreateB", MSG_MEM_FAIL);
    return(IDA_MEM_FAIL);
  }

  /* Allocate the IDAMem struct needed by this backward problem. */
  ida_memB = IDACreate(IDA_mem->ida_sunctx);
  if (ida_memB == NULL) {
    IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAA", "IDACreateB", MSG_MEM_FAIL);
    return(IDA_MEM_FAIL);
  }

  /* Save ida_mem in ida_memB as user data. */
  IDASetUserData(ida_memB, ida_mem);

  /* Set same error output and handler for ida_memB. */
  IDASetErrHandlerFn(ida_memB, IDA_mem->ida_ehfun, IDA_mem->ida_eh_data);
  IDASetErrFile(ida_memB, IDA_mem->ida_errfp);

  /* Initialize fields in the IDABMem struct. */
  new_IDAB_mem->ida_index = IDAADJ_mem->ia_nbckpbs;
  new_IDAB_mem->IDA_mem = (IDAMem) ida_memB;
  new_IDAB_mem->ida_res = NULL;
  new_IDAB_mem->ida_resS = NULL;
  new_IDAB_mem->ida_rhsQ = NULL;
  new_IDAB_mem->ida_rhsQS = NULL;
  new_IDAB_mem->ida_user_data = NULL;
  new_IDAB_mem->ida_lmem = NULL;
  new_IDAB_mem->ida_lfree = NULL;
  new_IDAB_mem->ida_pmem = NULL;
  new_IDAB_mem->ida_pfree = NULL;
  new_IDAB_mem->ida_yy = NULL;
  new_IDAB_mem->ida_yp = NULL;
  new_IDAB_mem->ida_res_withSensi = SUNFALSE;
  new_IDAB_mem->ida_rhsQ_withSensi = SUNFALSE;

  /* Attach the new object to the beginning of the linked list
     IDAADJ_mem->IDAB_mem. */
  new_IDAB_mem->ida_next = IDAADJ_mem->IDAB_mem;
  IDAADJ_mem->IDAB_mem = new_IDAB_mem;

  /* Return the assigned index. This id is used as identificator and has to
     be passed to IDAInitB and other ***B functions that set the optional
     inputs for this backward problem. */
  *which = IDAADJ_mem->ia_nbckpbs;

  /* Increase the counter of the backward problems stored. */
  IDAADJ_mem->ia_nbckpbs++;

  return(IDA_SUCCESS);
}

/*
 * IDAInitB
 *
 * Initializes the 'which' backward problem with residual function resB,
 * start time tB0 and initial values (yyB0, ypB0).  The actual residual
 * passed to the backward IDAS instance is the IDAAres wrapper.
 */
int IDAInitB(void *ida_mem, int which, IDAResFnB resB,
             realtype tB0, N_Vector yyB0, N_Vector ypB0)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;
  IDABMem IDAB_mem;
  void * ida_memB;
  int flag;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAInitB", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Is ASA initialized ? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAInitB", MSGAM_NO_ADJ);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the initial time for this backward problem against the adjoint
     data. */
  if ( (tB0 < IDAADJ_mem->ia_tinitial) || (tB0 > IDAADJ_mem->ia_tfinal) ) {
    IDAProcessError(IDA_mem, IDA_BAD_TB0, "IDAA", "IDAInitB", MSGAM_BAD_TB0);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_BAD_TB0);
  }

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAInitB", MSGAM_BAD_WHICH);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. */
  /* NOTE(review): if 'which' is negative it passes the range check above,
     the search below exhausts the list, and the dereference of IDAB_mem
     that follows would fault -- confirm against upstream and consider a
     NULL guard. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  /* Get the IDAMem corresponding to this backward problem. */
  ida_memB = (void*) IDAB_mem->IDA_mem;

  /* Call the IDAInit for this backward problem. */
  flag = IDAInit(ida_memB, IDAAres, tB0, yyB0, ypB0);
  if (IDA_SUCCESS != flag) {
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(flag);
  }

  /* Copy residual function in IDAB_mem. */
  IDAB_mem->ida_res = resB;
  IDAB_mem->ida_res_withSensi = SUNFALSE;

  /* Initialized the initial time field. */
  IDAB_mem->ida_t0 = tB0;

  /* Allocate and initialize space workspace vectors. */
  /* (note: ida_yp is cloned from yyB0 here, unlike IDAInitBS which clones
     from ypB0 -- presumably equivalent as only the template is used;
     TODO confirm) */
  IDAB_mem->ida_yy = N_VClone(yyB0);
  IDAB_mem->ida_yp = N_VClone(yyB0);
  N_VScale(ONE, yyB0, IDAB_mem->ida_yy);
  N_VScale(ONE, ypB0, IDAB_mem->ida_yp);

  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(flag);
}

/*
 * IDAInitBS
 *
 * Like IDAInitB, but for a backward problem whose residual also depends
 * on the forward sensitivities (resS receives them); requires that
 * sensitivities were stored during the forward integration.
 */
int IDAInitBS(void *ida_mem, int which, IDAResFnBS resS,
              realtype tB0, N_Vector yyB0, N_Vector ypB0)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;
  IDABMem IDAB_mem;
  void * ida_memB;
  int flag;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAInitBS", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Is ASA initialized ? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAInitBS", MSGAM_NO_ADJ);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the initial time for this backward problem against the adjoint
     data. */
  if ( (tB0 < IDAADJ_mem->ia_tinitial) || (tB0 > IDAADJ_mem->ia_tfinal) ) {
    IDAProcessError(IDA_mem, IDA_BAD_TB0, "IDAA", "IDAInitBS", MSGAM_BAD_TB0);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_BAD_TB0);
  }

  /* Were sensitivities active during the forward integration? */
  if (!IDAADJ_mem->ia_storeSensi) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAInitBS", MSGAM_BAD_SENSI);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAInitBS", MSGAM_BAD_WHICH);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  /* Get the IDAMem corresponding to this backward problem. */
  ida_memB = (void*) IDAB_mem->IDA_mem;

  /* Allocate and set the IDAS object */
  flag = IDAInit(ida_memB, IDAAres, tB0, yyB0, ypB0);
  if (flag != IDA_SUCCESS) {
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(flag);
  }

  /* Copy residual function pointer in IDAB_mem. */
  IDAB_mem->ida_res_withSensi = SUNTRUE;
  IDAB_mem->ida_resS = resS;

  /* Allocate space and initialize the yy and yp vectors. */
  IDAB_mem->ida_t0 = tB0;
  IDAB_mem->ida_yy = N_VClone(yyB0);
  IDAB_mem->ida_yp = N_VClone(ypB0);
  N_VScale(ONE, yyB0, IDAB_mem->ida_yy);
  N_VScale(ONE, ypB0, IDAB_mem->ida_yp);

  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(IDA_SUCCESS);
}

/*
 * IDAReInitB
 *
 * Reinitializes the 'which' backward problem with a new start time tB0
 * and new initial values, keeping its residual function.
 */
int IDAReInitB(void *ida_mem, int which,
               realtype tB0, N_Vector yyB0, N_Vector ypB0)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;
  IDABMem IDAB_mem;
  void * ida_memB;
  int flag;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAReInitB", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Is ASA initialized ? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAReInitB", MSGAM_NO_ADJ);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the initial time for this backward problem against the adjoint
     data. */
  if ( (tB0 < IDAADJ_mem->ia_tinitial) || (tB0 > IDAADJ_mem->ia_tfinal) ) {
    IDAProcessError(IDA_mem, IDA_BAD_TB0, "IDAA", "IDAReInitB", MSGAM_BAD_TB0);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_BAD_TB0);
  }

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAReInitB", MSGAM_BAD_WHICH);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } /* Get the IDAMem corresponding to this backward problem. */ ida_memB = (void*) IDAB_mem->IDA_mem; /* Call the IDAReInit for this backward problem. */ flag = IDAReInit(ida_memB, tB0, yyB0, ypB0); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(flag); } int IDASStolerancesB(void *ida_mem, int which, realtype relTolB, realtype absTolB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASStolerancesB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASStolerancesB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASStolerancesB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } /* Get the IDAMem corresponding to this backward problem. */ ida_memB = (void*) IDAB_mem->IDA_mem; /* Set tolerances and return. */ return IDASStolerances(ida_memB, relTolB, absTolB); } int IDASVtolerancesB(void *ida_mem, int which, realtype relTolB, N_Vector absTolB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASVtolerancesB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? 
*/
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASVtolerancesB", MSGAM_NO_ADJ);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASVtolerancesB", MSGAM_BAD_WHICH);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  /* Get the IDAMem corresponding to this backward problem. */
  ida_memB = (void*) IDAB_mem->IDA_mem;

  /* Set tolerances and return. */
  return IDASVtolerances(ida_memB, relTolB, absTolB);
}

/*
 * IDAQuadSStolerancesB
 *
 * Sets scalar tolerances for the quadrature variables of the 'which'
 * backward problem by forwarding to IDAQuadSStolerances.
 */
int IDAQuadSStolerancesB(void *ida_mem, int which,
                         realtype reltolQB, realtype abstolQB)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  void *ida_memB;

  /* Is ida_mem valid? */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAQuadSStolerancesB", MSGAM_NULL_IDAMEM);
    return IDA_MEM_NULL;
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Is ASA initialized? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAQuadSStolerancesB", MSGAM_NO_ADJ);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAQuadSStolerancesB", MSGAM_BAD_WHICH);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  ida_memB = (void *) IDAB_mem->IDA_mem;

  return IDAQuadSStolerances(ida_memB, reltolQB, abstolQB);
}

/*
 * IDAQuadSVtolerancesB
 *
 * Vector-absolute-tolerance counterpart of IDAQuadSStolerancesB.
 */
int IDAQuadSVtolerancesB(void *ida_mem, int which,
                         realtype reltolQB, N_Vector abstolQB)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  void *ida_memB;

  /* Is ida_mem valid? */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAQuadSVtolerancesB", MSGAM_NULL_IDAMEM);
    return IDA_MEM_NULL;
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Is ASA initialized? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAQuadSVtolerancesB", MSGAM_NO_ADJ);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAQuadSVtolerancesB", MSGAM_BAD_WHICH);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  ida_memB = (void *) IDAB_mem->IDA_mem;

  return IDAQuadSVtolerances(ida_memB, reltolQB, abstolQB);
}

/*
 * IDAQuadInitB
 *
 * Initializes quadrature integration for the 'which' backward problem
 * with user right-hand-side rhsQB and initial values yQB0; the actual
 * RHS handed to the backward IDAS instance is the IDAArhsQ wrapper.
 */
int IDAQuadInitB(void *ida_mem, int which,
                 IDAQuadRhsFnB rhsQB, N_Vector yQB0)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  void *ida_memB;
  int flag;

  /* Is ida_mem valid? */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAQuadInitB", MSGAM_NULL_IDAMEM);
    return IDA_MEM_NULL;
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Is ASA initialized? 
*/
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAQuadInitB", MSGAM_NO_ADJ);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAQuadInitB", MSGAM_BAD_WHICH);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. */
  IDAB_mem = IDAADJ_mem->IDAB_mem;
  while (IDAB_mem != NULL) {
    if( which == IDAB_mem->ida_index ) break;
    /* advance */
    IDAB_mem = IDAB_mem->ida_next;
  }

  ida_memB = (void *) IDAB_mem->IDA_mem;

  /* Initialize the quadrature on the backward IDAS instance, using the
     IDAArhsQ wrapper as its right-hand side. */
  flag = IDAQuadInit(ida_memB, IDAArhsQ, yQB0);
  if (IDA_SUCCESS != flag) {
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(flag);
  }

  /* Record the user RHS; this backward problem's quadratures do not
     depend on forward sensitivities. */
  IDAB_mem->ida_rhsQ_withSensi = SUNFALSE;
  IDAB_mem->ida_rhsQ = rhsQB;

  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(flag);
}

/*
 * IDAQuadInitBS
 *
 * Like IDAQuadInitB, but for quadratures whose right-hand side also
 * depends on the forward sensitivities (rhsQS receives them).
 */
int IDAQuadInitBS(void *ida_mem, int which,
                  IDAQuadRhsFnBS rhsQS, N_Vector yQB0)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;
  IDABMem IDAB_mem;
  void * ida_memB;
  int flag;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAQuadInitBS", MSGAM_NULL_IDAMEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Is ASA initialized ? */
  if (IDA_mem->ida_adjMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAQuadInitBS", MSGAM_NO_ADJ);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_ADJ);
  }
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Check the value of which */
  if ( which >= IDAADJ_mem->ia_nbckpbs ) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAQuadInitBS", MSGAM_BAD_WHICH);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } /* Get the IDAMem corresponding to this backward problem. */ ida_memB = (void*) IDAB_mem->IDA_mem; /* Allocate and set the IDAS object */ flag = IDAQuadInit(ida_memB, IDAArhsQ, yQB0); if (flag != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(flag); } /* Copy RHS function pointer in IDAB_mem and enable quad sensitivities. */ IDAB_mem->ida_rhsQ_withSensi = SUNTRUE; IDAB_mem->ida_rhsQS = rhsQS; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } int IDAQuadReInitB(void *ida_mem, int which, N_Vector yQB0) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; int retval; void *ida_memB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAQuadInitB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAQuadInitB", MSGAM_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAQuadInitB", MSGAM_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; retval = IDAQuadReInit(ida_memB, yQB0); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(retval); } /* * ---------------------------------------------------------------- * Function : IDACalcICB * ---------------------------------------------------------------- * IDACalcIC calculates corrected initial conditions for a DAE * backward system (index-one in semi-implicit form). * It uses Newton iteration combined with a Linesearch algorithm. * Calling IDACalcICB is optional. It is only necessary when the * initial conditions do not solve the given system. I.e., if * yB0 and ypB0 are known to satisfy the backward problem, then * a call to IDACalcIC is NOT necessary (for index-one problems). */ int IDACalcICB(void *ida_mem, int which, realtype tout1, N_Vector yy0, N_Vector yp0) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; int flag; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDACalcICB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDACalcICB", MSGAM_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDACalcICB", MSGAM_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; /* The wrapper for user supplied res function requires ia_bckpbCrt from IDAAdjMem to be set to curent problem. */ IDAADJ_mem->ia_bckpbCrt = IDAB_mem; /* Save (y, y') in yyTmp and ypTmp for use in the res wrapper.*/ /* yyTmp and ypTmp workspaces are safe to use if IDAADataStore is not called.*/ N_VScale(ONE, yy0, IDAADJ_mem->ia_yyTmp); N_VScale(ONE, yp0, IDAADJ_mem->ia_ypTmp); /* Set noInterp flag to SUNTRUE, so IDAARes will use user provided values for y and y' and will not call the interpolation routine(s). */ IDAADJ_mem->ia_noInterp = SUNTRUE; flag = IDACalcIC(ida_memB, IDA_YA_YDP_INIT, tout1); /* Set interpolation on in IDAARes. */ IDAADJ_mem->ia_noInterp = SUNFALSE; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(flag); } /* * ---------------------------------------------------------------- * Function : IDACalcICBS * ---------------------------------------------------------------- * IDACalcIC calculates corrected initial conditions for a DAE * backward system (index-one in semi-implicit form) that also * dependes on the sensivities. * * It calls IDACalcIC for the 'which' backward problem. */ int IDACalcICBS(void *ida_mem, int which, realtype tout1, N_Vector yy0, N_Vector yp0, N_Vector *yyS0, N_Vector *ypS0) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; int flag, is, retval; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDACalcICBS", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Is ASA initialized? 
*/ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDACalcICBS", MSGAM_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Were sensitivities active during the forward integration? */ if (!IDAADJ_mem->ia_storeSensi) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDACalcICBS", MSGAM_BAD_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDACalcICBS", MSGAM_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; /* Was InitBS called for this problem? */ if (!IDAB_mem->ida_res_withSensi) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDACalcICBS", MSGAM_NO_INITBS); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* The wrapper for user supplied res function requires ia_bckpbCrt from IDAAdjMem to be set to curent problem. 
*/ IDAADJ_mem->ia_bckpbCrt = IDAB_mem; /* Save (y, y') and (y_p, y'_p) in yyTmp, ypTmp and yySTmp, ypSTmp.The wrapper for residual will use these values instead of calling interpolation routine.*/ /* The four workspaces variables are safe to use if IDAADataStore is not called.*/ N_VScale(ONE, yy0, IDAADJ_mem->ia_yyTmp); N_VScale(ONE, yp0, IDAADJ_mem->ia_ypTmp); for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = ONE; retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, yyS0, IDAADJ_mem->ia_yySTmp); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, ypS0, IDAADJ_mem->ia_ypSTmp); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } /* Set noInterp flag to SUNTRUE, so IDAARes will use user provided values for y and y' and will not call the interpolation routine(s). */ IDAADJ_mem->ia_noInterp = SUNTRUE; flag = IDACalcIC(ida_memB, IDA_YA_YDP_INIT, tout1); /* Set interpolation on in IDAARes. */ IDAADJ_mem->ia_noInterp = SUNFALSE; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(flag); } /* * IDASolveB * * This routine performs the backward integration from tB0 * to tinitial through a sequence of forward-backward runs in * between consecutive check points. It returns the values of * the adjoint variables and any existing quadrature variables * at tinitial. * * On a successful return, IDASolveB returns IDA_SUCCESS. * * NOTE that IDASolveB DOES NOT return the solution for the * backward problem(s). Use IDAGetB to extract the solution * for any given backward problem. * * If there are multiple backward problems and multiple check points, * IDASolveB may not succeed in getting all problems to take one step * when called in ONE_STEP mode. 
*/ int IDASolveB(void *ida_mem, realtype tBout, int itaskB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; CkpntMem ck_mem; IDABMem IDAB_mem, tmp_IDAB_mem; int flag=0, sign; realtype tfuzz, tBret, tBn; booleantype gotCkpnt, reachedTBout, isActive; /* Is the mem OK? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDASolveB", MSGAM_NULL_IDAMEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Is ASA initialized ? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDASolveB", MSGAM_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; if ( IDAADJ_mem->ia_nbckpbs == 0 ) { IDAProcessError(IDA_mem, IDA_NO_BCK, "IDAA", "IDASolveB", MSGAM_NO_BCK); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_BCK); } IDAB_mem = IDAADJ_mem->IDAB_mem; /* Check whether IDASolveF has been called */ if ( IDAADJ_mem->ia_firstIDAFcall ) { IDAProcessError(IDA_mem, IDA_NO_FWD, "IDAA", "IDASolveB", MSGAM_NO_FWD); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_FWD); } sign = (IDAADJ_mem->ia_tfinal - IDAADJ_mem->ia_tinitial > ZERO) ? 1 : -1; /* If this is the first call, loop over all backward problems and * - check that tB0 is valid * - check that tBout is ahead of tB0 in the backward direction * - check whether we need to interpolate forward sensitivities */ if (IDAADJ_mem->ia_firstIDABcall) { /* First IDABMem struct. 
*/ tmp_IDAB_mem = IDAB_mem; while (tmp_IDAB_mem != NULL) { tBn = tmp_IDAB_mem->IDA_mem->ida_tn; if ( (sign*(tBn-IDAADJ_mem->ia_tinitial) < ZERO) || (sign*(IDAADJ_mem->ia_tfinal-tBn) < ZERO) ) { IDAProcessError(IDA_mem, IDA_BAD_TB0, "IDAA", "IDASolveB", MSGAM_BAD_TB0, tmp_IDAB_mem->ida_index); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_TB0); } if (sign*(tBn-tBout) <= ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveB", MSGAM_BAD_TBOUT, tmp_IDAB_mem->ida_index); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if ( tmp_IDAB_mem->ida_res_withSensi || tmp_IDAB_mem->ida_rhsQ_withSensi ) IDAADJ_mem->ia_interpSensi = SUNTRUE; /* Advance in list. */ tmp_IDAB_mem = tmp_IDAB_mem->ida_next; } if ( IDAADJ_mem->ia_interpSensi && !IDAADJ_mem->ia_storeSensi) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveB", MSGAM_BAD_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDAADJ_mem->ia_firstIDABcall = SUNFALSE; } /* Check for valid itask */ if ( (itaskB != IDA_NORMAL) && (itaskB != IDA_ONE_STEP) ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveB", MSG_BAD_ITASK); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Check if tBout is legal */ if ( (sign*(tBout-IDAADJ_mem->ia_tinitial) < ZERO) || (sign*(IDAADJ_mem->ia_tfinal-tBout) < ZERO) ) { tfuzz = HUNDRED * IDA_mem->ida_uround * (SUNRabs(IDAADJ_mem->ia_tinitial) + SUNRabs(IDAADJ_mem->ia_tfinal)); if ( (sign*(tBout-IDAADJ_mem->ia_tinitial) < ZERO) && (SUNRabs(tBout-IDAADJ_mem->ia_tinitial) < tfuzz) ) { tBout = IDAADJ_mem->ia_tinitial; } else { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDASolveB", MSGAM_BAD_TBOUT); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } } /* Loop through the check points and stop as soon as a backward * problem has its tn value behind the current check point's t0_ * value (in the backward direction) */ ck_mem = IDAADJ_mem->ck_mem; gotCkpnt = SUNFALSE; for(;;) { 
tmp_IDAB_mem = IDAB_mem; while(tmp_IDAB_mem != NULL) { tBn = tmp_IDAB_mem->IDA_mem->ida_tn; if ( sign*(tBn-ck_mem->ck_t0) > ZERO ) { gotCkpnt = SUNTRUE; break; } if ( (itaskB == IDA_NORMAL) && (tBn == ck_mem->ck_t0) && (sign*(tBout-ck_mem->ck_t0) >= ZERO) ) { gotCkpnt = SUNTRUE; break; } tmp_IDAB_mem = tmp_IDAB_mem->ida_next; } if (gotCkpnt) break; if (ck_mem->ck_next == NULL) break; ck_mem = ck_mem->ck_next; } /* Loop while propagating backward problems */ for(;;) { /* Store interpolation data if not available. This is the 2nd forward integration pass */ if (ck_mem != IDAADJ_mem->ia_ckpntData) { flag = IDAAdataStore(IDA_mem, ck_mem); if (flag != IDA_SUCCESS) break; } /* Starting with the current check point from above, loop over check points while propagating backward problems */ tmp_IDAB_mem = IDAB_mem; while (tmp_IDAB_mem != NULL) { /* Decide if current backward problem is "active" in this check point */ isActive = SUNTRUE; tBn = tmp_IDAB_mem->IDA_mem->ida_tn; if ( (tBn == ck_mem->ck_t0) && (sign*(tBout-ck_mem->ck_t0) < ZERO ) ) isActive = SUNFALSE; if ( (tBn == ck_mem->ck_t0) && (itaskB == IDA_ONE_STEP) ) isActive = SUNFALSE; if ( sign*(tBn - ck_mem->ck_t0) < ZERO ) isActive = SUNFALSE; if ( isActive ) { /* Store the address of current backward problem memory * in IDAADJ_mem to be used in the wrapper functions */ IDAADJ_mem->ia_bckpbCrt = tmp_IDAB_mem; /* Integrate current backward problem */ IDASetStopTime(tmp_IDAB_mem->IDA_mem, ck_mem->ck_t0); flag = IDASolve(tmp_IDAB_mem->IDA_mem, tBout, &tBret, tmp_IDAB_mem->ida_yy, tmp_IDAB_mem->ida_yp, itaskB); /* Set the time at which we will report solution and/or quadratures */ tmp_IDAB_mem->ida_tout = tBret; /* If an error occurred, exit while loop */ if (flag < 0) break; } else { flag = IDA_SUCCESS; tmp_IDAB_mem->ida_tout = tBn; } /* Move to next backward problem */ tmp_IDAB_mem = tmp_IDAB_mem->ida_next; } /* End of while: iteration through backward problems. 
*/ /* If an error occurred, return now */ if (flag <0) { IDAProcessError(IDA_mem, flag, "IDAA", "IDASolveB", MSGAM_BACK_ERROR, tmp_IDAB_mem->ida_index); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(flag); } /* If in IDA_ONE_STEP mode, return now (flag = IDA_SUCCESS) */ if (itaskB == IDA_ONE_STEP) break; /* If all backward problems have succesfully reached tBout, return now */ reachedTBout = SUNTRUE; tmp_IDAB_mem = IDAB_mem; while(tmp_IDAB_mem != NULL) { if ( sign*(tmp_IDAB_mem->ida_tout - tBout) > ZERO ) { reachedTBout = SUNFALSE; break; } tmp_IDAB_mem = tmp_IDAB_mem->ida_next; } if ( reachedTBout ) break; /* Move check point in linked list to next one */ ck_mem = ck_mem->ck_next; } /* End of loop. */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(flag); } /* * IDAGetB * * IDAGetB returns the state variables at the same time (also returned * in tret) as that at which IDASolveBreturned the solution. */ int IDAGetB(void* ida_mem, int which, realtype *tret, N_Vector yy, N_Vector yp) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAGetB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAGetB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. 
*/ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } N_VScale(ONE, IDAB_mem->ida_yy, yy); N_VScale(ONE, IDAB_mem->ida_yp, yp); *tret = IDAB_mem->ida_tout; return(IDA_SUCCESS); } /* * IDAGetQuadB * * IDAGetQuadB returns the quadrature variables at the same * time (also returned in tret) as that at which IDASolveB * returned the solution. */ int IDAGetQuadB(void *ida_mem, int which, realtype *tret, N_Vector qB) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; void *ida_memB; int flag; long int nstB; /* Is ida_mem valid? */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetQuadB", MSGAM_NULL_IDAMEM); return IDA_MEM_NULL; } IDA_mem = (IDAMem) ida_mem; /* Is ASA initialized? */ if (IDA_mem->ida_adjMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_ADJ, "IDAA", "IDAGetQuadB", MSGAM_NO_ADJ); return(IDA_NO_ADJ); } IDAADJ_mem = IDA_mem->ida_adj_mem; /* Check the value of which */ if ( which >= IDAADJ_mem->ia_nbckpbs ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAA", "IDAGetQuadB", MSGAM_BAD_WHICH); return(IDA_ILL_INPUT); } /* Find the IDABMem entry in the linked list corresponding to 'which'. */ IDAB_mem = IDAADJ_mem->IDAB_mem; while (IDAB_mem != NULL) { if( which == IDAB_mem->ida_index ) break; /* advance */ IDAB_mem = IDAB_mem->ida_next; } ida_memB = (void *) IDAB_mem->IDA_mem; /* If the integration for this backward problem has not started yet, * simply return the current value of qB (i.e. 
the final conditions) */ flag = IDAGetNumSteps(ida_memB, &nstB); if (IDA_SUCCESS != flag) return(flag); if (nstB == 0) { N_VScale(ONE, IDAB_mem->IDA_mem->ida_phiQ[0], qB); *tret = IDAB_mem->ida_tout; } else { flag = IDAGetQuad(ida_memB, tret, qB); } return(flag); } /*=================================================================*/ /* Private Functions Implementation */ /*=================================================================*/ /* * IDAAckpntInit * * This routine initializes the check point linked list with * information from the initial time. */ static CkpntMem IDAAckpntInit(IDAMem IDA_mem) { CkpntMem ck_mem; /* Allocate space for ckdata */ ck_mem = (CkpntMem) malloc(sizeof(struct CkpntMemRec)); if (NULL==ck_mem) return(NULL); ck_mem->ck_t0 = IDA_mem->ida_tn; ck_mem->ck_nst = 0; ck_mem->ck_kk = 1; ck_mem->ck_hh = ZERO; /* Test if we need to carry quadratures */ ck_mem->ck_quadr = IDA_mem->ida_quadr && IDA_mem->ida_errconQ; /* Test if we need to carry sensitivities */ ck_mem->ck_sensi = IDA_mem->ida_sensi; if(ck_mem->ck_sensi) ck_mem->ck_Ns = IDA_mem->ida_Ns; /* Test if we need to carry quadrature sensitivities */ ck_mem->ck_quadr_sensi = IDA_mem->ida_quadr_sensi && IDA_mem->ida_errconQS; /* Alloc 3: current order, i.e. 1, + 2. */ ck_mem->ck_phi_alloc = 3; if (!IDAAckpntAllocVectors(IDA_mem, ck_mem)) { free(ck_mem); ck_mem = NULL; return(NULL); } /* Save phi* vectors from IDA_mem to ck_mem. */ IDAAckpntCopyVectors(IDA_mem, ck_mem); /* Next in list */ ck_mem->ck_next = NULL; return(ck_mem); } /* * IDAAckpntNew * * This routine allocates space for a new check point and sets * its data from current values in IDA_mem. 
*/ static CkpntMem IDAAckpntNew(IDAMem IDA_mem) { CkpntMem ck_mem; int j; /* Allocate space for ckdata */ ck_mem = (CkpntMem) malloc(sizeof(struct CkpntMemRec)); if (ck_mem == NULL) return(NULL); ck_mem->ck_nst = IDA_mem->ida_nst; ck_mem->ck_tretlast = IDA_mem->ida_tretlast; ck_mem->ck_kk = IDA_mem->ida_kk; ck_mem->ck_kused = IDA_mem->ida_kused; ck_mem->ck_knew = IDA_mem->ida_knew; ck_mem->ck_phase = IDA_mem->ida_phase; ck_mem->ck_ns = IDA_mem->ida_ns; ck_mem->ck_hh = IDA_mem->ida_hh; ck_mem->ck_hused = IDA_mem->ida_hused; ck_mem->ck_rr = IDA_mem->ida_rr; ck_mem->ck_cj = IDA_mem->ida_cj; ck_mem->ck_cjlast = IDA_mem->ida_cjlast; ck_mem->ck_cjold = IDA_mem->ida_cjold; ck_mem->ck_cjratio = IDA_mem->ida_cjratio; ck_mem->ck_ss = IDA_mem->ida_ss; ck_mem->ck_ssS = IDA_mem->ida_ssS; ck_mem->ck_t0 = IDA_mem->ida_tn; for (j=0; jck_psi[j] = IDA_mem->ida_psi[j]; ck_mem->ck_alpha[j] = IDA_mem->ida_alpha[j]; ck_mem->ck_beta[j] = IDA_mem->ida_beta[j]; ck_mem->ck_sigma[j] = IDA_mem->ida_sigma[j]; ck_mem->ck_gamma[j] = IDA_mem->ida_gamma[j]; } /* Test if we need to carry quadratures */ ck_mem->ck_quadr = IDA_mem->ida_quadr && IDA_mem->ida_errconQ; /* Test if we need to carry sensitivities */ ck_mem->ck_sensi = IDA_mem->ida_sensi; if(ck_mem->ck_sensi) ck_mem->ck_Ns = IDA_mem->ida_Ns; /* Test if we need to carry quadrature sensitivities */ ck_mem->ck_quadr_sensi = IDA_mem->ida_quadr_sensi && IDA_mem->ida_errconQS; ck_mem->ck_phi_alloc = (IDA_mem->ida_kk+2 < MXORDP1) ? IDA_mem->ida_kk+2 : MXORDP1; if (!IDAAckpntAllocVectors(IDA_mem, ck_mem)) { free(ck_mem); ck_mem = NULL; return(NULL); } /* Save phi* vectors from IDA_mem to ck_mem. */ IDAAckpntCopyVectors(IDA_mem, ck_mem); return(ck_mem); } /* IDAAckpntDelete * * This routine deletes the first check point in list. 
*/ static void IDAAckpntDelete(CkpntMem *ck_memPtr) { CkpntMem tmp; int j; if (*ck_memPtr != NULL) { /* store head of list */ tmp = *ck_memPtr; /* move head of list */ *ck_memPtr = (*ck_memPtr)->ck_next; /* free N_Vectors in tmp */ for (j=0; jck_phi_alloc; j++) N_VDestroy(tmp->ck_phi[j]); /* free N_Vectors for quadratures in tmp */ if (tmp->ck_quadr) { for (j=0; jck_phi_alloc; j++) N_VDestroy(tmp->ck_phiQ[j]); } /* Free sensitivity related data. */ if (tmp->ck_sensi) { for (j=0; jck_phi_alloc; j++) N_VDestroyVectorArray(tmp->ck_phiS[j], tmp->ck_Ns); } if (tmp->ck_quadr_sensi) { for (j=0; jck_phi_alloc; j++) N_VDestroyVectorArray(tmp->ck_phiQS[j], tmp->ck_Ns); } free(tmp); tmp=NULL; } } /* * IDAAckpntAllocVectors * * Allocate checkpoint's phi, phiQ, phiS, phiQS vectors needed to save * current state of IDAMem. * */ static booleantype IDAAckpntAllocVectors(IDAMem IDA_mem, CkpntMem ck_mem) { int j, jj; for (j=0; jck_phi_alloc; j++) { ck_mem->ck_phi[j] = N_VClone(IDA_mem->ida_tempv1); if(ck_mem->ck_phi[j] == NULL) { for(jj=0; jjck_phi[jj]); return(SUNFALSE); } } /* Do we need to carry quadratures? */ if(ck_mem->ck_quadr) { for (j=0; jck_phi_alloc; j++) { ck_mem->ck_phiQ[j] = N_VClone(IDA_mem->ida_eeQ); if(ck_mem->ck_phiQ[j] == NULL) { for (jj=0; jjck_phiQ[jj]); for(jj=0; jjck_phi_alloc; jj++) N_VDestroy(ck_mem->ck_phi[jj]); return(SUNFALSE); } } } /* Do we need to carry sensitivities? */ if(ck_mem->ck_sensi) { for (j=0; jck_phi_alloc; j++) { ck_mem->ck_phiS[j] = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (ck_mem->ck_phiS[j] == NULL) { for (jj=0; jjck_phiS[jj], IDA_mem->ida_Ns); if (ck_mem->ck_quadr) for (jj=0; jjck_phi_alloc; jj++) N_VDestroy(ck_mem->ck_phiQ[jj]); for (jj=0; jjck_phi_alloc; jj++) N_VDestroy(ck_mem->ck_phi[jj]); return(SUNFALSE); } } } /* Do we need to carry quadrature sensitivities? 
*/ if (ck_mem->ck_quadr_sensi) { for (j=0; jck_phi_alloc; j++) { ck_mem->ck_phiQS[j] = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_eeQ); if (ck_mem->ck_phiQS[j] == NULL) { for (jj=0; jjck_phiQS[jj], IDA_mem->ida_Ns); for (jj=0; jjck_phi_alloc; jj++) N_VDestroyVectorArray(ck_mem->ck_phiS[jj], IDA_mem->ida_Ns); if (ck_mem->ck_quadr) for (jj=0; jjck_phi_alloc; jj++) N_VDestroy(ck_mem->ck_phiQ[jj]); for (jj=0; jjck_phi_alloc; jj++) N_VDestroy(ck_mem->ck_phi[jj]); return(SUNFALSE); } } } return(SUNTRUE); } /* * IDAAckpntCopyVectors * * Copy phi* vectors from IDAMem in the corresponding vectors from checkpoint * */ static void IDAAckpntCopyVectors(IDAMem IDA_mem, CkpntMem ck_mem) { int j, is; /* Save phi* arrays from IDA_mem */ for (j=0; jck_phi_alloc; j++) IDA_mem->ida_cvals[j] = ONE; (void) N_VScaleVectorArray(ck_mem->ck_phi_alloc, IDA_mem->ida_cvals, IDA_mem->ida_phi, ck_mem->ck_phi); if (ck_mem->ck_quadr) (void) N_VScaleVectorArray(ck_mem->ck_phi_alloc, IDA_mem->ida_cvals, IDA_mem->ida_phiQ, ck_mem->ck_phiQ); if (ck_mem->ck_sensi || ck_mem->ck_quadr_sensi) { for (j=0; jck_phi_alloc; j++) { for (is=0; isida_Ns; is++) { IDA_mem->ida_cvals[j*IDA_mem->ida_Ns + is] = ONE; } } } if (ck_mem->ck_sensi) { for (j=0; jck_phi_alloc; j++) { for (is=0; isida_Ns; is++) { IDA_mem->ida_Xvecs[j*IDA_mem->ida_Ns + is] = IDA_mem->ida_phiS[j][is]; IDA_mem->ida_Zvecs[j*IDA_mem->ida_Ns + is] = ck_mem->ck_phiS[j][is]; } } (void) N_VScaleVectorArray(ck_mem->ck_phi_alloc * IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_Xvecs, IDA_mem->ida_Zvecs); } if(ck_mem->ck_quadr_sensi) { for (j=0; jck_phi_alloc; j++) { for (is=0; isida_Ns; is++) { IDA_mem->ida_Xvecs[j*IDA_mem->ida_Ns + is] = IDA_mem->ida_phiQS[j][is]; IDA_mem->ida_Zvecs[j*IDA_mem->ida_Ns + is] = ck_mem->ck_phiQS[j][is]; } } (void) N_VScaleVectorArray(ck_mem->ck_phi_alloc * IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_Xvecs, IDA_mem->ida_Zvecs); } } /* * IDAAdataMalloc * * This routine allocates memory for storing 
information at all * intermediate points between two consecutive check points. * This data is then used to interpolate the forward solution * at any other time. */ static booleantype IDAAdataMalloc(IDAMem IDA_mem) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; long int i, j; IDAADJ_mem = IDA_mem->ida_adj_mem; IDAADJ_mem->dt_mem = NULL; dt_mem = (DtpntMem *)malloc((IDAADJ_mem->ia_nsteps+1)*sizeof(struct DtpntMemRec *)); if (dt_mem==NULL) return(SUNFALSE); for (i=0; i<=IDAADJ_mem->ia_nsteps; i++) { dt_mem[i] = (DtpntMem)malloc(sizeof(struct DtpntMemRec)); /* On failure, free any allocated memory and return NULL. */ if (dt_mem[i] == NULL) { for(j=0; jcontent = NULL; } /* Attach the allocated dt_mem to IDAADJ_mem. */ IDAADJ_mem->dt_mem = dt_mem; return(SUNTRUE); } /* * IDAAdataFree * * This routine frees the memory allocated for data storage. */ static void IDAAdataFree(IDAMem IDA_mem) { IDAadjMem IDAADJ_mem; long int i; IDAADJ_mem = IDA_mem->ida_adj_mem; if (IDAADJ_mem == NULL) return; /* Destroy data points by calling the interpolation's 'free' routine. */ IDAADJ_mem->ia_free(IDA_mem); for (i=0; i<=IDAADJ_mem->ia_nsteps; i++) { free(IDAADJ_mem->dt_mem[i]); IDAADJ_mem->dt_mem[i] = NULL; } free(IDAADJ_mem->dt_mem); IDAADJ_mem->dt_mem = NULL; } /* * IDAAdataStore * * This routine integrates the forward model starting at the check * point ck_mem and stores y and yprime at all intermediate * steps. * * Return values: * - the flag that IDASolve may return on error * - IDA_REIFWD_FAIL if no check point is available for this hot start * - IDA_SUCCESS */ static int IDAAdataStore(IDAMem IDA_mem, CkpntMem ck_mem) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; realtype t; long int i; int flag, sign; IDAADJ_mem = IDA_mem->ida_adj_mem; dt_mem = IDAADJ_mem->dt_mem; /* Initialize IDA_mem with data from ck_mem. 
*/ flag = IDAAckpntGet(IDA_mem, ck_mem); if (flag != IDA_SUCCESS) return(IDA_REIFWD_FAIL); /* Set first structure in dt_mem[0] */ dt_mem[0]->t = ck_mem->ck_t0; IDAADJ_mem->ia_storePnt(IDA_mem, dt_mem[0]); /* Decide whether TSTOP must be activated */ if (IDAADJ_mem->ia_tstopIDAFcall) { IDASetStopTime(IDA_mem, IDAADJ_mem->ia_tstopIDAF); } sign = (IDAADJ_mem->ia_tfinal - IDAADJ_mem->ia_tinitial > ZERO) ? 1 : -1; /* Run IDASolve in IDA_ONE_STEP mode to set following structures in dt_mem[i]. */ i = 1; do { flag = IDASolve(IDA_mem, ck_mem->ck_t1, &t, IDAADJ_mem->ia_yyTmp, IDAADJ_mem->ia_ypTmp, IDA_ONE_STEP); if (flag < 0) return(IDA_FWD_FAIL); dt_mem[i]->t = t; IDAADJ_mem->ia_storePnt(IDA_mem, dt_mem[i]); i++; } while ( sign*(ck_mem->ck_t1 - t) > ZERO ); /* New data is now available. */ IDAADJ_mem->ia_ckpntData = ck_mem; IDAADJ_mem->ia_newData = SUNTRUE; IDAADJ_mem->ia_np = i; return(IDA_SUCCESS); } /* * CVAckpntGet * * This routine prepares IDAS for a hot restart from * the check point ck_mem */ static int IDAAckpntGet(IDAMem IDA_mem, CkpntMem ck_mem) { int flag, j, is; if (ck_mem->ck_next == NULL) { /* In this case, we just call the reinitialization routine, * but make sure we use the same initial stepsize as on * the first run. 
*/ IDASetInitStep(IDA_mem, IDA_mem->ida_h0u); flag = IDAReInit(IDA_mem, ck_mem->ck_t0, ck_mem->ck_phi[0], ck_mem->ck_phi[1]); if (flag != IDA_SUCCESS) return(flag); if (ck_mem->ck_quadr) { flag = IDAQuadReInit(IDA_mem, ck_mem->ck_phiQ[0]); if (flag != IDA_SUCCESS) return(flag); } if (ck_mem->ck_sensi) { flag = IDASensReInit(IDA_mem, IDA_mem->ida_ism, ck_mem->ck_phiS[0], ck_mem->ck_phiS[1]); if (flag != IDA_SUCCESS) return(flag); } if (ck_mem->ck_quadr_sensi) { flag = IDAQuadSensReInit(IDA_mem, ck_mem->ck_phiQS[0]); if (flag != IDA_SUCCESS) return(flag); } } else { /* Copy parameters from check point data structure */ IDA_mem->ida_nst = ck_mem->ck_nst; IDA_mem->ida_tretlast = ck_mem->ck_tretlast; IDA_mem->ida_kk = ck_mem->ck_kk; IDA_mem->ida_kused = ck_mem->ck_kused; IDA_mem->ida_knew = ck_mem->ck_knew; IDA_mem->ida_phase = ck_mem->ck_phase; IDA_mem->ida_ns = ck_mem->ck_ns; IDA_mem->ida_hh = ck_mem->ck_hh; IDA_mem->ida_hused = ck_mem->ck_hused; IDA_mem->ida_rr = ck_mem->ck_rr; IDA_mem->ida_cj = ck_mem->ck_cj; IDA_mem->ida_cjlast = ck_mem->ck_cjlast; IDA_mem->ida_cjold = ck_mem->ck_cjold; IDA_mem->ida_cjratio = ck_mem->ck_cjratio; IDA_mem->ida_tn = ck_mem->ck_t0; IDA_mem->ida_ss = ck_mem->ck_ss; IDA_mem->ida_ssS = ck_mem->ck_ssS; /* Copy the arrays from check point data structure */ for (j=0; jck_phi_alloc; j++) N_VScale(ONE, ck_mem->ck_phi[j], IDA_mem->ida_phi[j]); if(ck_mem->ck_quadr) { for (j=0; jck_phi_alloc; j++) N_VScale(ONE, ck_mem->ck_phiQ[j], IDA_mem->ida_phiQ[j]); } if (ck_mem->ck_sensi) { for (is=0; isida_Ns; is++) { for (j=0; jck_phi_alloc; j++) N_VScale(ONE, ck_mem->ck_phiS[j][is], IDA_mem->ida_phiS[j][is]); } } if (ck_mem->ck_quadr_sensi) { for (is=0; isida_Ns; is++) { for (j=0; jck_phi_alloc; j++) N_VScale(ONE, ck_mem->ck_phiQS[j][is], IDA_mem->ida_phiQS[j][is]); } } for (j=0; jida_psi[j] = ck_mem->ck_psi[j]; IDA_mem->ida_alpha[j] = ck_mem->ck_alpha[j]; IDA_mem->ida_beta[j] = ck_mem->ck_beta[j]; IDA_mem->ida_sigma[j] = ck_mem->ck_sigma[j]; 
IDA_mem->ida_gamma[j] = ck_mem->ck_gamma[j]; } /* Force a call to setup */ IDA_mem->ida_forceSetup = SUNTRUE; } return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Functions specific to cubic Hermite interpolation * ----------------------------------------------------------------- */ /* * IDAAhermiteMalloc * * This routine allocates memory for storing information at all * intermediate points between two consecutive check points. * This data is then used to interpolate the forward solution * at any other time. */ static booleantype IDAAhermiteMalloc(IDAMem IDA_mem) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; HermiteDataMem content; long int i, ii=0; booleantype allocOK; allocOK = SUNTRUE; IDAADJ_mem = IDA_mem->ida_adj_mem; /* Allocate space for the vectors yyTmp and ypTmp. */ IDAADJ_mem->ia_yyTmp = N_VClone(IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_yyTmp == NULL) { return(SUNFALSE); } IDAADJ_mem->ia_ypTmp = N_VClone(IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_ypTmp == NULL) { return(SUNFALSE); } /* Allocate space for sensitivities temporary vectors. 
*/ if (IDAADJ_mem->ia_storeSensi) { IDAADJ_mem->ia_yySTmp = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_yySTmp == NULL) { N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); return(SUNFALSE); } IDAADJ_mem->ia_ypSTmp = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_ypSTmp == NULL) { N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); N_VDestroyVectorArray(IDAADJ_mem->ia_yySTmp, IDA_mem->ida_Ns); return(SUNFALSE); } } /* Allocate space for the content field of the dt structures */ dt_mem = IDAADJ_mem->dt_mem; for (i=0; i<=IDAADJ_mem->ia_nsteps; i++) { content = NULL; content = (HermiteDataMem) malloc(sizeof(struct HermiteDataMemRec)); if (content == NULL) { ii = i; allocOK = SUNFALSE; break; } content->y = N_VClone(IDA_mem->ida_tempv1); if (content->y == NULL) { free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } content->yd = N_VClone(IDA_mem->ida_tempv1); if (content->yd == NULL) { N_VDestroy(content->y); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } if (IDAADJ_mem->ia_storeSensi) { content->yS = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (content->yS == NULL) { N_VDestroy(content->y); N_VDestroy(content->yd); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } content->ySd = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (content->ySd == NULL) { N_VDestroy(content->y); N_VDestroy(content->yd); N_VDestroyVectorArray(content->yS, IDA_mem->ida_Ns); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } } dt_mem[i]->content = content; } /* If an error occurred, deallocate and return */ if (!allocOK) { N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(IDAADJ_mem->ia_yySTmp, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDAADJ_mem->ia_ypSTmp, IDA_mem->ida_Ns); } for (i=0; icontent); N_VDestroy(content->y); 
N_VDestroy(content->yd); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(content->yS, IDA_mem->ida_Ns); N_VDestroyVectorArray(content->ySd, IDA_mem->ida_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } return(allocOK); } /* * IDAAhermiteFree * * This routine frees the memory allocated for data storage. */ static void IDAAhermiteFree(IDAMem IDA_mem) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; HermiteDataMem content; long int i; IDAADJ_mem = IDA_mem->ida_adj_mem; N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(IDAADJ_mem->ia_yySTmp, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDAADJ_mem->ia_ypSTmp, IDA_mem->ida_Ns); } dt_mem = IDAADJ_mem->dt_mem; for (i=0; i<=IDAADJ_mem->ia_nsteps; i++) { content = (HermiteDataMem) (dt_mem[i]->content); /* content might be NULL, if IDAAdjInit was called but IDASolveF was not. */ if(content) { N_VDestroy(content->y); N_VDestroy(content->yd); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(content->yS, IDA_mem->ida_Ns); N_VDestroyVectorArray(content->ySd, IDA_mem->ida_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } } /* * IDAAhermiteStorePnt * * This routine stores a new point (y,yd) in the structure d for use * in the cubic Hermite interpolation. * Note that the time is already stored. */ static int IDAAhermiteStorePnt(IDAMem IDA_mem, DtpntMem d) { IDAadjMem IDAADJ_mem; HermiteDataMem content; int is, retval; IDAADJ_mem = IDA_mem->ida_adj_mem; content = (HermiteDataMem) d->content; /* Load solution(s) */ N_VScale(ONE, IDA_mem->ida_phi[0], content->y); if (IDAADJ_mem->ia_storeSensi) { for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = ONE; retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_phiS[0], content->yS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } /* Load derivative(s). 
*/ IDAAGettnSolutionYp(IDA_mem, content->yd); if (IDAADJ_mem->ia_storeSensi) { IDAAGettnSolutionYpS(IDA_mem, content->ySd); } return(0); } /* * IDAAhermiteGetY * * This routine uses cubic piece-wise Hermite interpolation for * the forward solution vector. * It is typically called by the wrapper routines before calling * user provided routines (fB, djacB, bjacB, jtimesB, psolB) but * can be directly called by the user through IDAGetAdjY */ static int IDAAhermiteGetY(IDAMem IDA_mem, realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; HermiteDataMem content0, content1; realtype t0, t1, delta; realtype factor1, factor2, factor3; N_Vector y0, yd0, y1, yd1; N_Vector *yS0=NULL, *ySd0=NULL, *yS1, *ySd1; int flag, is, NS; long int indx; booleantype newpoint; /* local variables for fused vector oerations */ int retval; realtype cvals[4]; N_Vector Xvecs[4]; N_Vector* XXvecs[4]; IDAADJ_mem = IDA_mem->ida_adj_mem; dt_mem = IDAADJ_mem->dt_mem; /* Local value of Ns */ NS = (IDAADJ_mem->ia_interpSensi && (yyS != NULL)) ? IDA_mem->ida_Ns : 0; /* Get the index in dt_mem */ flag = IDAAfindIndex(IDA_mem, t, &indx, &newpoint); if (flag != IDA_SUCCESS) return(flag); /* If we are beyond the left limit but close enough, then return y at the left limit. 
*/ if (indx == 0) { content0 = (HermiteDataMem) (dt_mem[0]->content); N_VScale(ONE, content0->y, yy); N_VScale(ONE, content0->yd, yp); if (NS > 0) { for (is=0; isida_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, IDA_mem->ida_cvals, content0->yS, yyS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); retval = N_VScaleVectorArray(NS, IDA_mem->ida_cvals, content0->ySd, ypS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } return(IDA_SUCCESS); } /* Extract stuff from the appropriate data points */ t0 = dt_mem[indx-1]->t; t1 = dt_mem[indx]->t; delta = t1 - t0; content0 = (HermiteDataMem) (dt_mem[indx-1]->content); y0 = content0->y; yd0 = content0->yd; if (IDAADJ_mem->ia_interpSensi) { yS0 = content0->yS; ySd0 = content0->ySd; } if (newpoint) { /* Recompute Y0 and Y1 */ content1 = (HermiteDataMem) (dt_mem[indx]->content); y1 = content1->y; yd1 = content1->yd; /* Y1 = delta (yd1 + yd0) - 2 (y1 - y0) */ cvals[0] = -TWO; Xvecs[0] = y1; cvals[1] = TWO; Xvecs[1] = y0; cvals[2] = delta; Xvecs[2] = yd1; cvals[3] = delta; Xvecs[3] = yd0; retval = N_VLinearCombination(4, cvals, Xvecs, IDAADJ_mem->ia_Y[1]); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); /* Y0 = y1 - y0 - delta * yd0 */ cvals[0] = ONE; Xvecs[0] = y1; cvals[1] = -ONE; Xvecs[1] = y0; cvals[2] = -delta; Xvecs[2] = yd0; retval = N_VLinearCombination(3, cvals, Xvecs, IDAADJ_mem->ia_Y[0]); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); /* Recompute YS0 and YS1, if needed */ if (NS > 0) { yS1 = content1->yS; ySd1 = content1->ySd; /* YS1 = delta (ySd1 + ySd0) - 2 (yS1 - yS0) */ cvals[0] = -TWO; XXvecs[0] = yS1; cvals[1] = TWO; XXvecs[1] = yS0; cvals[2] = delta; XXvecs[2] = ySd1; cvals[3] = delta; XXvecs[3] = ySd0; retval = N_VLinearCombinationVectorArray(NS, 4, cvals, XXvecs, IDAADJ_mem->ia_YS[1]); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); /* YS0 = yS1 - yS0 - delta * ySd0 */ cvals[0] = ONE; XXvecs[0] = yS1; cvals[1] = -ONE; XXvecs[1] = yS0; cvals[2] = -delta; XXvecs[2] = ySd0; 
retval = N_VLinearCombinationVectorArray(NS, 3, cvals, XXvecs, IDAADJ_mem->ia_YS[0]); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } } /* Perform the actual interpolation. */ /* For y. */ factor1 = t - t0; factor2 = factor1/delta; factor2 = factor2*factor2; factor3 = factor2*(t-t1)/delta; cvals[0] = ONE; cvals[1] = factor1; cvals[2] = factor2; cvals[3] = factor3; /* y = y0 + factor1 yd0 + factor2 * Y[0] + factor3 Y[1] */ Xvecs[0] = y0; Xvecs[1] = yd0; Xvecs[2] = IDAADJ_mem->ia_Y[0]; Xvecs[3] = IDAADJ_mem->ia_Y[1]; retval = N_VLinearCombination(4, cvals, Xvecs, yy); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); /* Sensi Interpolation. */ /* yS = yS0 + factor1 ySd0 + factor2 * YS[0] + factor3 YS[1], if needed */ if (NS > 0) { XXvecs[0] = yS0; XXvecs[1] = ySd0; XXvecs[2] = IDAADJ_mem->ia_YS[0]; XXvecs[3] = IDAADJ_mem->ia_YS[1]; retval = N_VLinearCombinationVectorArray(NS, 4, cvals, XXvecs, yyS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } /* For y'. */ factor1 = factor1/delta/delta; /* factor1 = 2(t-t0)/(t1-t0)^2 */ factor2 = factor1*((3*t-2*t1-t0)/delta); /* factor2 = (t-t0)(3*t-2*t1-t0)/(t1-t0)^3 */ factor1 *= 2; cvals[0] = ONE; cvals[1] = factor1; cvals[2] = factor2; /* yp = yd0 + factor1 Y[0] + factor 2 Y[1] */ Xvecs[0] = yd0; Xvecs[1] = IDAADJ_mem->ia_Y[0]; Xvecs[2] = IDAADJ_mem->ia_Y[1]; retval = N_VLinearCombination(3, cvals, Xvecs, yp); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); /* Sensi interpolation for 1st derivative. 
*/ /* ypS = ySd0 + factor1 YS[0] + factor 2 YS[1], if needed */ if (NS > 0) { XXvecs[0] = ySd0; XXvecs[1] = IDAADJ_mem->ia_YS[0]; XXvecs[2] = IDAADJ_mem->ia_YS[1]; retval = N_VLinearCombinationVectorArray(NS, 3, cvals, XXvecs, ypS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Functions specific to Polynomial interpolation * ----------------------------------------------------------------- */ /* * IDAApolynomialMalloc * * This routine allocates memory for storing information at all * intermediate points between two consecutive check points. * This data is then used to interpolate the forward solution * at any other time. * * Information about the first derivative is stored only for the first * data point. */ static booleantype IDAApolynomialMalloc(IDAMem IDA_mem) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; PolynomialDataMem content; long int i, ii=0; booleantype allocOK; allocOK = SUNTRUE; IDAADJ_mem = IDA_mem->ida_adj_mem; /* Allocate space for the vectors yyTmp and ypTmp */ IDAADJ_mem->ia_yyTmp = N_VClone(IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_yyTmp == NULL) { return(SUNFALSE); } IDAADJ_mem->ia_ypTmp = N_VClone(IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_ypTmp == NULL) { return(SUNFALSE); } if (IDAADJ_mem->ia_storeSensi) { IDAADJ_mem->ia_yySTmp = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_yySTmp == NULL) { N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); return(SUNFALSE); } IDAADJ_mem->ia_ypSTmp = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (IDAADJ_mem->ia_ypSTmp == NULL) { N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); N_VDestroyVectorArray(IDAADJ_mem->ia_yySTmp, IDA_mem->ida_Ns); return(SUNFALSE); } } /* Allocate space for the content field of the dt structures */ dt_mem = IDAADJ_mem->dt_mem; for (i=0; i<=IDAADJ_mem->ia_nsteps; i++) { content = NULL; content = 
(PolynomialDataMem) malloc(sizeof(struct PolynomialDataMemRec)); if (content == NULL) { ii = i; allocOK = SUNFALSE; break; } content->y = N_VClone(IDA_mem->ida_tempv1); if (content->y == NULL) { free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } /* Allocate space for yp also. Needed for the most left point interpolation. */ if (i == 0) { content->yd = N_VClone(IDA_mem->ida_tempv1); /* Memory allocation failure ? */ if (content->yd == NULL) { N_VDestroy(content->y); free(content); content = NULL; ii = i; allocOK = SUNFALSE; } } else { /* Not the first data point. */ content->yd = NULL; } if (IDAADJ_mem->ia_storeSensi) { content->yS = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (content->yS == NULL) { N_VDestroy(content->y); if (content->yd) N_VDestroy(content->yd); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } if (i==0) { content->ySd = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); if (content->ySd == NULL) { N_VDestroy(content->y); if (content->yd) N_VDestroy(content->yd); N_VDestroyVectorArray(content->yS, IDA_mem->ida_Ns); free(content); content = NULL; ii = i; allocOK = SUNFALSE; } } else { content->ySd = NULL; } } dt_mem[i]->content = content; } /* If an error occurred, deallocate and return */ if (!allocOK) { N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(IDAADJ_mem->ia_yySTmp, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDAADJ_mem->ia_ypSTmp, IDA_mem->ida_Ns); } for (i=0; icontent); N_VDestroy(content->y); if (content->yd) N_VDestroy(content->yd); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(content->yS, IDA_mem->ida_Ns); if (content->ySd) N_VDestroyVectorArray(content->ySd, IDA_mem->ida_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } return(allocOK); } /* * IDAApolynomialFree * * This routine frees the memory allocated for data storage. 
*/ static void IDAApolynomialFree(IDAMem IDA_mem) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; PolynomialDataMem content; long int i; IDAADJ_mem = IDA_mem->ida_adj_mem; N_VDestroy(IDAADJ_mem->ia_yyTmp); N_VDestroy(IDAADJ_mem->ia_ypTmp); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(IDAADJ_mem->ia_yySTmp, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDAADJ_mem->ia_ypSTmp, IDA_mem->ida_Ns); } dt_mem = IDAADJ_mem->dt_mem; for (i=0; i<=IDAADJ_mem->ia_nsteps; i++) { content = (PolynomialDataMem) (dt_mem[i]->content); /* content might be NULL, if IDAAdjInit was called but IDASolveF was not. */ if(content) { N_VDestroy(content->y); if (content->yd) N_VDestroy(content->yd); if (IDAADJ_mem->ia_storeSensi) { N_VDestroyVectorArray(content->yS, IDA_mem->ida_Ns); if (content->ySd) N_VDestroyVectorArray(content->ySd, IDA_mem->ida_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } } /* * IDAApolynomialStorePnt * * This routine stores a new point y in the structure d for use * in the Polynomial interpolation. * * Note that the time is already stored. Information about the * first derivative is available only for the first data point, * in which case content->yp is non-null. */ static int IDAApolynomialStorePnt(IDAMem IDA_mem, DtpntMem d) { IDAadjMem IDAADJ_mem; PolynomialDataMem content; int is, retval; IDAADJ_mem = IDA_mem->ida_adj_mem; content = (PolynomialDataMem) d->content; N_VScale(ONE, IDA_mem->ida_phi[0], content->y); /* copy also the derivative for the first data point (in this case content->yp is non-null). */ if (content->yd) IDAAGettnSolutionYp(IDA_mem, content->yd); if (IDAADJ_mem->ia_storeSensi) { for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = ONE; retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_phiS[0], content->yS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); /* store the derivative if it is the first data point. 
*/ if(content->ySd) IDAAGettnSolutionYpS(IDA_mem, content->ySd); } content->order = IDA_mem->ida_kused; return(0); } /* * IDAApolynomialGetY * * This routine uses polynomial interpolation for the forward solution vector. * It is typically called by the wrapper routines before calling * user provided routines (fB, djacB, bjacB, jtimesB, psolB)) but * can be directly called by the user through CVodeGetAdjY. */ static int IDAApolynomialGetY(IDAMem IDA_mem, realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS) { IDAadjMem IDAADJ_mem; DtpntMem *dt_mem; PolynomialDataMem content; int flag, dir, order, i, j, is, NS, retval; long int indx, base; booleantype newpoint; realtype delt, factor, Psi, Psiprime; IDAADJ_mem = IDA_mem->ida_adj_mem; dt_mem = IDAADJ_mem->dt_mem; /* Local value of Ns */ NS = (IDAADJ_mem->ia_interpSensi && (yyS != NULL)) ? IDA_mem->ida_Ns : 0; /* Get the index in dt_mem */ flag = IDAAfindIndex(IDA_mem, t, &indx, &newpoint); if (flag != IDA_SUCCESS) return(flag); /* If we are beyond the left limit but close enough, then return y at the left limit. */ if (indx == 0) { content = (PolynomialDataMem) (dt_mem[0]->content); N_VScale(ONE, content->y, yy); N_VScale(ONE, content->yd, yp); if (NS > 0) { for (is=0; isida_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, IDA_mem->ida_cvals, content->yS, yyS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); retval = N_VScaleVectorArray(NS, IDA_mem->ida_cvals, content->ySd, ypS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } return(IDA_SUCCESS); } /* Scaling factor */ delt = SUNRabs(dt_mem[indx]->t - dt_mem[indx-1]->t); /* Find the direction of the forward integration */ dir = (IDAADJ_mem->ia_tfinal - IDAADJ_mem->ia_tinitial > ZERO) ? 1 : -1; /* Establish the base point depending on the integration direction. 
Modify the base if there are not enough points for the current order */ if (dir == 1) { base = indx; content = (PolynomialDataMem) (dt_mem[base]->content); order = content->order; if(indx < order) base += order-indx; } else { base = indx-1; content = (PolynomialDataMem) (dt_mem[base]->content); order = content->order; if (IDAADJ_mem->ia_np-indx > order) base -= indx+order-IDAADJ_mem->ia_np; } /* Recompute Y (divided differences for Newton polynomial) if needed */ if (newpoint) { /* Store 0-th order DD */ if (dir == 1) { for(j=0;j<=order;j++) { IDAADJ_mem->ia_T[j] = dt_mem[base-j]->t; content = (PolynomialDataMem) (dt_mem[base-j]->content); N_VScale(ONE, content->y, IDAADJ_mem->ia_Y[j]); if (NS > 0) { for (is=0; isida_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, IDA_mem->ida_cvals, content->yS, IDAADJ_mem->ia_YS[j]); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } } } else { for(j=0;j<=order;j++) { IDAADJ_mem->ia_T[j] = dt_mem[base-1+j]->t; content = (PolynomialDataMem) (dt_mem[base-1+j]->content); N_VScale(ONE, content->y, IDAADJ_mem->ia_Y[j]); if (NS > 0) { for (is=0; isida_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, IDA_mem->ida_cvals, content->yS, IDAADJ_mem->ia_YS[j]); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } } } /* Compute higher-order DD */ for(i=1;i<=order;i++) { for(j=order;j>=i;j--) { factor = delt/(IDAADJ_mem->ia_T[j]-IDAADJ_mem->ia_T[j-i]); N_VLinearSum(factor, IDAADJ_mem->ia_Y[j], -factor, IDAADJ_mem->ia_Y[j-1], IDAADJ_mem->ia_Y[j]); for (is=0; isia_YS[j][is], -factor, IDAADJ_mem->ia_YS[j-1][is], IDAADJ_mem->ia_YS[j][is]); } } } /* Perform the actual interpolation for yy using nested multiplications */ IDA_mem->ida_cvals[0] = ONE; for (i=0; iida_cvals[i+1] = IDA_mem->ida_cvals[i] * (t-IDAADJ_mem->ia_T[i]) / delt; retval = N_VLinearCombination(order+1, IDA_mem->ida_cvals, IDAADJ_mem->ia_Y, yy); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); if (NS > 0) { retval = N_VLinearCombinationVectorArray(NS, order+1, 
IDA_mem->ida_cvals, IDAADJ_mem->ia_YS, yyS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } /* Perform the actual interpolation for yp. Writing p(t) = y0 + (t-t0)*f[t0,t1] + ... + (t-t0)(t-t1)...(t-tn)*f[t0,t1,...tn], denote psi_k(t) = (t-t0)(t-t1)...(t-tk). The formula used for p'(t) is: - p'(t) = f[t0,t1] + psi_1'(t)*f[t0,t1,t2] + ... + psi_n'(t)*f[t0,t1,...,tn] We reccursively compute psi_k'(t) from: - psi_k'(t) = (t-tk)*psi_{k-1}'(t) + psi_{k-1} psi_k is rescaled with 1/delt each time is computed, because the Newton DDs from Y were scaled with delt. */ Psi = ONE; Psiprime = ZERO; for(i=1; i<=order; i++) { factor = (t-IDAADJ_mem->ia_T[i-1])/delt; Psiprime = Psi/delt + factor * Psiprime; Psi = Psi * factor; IDA_mem->ida_cvals[i-1] = Psiprime; } retval = N_VLinearCombination(order, IDA_mem->ida_cvals, IDAADJ_mem->ia_Y+1, yp); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); if (NS > 0) { retval = N_VLinearCombinationVectorArray(NS, order, IDA_mem->ida_cvals, IDAADJ_mem->ia_YS+1, ypS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); } return(IDA_SUCCESS); } /* * IDAAGettnSolutionYp * * Evaluates the first derivative of the solution at the last time returned by * IDASolve (tretlast). * * The function implements the same algorithm as in IDAGetSolution but in the * particular case when t=tn (i.e. delta=0). * * This function was implemented to avoid calls to IDAGetSolution which computes * y by doing a loop that is not necessary for this particular situation. */ static int IDAAGettnSolutionYp(IDAMem IDA_mem, N_Vector yp) { int j, kord, retval; realtype C, D, gam; if (IDA_mem->ida_nst==0) { /* If no integration was done, return the yp supplied by user.*/ N_VScale(ONE, IDA_mem->ida_phi[1], yp); return(0); } /* Compute yp as in IDAGetSolution for this particular case when t=tn. 
*/ kord = IDA_mem->ida_kused; if(IDA_mem->ida_kused==0) kord=1; C = ONE; D = ZERO; gam = ZERO; for (j=1; j <= kord; j++) { D = D*gam + C/IDA_mem->ida_psi[j-1]; C = C*gam; gam = IDA_mem->ida_psi[j-1] / IDA_mem->ida_psi[j]; IDA_mem->ida_dvals[j-1] = D; } retval = N_VLinearCombination(kord, IDA_mem->ida_dvals, IDA_mem->ida_phi+1, yp); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); return(0); } /* * IDAAGettnSolutionYpS * * Same as IDAAGettnSolutionYp, but for first derivative of the sensitivities. * */ static int IDAAGettnSolutionYpS(IDAMem IDA_mem, N_Vector *ypS) { int j, kord, is, retval; realtype C, D, gam; if (IDA_mem->ida_nst==0) { /* If no integration was done, return the ypS supplied by user.*/ for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = ONE; retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_phiS[1], ypS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); return(0); } kord = IDA_mem->ida_kused; if(IDA_mem->ida_kused==0) kord=1; C = ONE; D = ZERO; gam = ZERO; for (j=1; j <= kord; j++) { D = D*gam + C/IDA_mem->ida_psi[j-1]; C = C*gam; gam = IDA_mem->ida_psi[j-1] / IDA_mem->ida_psi[j]; IDA_mem->ida_dvals[j-1] = D; } retval = N_VLinearCombinationVectorArray(IDA_mem->ida_Ns, kord, IDA_mem->ida_dvals, IDA_mem->ida_phiS+1, ypS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); return(0); } /* * IDAAfindIndex * * Finds the index in the array of data point strctures such that * dt_mem[indx-1].t <= t < dt_mem[indx].t * If indx is changed from the previous invocation, then newpoint = SUNTRUE * * If t is beyond the leftmost limit, but close enough, indx=0. * * Returns IDA_SUCCESS if successful and IDA_GETY_BADT if unable to * find indx (t is too far beyond limits). 
*/

static int IDAAfindIndex(IDAMem ida_mem, realtype t,
                         long int *indx, booleantype *newpoint)
{
  IDAadjMem IDAADJ_mem;
  IDAMem IDA_mem;
  DtpntMem *dt_mem;
  int sign;
  booleantype to_left, to_right;

  IDA_mem = (IDAMem) ida_mem;
  IDAADJ_mem = IDA_mem->ida_adj_mem;
  dt_mem = IDAADJ_mem->dt_mem;

  *newpoint = SUNFALSE;

  /* Find the direction of integration */
  sign = (IDAADJ_mem->ia_tfinal - IDAADJ_mem->ia_tinitial > ZERO) ? 1 : -1;

  /* If this is the first time we use new data */
  if (IDAADJ_mem->ia_newData) {
    IDAADJ_mem->ia_ilast = IDAADJ_mem->ia_np-1;
    *newpoint = SUNTRUE;
    IDAADJ_mem->ia_newData = SUNFALSE;
  }

  /* Search for indx starting from ilast.  The cached ia_ilast makes
     consecutive lookups at nearby times O(1) on average. */
  to_left  = ( sign*(t - dt_mem[IDAADJ_mem->ia_ilast-1]->t) < ZERO);
  to_right = ( sign*(t - dt_mem[IDAADJ_mem->ia_ilast]->t)   > ZERO);

  if ( to_left ) {
    /* look for a new indx to the left */

    *newpoint = SUNTRUE;

    *indx = IDAADJ_mem->ia_ilast;
    for(;;) {
      if ( *indx == 0 ) break;
      if ( sign*(t - dt_mem[*indx-1]->t) <= ZERO ) (*indx)--;
      else                                         break;
    }

    if ( *indx == 0 )
      IDAADJ_mem->ia_ilast = 1;
    else
      IDAADJ_mem->ia_ilast = *indx;

    if ( *indx == 0 ) {
      /* t is beyond leftmost limit. Is it too far?
         The FUZZ_FACTOR * uround tolerance absorbs roundoff in t. */
      if ( SUNRabs(t - dt_mem[0]->t) > FUZZ_FACTOR * IDA_mem->ida_uround ) {
        return(IDA_GETY_BADT);
      }
    }

  } else if ( to_right ) {
    /* look for a new indx to the right */

    *newpoint = SUNTRUE;

    *indx = IDAADJ_mem->ia_ilast;
    for(;;) {
      if ( sign*(t - dt_mem[*indx]->t) > ZERO) (*indx)++;
      else                                     break;
    }

    IDAADJ_mem->ia_ilast = *indx;

  } else {
    /* ilast is still OK */

    *indx = IDAADJ_mem->ia_ilast;

  }

  return(IDA_SUCCESS);
}

/*
 * IDAGetAdjY
 *
 * This routine returns the interpolated forward solution at time t.
 * The user must allocate space for y.
*/

int IDAGetAdjY(void *ida_mem, realtype t, N_Vector yy, N_Vector yp)
{
  IDAMem IDA_mem;
  IDAadjMem IDAADJ_mem;
  int flag;

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAA", "IDAGetAdjY", MSG_NO_MEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;
  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Delegate to the installed interpolation scheme (Hermite or
     polynomial); sensitivity output is not requested here (NULL, NULL). */
  flag = IDAADJ_mem->ia_getY(IDA_mem, t, yy, yp, NULL, NULL);

  return(flag);
}

/*=================================================================*/
/*             Wrappers for adjoint system                         */
/*=================================================================*/

/*
 * IDAAres
 *
 * This routine interfaces to the RhsFnB routine provided by
 * the user.
 */

static int IDAAres(realtype tt,
                   N_Vector yyB, N_Vector ypB, N_Vector rrB,
                   void *ida_mem)
{
  IDAadjMem IDAADJ_mem;
  IDABMem IDAB_mem;
  IDAMem IDA_mem;
  int flag, retval;

  IDA_mem = (IDAMem) ida_mem;

  IDAADJ_mem = IDA_mem->ida_adj_mem;

  /* Get the current backward problem. */
  IDAB_mem = IDAADJ_mem->ia_bckpbCrt;

  /* Get forward solution from interpolation. */
  if( IDAADJ_mem->ia_noInterp == SUNFALSE) {
    if (IDAADJ_mem->ia_interpSensi)
      flag = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                 IDAADJ_mem->ia_ypTmp, IDAADJ_mem->ia_yySTmp,
                                 IDAADJ_mem->ia_ypSTmp);
    else
      flag = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp,
                                 IDAADJ_mem->ia_ypTmp, NULL, NULL);

    if (flag != IDA_SUCCESS) {
      IDAProcessError(IDA_mem, -1, "IDAA", "IDAAres", MSGAM_BAD_TINTERP, tt);
      return(-1);
    }
  }

  /* Call the user supplied residual.  The sensitivity-aware variant is
     used when the backward problem depends on forward sensitivities. */
  if(IDAB_mem->ida_res_withSensi) {
    retval = IDAB_mem->ida_resS(tt, IDAADJ_mem->ia_yyTmp,
                                IDAADJ_mem->ia_ypTmp,
                                IDAADJ_mem->ia_yySTmp,
                                IDAADJ_mem->ia_ypSTmp,
                                yyB, ypB,
                                rrB, IDAB_mem->ida_user_data);
  }else {
    retval = IDAB_mem->ida_res(tt, IDAADJ_mem->ia_yyTmp, IDAADJ_mem->ia_ypTmp,
                               yyB, ypB, rrB, IDAB_mem->ida_user_data);
  }

  return(retval);
}

/*
 * IDAArhsQ
 *
 * This routine interfaces to the IDAQuadRhsFnB routine provided by
 * the user.
 *
 * It is passed to IDAQuadInit calls for backward problem, so it must
 * be of IDAQuadRhsFn type.
*/ static int IDAArhsQ(realtype tt, N_Vector yyB, N_Vector ypB, N_Vector resvalQB, void *ida_mem) { IDAMem IDA_mem; IDAadjMem IDAADJ_mem; IDABMem IDAB_mem; int retval, flag; IDA_mem = (IDAMem) ida_mem; IDAADJ_mem = IDA_mem->ida_adj_mem; /* Get current backward problem. */ IDAB_mem = IDAADJ_mem->ia_bckpbCrt; retval = IDA_SUCCESS; /* Get forward solution from interpolation. */ if (IDAADJ_mem->ia_noInterp == SUNFALSE) { if (IDAADJ_mem->ia_interpSensi) { flag = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp, IDAADJ_mem->ia_ypTmp, IDAADJ_mem->ia_yySTmp, IDAADJ_mem->ia_ypSTmp); } else { flag = IDAADJ_mem->ia_getY(IDA_mem, tt, IDAADJ_mem->ia_yyTmp, IDAADJ_mem->ia_ypTmp, NULL, NULL); } if (flag != IDA_SUCCESS) { IDAProcessError(IDA_mem, -1, "IDAA", "IDAArhsQ", MSGAM_BAD_TINTERP, tt); return(-1); } } /* Call user's adjoint quadrature RHS routine */ if (IDAB_mem->ida_rhsQ_withSensi) { retval = IDAB_mem->ida_rhsQS(tt, IDAADJ_mem->ia_yyTmp, IDAADJ_mem->ia_ypTmp, IDAADJ_mem->ia_yySTmp, IDAADJ_mem->ia_ypSTmp, yyB, ypB, resvalQB, IDAB_mem->ida_user_data); } else { retval = IDAB_mem->ida_rhsQ(tt, IDAADJ_mem->ia_yyTmp, IDAADJ_mem->ia_ypTmp, yyB, ypB, resvalQB, IDAB_mem->ida_user_data); } return(retval); } StanHeaders/src/idas/idas_nls_sim.c0000644000176200001440000003540214645137106017005 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This the implementation file for the IDA nonlinear solver interface. 
* ---------------------------------------------------------------------------*/ #include "idas_impl.h" #include "sundials/sundials_math.h" #include "sundials/sundials_nvector_senswrapper.h" /* constant macros */ #define PT0001 RCONST(0.0001) /* real 0.0001 */ #define ONE RCONST(1.0) /* real 1.0 */ #define TWENTY RCONST(20.0) /* real 20.0 */ /* nonlinear solver parameters */ #define MAXIT 4 /* default max number of nonlinear iterations */ #define RATEMAX RCONST(0.9) /* max convergence rate used in divergence check */ /* private functions passed to nonlinear solver */ static int idaNlsResidualSensSim(N_Vector ycor, N_Vector res, void* ida_mem); static int idaNlsLSetupSensSim(booleantype jbad, booleantype* jcur, void* ida_mem); static int idaNlsLSolveSensSim(N_Vector delta, void* ida_mem); static int idaNlsConvTestSensSim(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del, realtype tol, N_Vector ewt, void* ida_mem); /* ----------------------------------------------------------------------------- * Exported functions * ---------------------------------------------------------------------------*/ int IDASetNonlinearSolverSensSim(void *ida_mem, SUNNonlinearSolver NLS) { IDAMem IDA_mem; int retval, is; /* return immediately if IDA memory is NULL */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetNonlinearSolverSensSim", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* return immediately if NLS memory is NULL */ if (NLS == NULL) { IDAProcessError(NULL, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "NLS must be non-NULL"); return(IDA_ILL_INPUT); } /* check for required nonlinear solver functions */ if ( NLS->ops->gettype == NULL || NLS->ops->solve == NULL || NLS->ops->setsysfn == NULL ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "NLS does not support required operations"); return(IDA_ILL_INPUT); } /* check for allowed nonlinear solver types */ if (SUNNonlinSolGetType(NLS) != 
SUNNONLINEARSOLVER_ROOTFIND) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "NLS type must be SUNNONLINEARSOLVER_ROOTFIND"); return(IDA_ILL_INPUT); } /* check that sensitivities were initialized */ if (!(IDA_mem->ida_sensi)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", MSG_NO_SENSI); return(IDA_ILL_INPUT); } /* check that the simultaneous corrector was selected */ if (IDA_mem->ida_ism != IDA_SIMULTANEOUS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "Sensitivity solution method is not IDA_SIMULTANEOUS"); return(IDA_ILL_INPUT); } /* free any existing nonlinear solver */ if ((IDA_mem->NLSsim != NULL) && (IDA_mem->ownNLSsim)) retval = SUNNonlinSolFree(IDA_mem->NLSsim); /* set SUNNonlinearSolver pointer */ IDA_mem->NLSsim = NLS; /* Set NLS ownership flag. If this function was called to attach the default NLS, IDA will set the flag to SUNTRUE after this function returns. */ IDA_mem->ownNLSsim = SUNFALSE; /* set the nonlinear residual function */ retval = SUNNonlinSolSetSysFn(IDA_mem->NLSsim, idaNlsResidualSensSim); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "Setting nonlinear system function failed"); return(IDA_ILL_INPUT); } /* set convergence test function */ retval = SUNNonlinSolSetConvTestFn(IDA_mem->NLSsim, idaNlsConvTestSensSim, ida_mem); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "Setting convergence test function failed"); return(IDA_ILL_INPUT); } /* set max allowed nonlinear iterations */ retval = SUNNonlinSolSetMaxIters(IDA_mem->NLSsim, MAXIT); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "Setting maximum number of nonlinear iterations failed"); return(IDA_ILL_INPUT); } /* create vector wrappers if necessary */ if (IDA_mem->simMallocDone == SUNFALSE) { 
IDA_mem->ypredictSim = N_VNewEmpty_SensWrapper(IDA_mem->ida_Ns+1, IDA_mem->ida_sunctx); if (IDA_mem->ypredictSim == NULL) { IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASetNonlinearSolverSensSim", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } IDA_mem->ycorSim = N_VNewEmpty_SensWrapper(IDA_mem->ida_Ns+1, IDA_mem->ida_sunctx); if (IDA_mem->ycorSim == NULL) { N_VDestroy(IDA_mem->ypredictSim); IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASetNonlinearSolverSensSim", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } IDA_mem->ewtSim = N_VNewEmpty_SensWrapper(IDA_mem->ida_Ns+1, IDA_mem->ida_sunctx); if (IDA_mem->ewtSim == NULL) { N_VDestroy(IDA_mem->ypredictSim); N_VDestroy(IDA_mem->ycorSim); IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASetNonlinearSolverSensSim", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } IDA_mem->simMallocDone = SUNTRUE; } /* attach vectors to vector wrappers */ NV_VEC_SW(IDA_mem->ypredictSim, 0) = IDA_mem->ida_yypredict; NV_VEC_SW(IDA_mem->ycorSim, 0) = IDA_mem->ida_ee; NV_VEC_SW(IDA_mem->ewtSim, 0) = IDA_mem->ida_ewt; for (is=0; is < IDA_mem->ida_Ns; is++) { NV_VEC_SW(IDA_mem->ypredictSim, is+1) = IDA_mem->ida_yySpredict[is]; NV_VEC_SW(IDA_mem->ycorSim, is+1) = IDA_mem->ida_eeS[is]; NV_VEC_SW(IDA_mem->ewtSim, is+1) = IDA_mem->ida_ewtS[is]; } /* Set the nonlinear system RES function */ if (!(IDA_mem->ida_res)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinearSolverSensSim", "The DAE residual function is NULL"); return(IDA_ILL_INPUT); } IDA_mem->nls_res = IDA_mem->ida_res; return(IDA_SUCCESS); } /*--------------------------------------------------------------- IDAGetNonlinearSystemDataSens: This routine provides access to the relevant data needed to compute the nonlinear system function. 
---------------------------------------------------------------*/ int IDAGetNonlinearSystemDataSens(void *ida_mem, realtype *tcur, N_Vector **yySpred, N_Vector **ypSpred, N_Vector **yySn, N_Vector **ypSn, realtype *cj, void **user_data) { IDAMem IDA_mem; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNonlinearSystemData", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *tcur = IDA_mem->ida_tn; *yySpred = IDA_mem->ida_yySpredict; *ypSpred = IDA_mem->ida_ypSpredict; *yySn = IDA_mem->ida_yyS; *ypSn = IDA_mem->ida_ypS; *cj = IDA_mem->ida_cj; *user_data = IDA_mem->ida_user_data; return(IDA_SUCCESS); } /* ----------------------------------------------------------------------------- * Private functions * ---------------------------------------------------------------------------*/ int idaNlsInitSensSim(IDAMem IDA_mem) { int retval; /* set the linear solver setup wrapper function */ if (IDA_mem->ida_lsetup) retval = SUNNonlinSolSetLSetupFn(IDA_mem->NLSsim, idaNlsLSetupSensSim); else retval = SUNNonlinSolSetLSetupFn(IDA_mem->NLSsim, NULL); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInitSnesSim", "Setting the linear solver setup function failed"); return(IDA_NLS_INIT_FAIL); } /* set the linear solver solve wrapper function */ if (IDA_mem->ida_lsolve) retval = SUNNonlinSolSetLSolveFn(IDA_mem->NLSsim, idaNlsLSolveSensSim); else retval = SUNNonlinSolSetLSolveFn(IDA_mem->NLSsim, NULL); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInitSnesSim", "Setting linear solver solve function failed"); return(IDA_NLS_INIT_FAIL); } /* initialize nonlinear solver */ retval = SUNNonlinSolInitialize(IDA_mem->NLSsim); if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "idaNlsInitSnesSim", MSG_NLS_INIT_FAIL); return(IDA_NLS_INIT_FAIL); } return(IDA_SUCCESS); } static int idaNlsLSetupSensSim(booleantype jbad, booleantype* jcur, void* ida_mem) { 
IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsLSetupSensSim", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_nsetups++; IDA_mem->ida_forceSetup = SUNFALSE; retval = IDA_mem->ida_lsetup(IDA_mem, IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_savres, IDA_mem->ida_tempv1, IDA_mem->ida_tempv2, IDA_mem->ida_tempv3); /* update Jacobian status */ *jcur = SUNTRUE; /* update convergence test constants */ IDA_mem->ida_cjold = IDA_mem->ida_cj; IDA_mem->ida_cjratio = ONE; IDA_mem->ida_ss = TWENTY; IDA_mem->ida_ssS = TWENTY; if (retval < 0) return(IDA_LSETUP_FAIL); if (retval > 0) return(IDA_LSETUP_RECVR); return(IDA_SUCCESS); } static int idaNlsLSolveSensSim(N_Vector deltaSim, void* ida_mem) { IDAMem IDA_mem; int retval, is; N_Vector delta; N_Vector *deltaS; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsLSolveSensSim", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* extract state update vector from the vector wrapper */ delta = NV_VEC_SW(deltaSim,0); /* solve the state linear system */ retval = IDA_mem->ida_lsolve(IDA_mem, delta, IDA_mem->ida_ewt, IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_savres); if (retval < 0) return(IDA_LSOLVE_FAIL); if (retval > 0) return(IDA_LSOLVE_RECVR); /* extract sensitivity deltas from the vector wrapper */ deltaS = NV_VECS_SW(deltaSim)+1; /* solve the sensitivity linear systems */ for(is=0; isida_Ns; is++) { retval = IDA_mem->ida_lsolve(IDA_mem, deltaS[is], IDA_mem->ida_ewtS[is], IDA_mem->ida_yy, IDA_mem->ida_yp, IDA_mem->ida_savres); if (retval < 0) return(IDA_LSOLVE_FAIL); if (retval > 0) return(IDA_LSOLVE_RECVR); } return(IDA_SUCCESS); } static int idaNlsResidualSensSim(N_Vector ycorSim, N_Vector resSim, void* ida_mem) { IDAMem IDA_mem; int retval; N_Vector ycor, res; N_Vector *ycorS, *resS; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsResidualSensSim", MSG_NO_MEM); 
return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* extract state and residual vectors from the vector wrapper */ ycor = NV_VEC_SW(ycorSim,0); res = NV_VEC_SW(resSim,0); /* update yy and yp based on the current correction */ N_VLinearSum(ONE, IDA_mem->ida_yypredict, ONE, ycor, IDA_mem->ida_yy); N_VLinearSum(ONE, IDA_mem->ida_yppredict, IDA_mem->ida_cj, ycor, IDA_mem->ida_yp); /* evaluate residual */ retval = IDA_mem->nls_res(IDA_mem->ida_tn, IDA_mem->ida_yy, IDA_mem->ida_yp, res, IDA_mem->ida_user_data); /* increment the number of residual evaluations */ IDA_mem->ida_nre++; /* save a copy of the residual vector in savres */ N_VScale(ONE, res, IDA_mem->ida_savres); if (retval < 0) return(IDA_RES_FAIL); if (retval > 0) return(IDA_RES_RECVR); /* extract sensitivity and residual vectors from the vector wrapper */ ycorS = NV_VECS_SW(ycorSim)+1; resS = NV_VECS_SW(resSim)+1; /* update yS and ypS based on the current correction */ N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_yySpredict, ONE, ycorS, IDA_mem->ida_yyS); N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_ypSpredict, IDA_mem->ida_cj, ycorS, IDA_mem->ida_ypS); /* evaluate sens residual */ retval = IDA_mem->ida_resS(IDA_mem->ida_Ns, IDA_mem->ida_tn, IDA_mem->ida_yy, IDA_mem->ida_yp, res, IDA_mem->ida_yyS, IDA_mem->ida_ypS, resS, IDA_mem->ida_user_dataS, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); /* increment the number of sens residual evaluations */ IDA_mem->ida_nrSe++; if (retval < 0) return(IDA_SRES_FAIL); if (retval > 0) return(IDA_SRES_RECVR); return(IDA_SUCCESS); } static int idaNlsConvTestSensSim(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del, realtype tol, N_Vector ewt, void* ida_mem) { IDAMem IDA_mem; int m, retval; realtype delnrm; realtype rate; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "idaNlsConvTestSensSim", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* compute the norm of the correction */ delnrm = 
N_VWrmsNorm(del, ewt); /* get the current nonlinear solver iteration count */ retval = SUNNonlinSolGetCurIter(NLS, &m); if (retval != IDA_SUCCESS) return(IDA_MEM_NULL); /* test for convergence, first directly, then with rate estimate. */ if (m == 0){ IDA_mem->ida_oldnrm = delnrm; if (delnrm <= PT0001 * IDA_mem->ida_toldel) return(SUN_NLS_SUCCESS); } else { rate = SUNRpowerR( delnrm/IDA_mem->ida_oldnrm, ONE/m ); if (rate > RATEMAX) return(SUN_NLS_CONV_RECVR); IDA_mem->ida_ss = rate/(ONE - rate); } if (IDA_mem->ida_ss*delnrm <= tol) return(SUN_NLS_SUCCESS); /* not yet converged */ return(SUN_NLS_CONTINUE); } StanHeaders/src/idas/idas_bbdpre_impl.h0000644000176200001440000000643714645137106017633 0ustar liggesusers/*----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Alan C. Hindmarsh and Radu Serban @ LLNL *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * This is the header file (private version) for the IDABBDPRE * module, for a band-block-diagonal preconditioner, i.e. a * block-diagonal matrix with banded blocks, for use with IDA * and an IDASPILS linear solver. 
*-----------------------------------------------------------------*/ #ifndef _IDASBBDPRE_IMPL_H #define _IDASBBDPRE_IMPL_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * Definition of IBBDPrecData * ----------------------------------------------------------------- */ typedef struct IBBDPrecDataRec { /* passed by user to IDABBDPrecAlloc and used by IDABBDPrecSetup/IDABBDPrecSolve functions */ sunindextype mudq, mldq, mukeep, mlkeep; realtype rel_yy; IDABBDLocalFn glocal; IDABBDCommFn gcomm; /* set by IDABBDPrecSetup and used by IDABBDPrecSetup and IDABBDPrecSolve functions */ sunindextype n_local; SUNMatrix PP; SUNLinearSolver LS; N_Vector zlocal; N_Vector rlocal; N_Vector tempv1; N_Vector tempv2; N_Vector tempv3; N_Vector tempv4; /* available for optional output */ long int rpwsize; long int ipwsize; long int nge; /* pointer to ida_mem */ void *ida_mem; } *IBBDPrecData; /* * ----------------------------------------------------------------- * Type: IDABBDPrecDataB * ----------------------------------------------------------------- */ typedef struct IDABBDPrecDataRecB { /* BBD user functions (glocB and cfnB) for backward run */ IDABBDLocalFnB glocalB; IDABBDCommFnB gcommB; } *IDABBDPrecDataB; /* * ----------------------------------------------------------------- * IDABBDPRE error messages * ----------------------------------------------------------------- */ #define MSGBBD_MEM_NULL "Integrator memory is NULL." #define MSGBBD_LMEM_NULL "Linear solver memory is NULL. One of the SPILS linear solvers must be attached." #define MSGBBD_MEM_FAIL "A memory request failed." #define MSGBBD_BAD_NVECTOR "A required vector operation is not implemented." #define MSGBBD_SUNMAT_FAIL "An error arose from a SUNBandMatrix routine." #define MSGBBD_SUNLS_FAIL "An error arose from a SUNBandLinearSolver routine." 
#define MSGBBD_PMEM_NULL "BBD peconditioner memory is NULL. IDABBDPrecInit must be called." #define MSGBBD_FUNC_FAILED "The Glocal or Gcomm routine failed in an unrecoverable manner." #define MSGBBD_AMEM_NULL "idaadj_mem = NULL illegal." #define MSGBBD_PDATAB_NULL "IDABBDPRE memory is NULL for the backward integration." #define MSGBBD_BAD_T "Bad t for interpolation." #ifdef __cplusplus } #endif #endif StanHeaders/src/idas/idas_spils.c0000644000176200001440000001070414645137106016471 0ustar liggesusers/*----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Implementation file for the deprecated Scaled and Preconditioned * Iterative Linear Solver interface in IDAS; these routines now just * wrap the updated IDA generic linear solver interface in idas_ls.h. 
*-----------------------------------------------------------------*/ #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= Exported Functions (wrappers for equivalent routines in idas_ls.h) =================================================================*/ int IDASpilsSetLinearSolver(void *ida_mem, SUNLinearSolver LS) { return(IDASetLinearSolver(ida_mem, LS, NULL)); } int IDASpilsSetPreconditioner(void *ida_mem, IDASpilsPrecSetupFn pset, IDASpilsPrecSolveFn psolve) { return(IDASetPreconditioner(ida_mem, pset, psolve)); } int IDASpilsSetJacTimes(void *ida_mem, IDASpilsJacTimesSetupFn jtsetup, IDASpilsJacTimesVecFn jtimes) { return(IDASetJacTimes(ida_mem, jtsetup, jtimes)); } int IDASpilsSetEpsLin(void *ida_mem, realtype eplifac) { return(IDASetEpsLin(ida_mem, eplifac)); } int IDASpilsSetIncrementFactor(void *ida_mem, realtype dqincfac) { return(IDASetIncrementFactor(ida_mem, dqincfac)); } int IDASpilsGetWorkSpace(void *ida_mem, long int *lenrwLS, long int *leniwLS) { return(IDAGetLinWorkSpace(ida_mem, lenrwLS, leniwLS)); } int IDASpilsGetNumPrecEvals(void *ida_mem, long int *npevals) { return(IDAGetNumPrecEvals(ida_mem, npevals)); } int IDASpilsGetNumPrecSolves(void *ida_mem, long int *npsolves) { return(IDAGetNumPrecSolves(ida_mem, npsolves)); } int IDASpilsGetNumLinIters(void *ida_mem, long int *nliters) { return(IDAGetNumLinIters(ida_mem, nliters)); } int IDASpilsGetNumConvFails(void *ida_mem, long int *nlcfails) { return(IDAGetNumLinConvFails(ida_mem, nlcfails)); } int IDASpilsGetNumJTSetupEvals(void *ida_mem, long int *njtsetups) { return(IDAGetNumJTSetupEvals(ida_mem, njtsetups)); } int IDASpilsGetNumJtimesEvals(void *ida_mem, long int *njvevals) { return(IDAGetNumJtimesEvals(ida_mem, njvevals)); } int IDASpilsGetNumResEvals(void *ida_mem, long int *nrevalsLS) { return(IDAGetNumLinResEvals(ida_mem, nrevalsLS)); } int IDASpilsGetLastFlag(void *ida_mem, long 
int *flag) { return(IDAGetLastLinFlag(ida_mem, flag)); } char *IDASpilsGetReturnFlagName(long int flag) { return(IDAGetLinReturnFlagName(flag)); } int IDASpilsSetLinearSolverB(void *ida_mem, int which, SUNLinearSolver LS) { return(IDASetLinearSolverB(ida_mem, which, LS, NULL)); } int IDASpilsSetEpsLinB(void *ida_mem, int which, realtype eplifacB) { return(IDASetEpsLinB(ida_mem, which, eplifacB)); } int IDASpilsSetIncrementFactorB(void *ida_mem, int which, realtype dqincfacB) { return(IDASetIncrementFactorB(ida_mem, which, dqincfacB)); } int IDASpilsSetPreconditionerB(void *ida_mem, int which, IDASpilsPrecSetupFnB psetB, IDASpilsPrecSolveFnB psolveB) { return(IDASetPreconditionerB(ida_mem, which, psetB, psolveB)); } int IDASpilsSetPreconditionerBS(void *ida_mem, int which, IDASpilsPrecSetupFnBS psetBS, IDASpilsPrecSolveFnBS psolveBS) { return(IDASetPreconditionerBS(ida_mem, which, psetBS, psolveBS)); } int IDASpilsSetJacTimesB(void *ida_mem, int which, IDASpilsJacTimesSetupFnB jtsetupB, IDASpilsJacTimesVecFnB jtimesB) { return(IDASetJacTimesB(ida_mem, which, jtsetupB, jtimesB)); } int IDASpilsSetJacTimesBS(void *ida_mem, int which, IDASpilsJacTimesSetupFnBS jtsetupBS, IDASpilsJacTimesVecFnBS jtimesBS) { return(IDASetJacTimesBS(ida_mem, which, jtsetupBS, jtimesBS)); } #ifdef __cplusplus } #endif StanHeaders/src/idas/NOTICE0000644000176200001440000000221614645137106015076 0ustar liggesusersThis work was produced under the auspices of the U.S. Department of Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344. This work was prepared as an account of work sponsored by an agency of the United States Government. 
Neither the United States Government nor Lawrence Livermore National Security, LLC, nor any of their employees makes any warranty, expressed or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes.StanHeaders/src/idas/idas_impl.h0000644000176200001440000013511514645137106016311 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file (private version) for the main IDAS solver. 
* ----------------------------------------------------------------- */ #ifndef _IDAS_IMPL_H #define _IDAS_IMPL_H #include #include "idas/idas.h" #include "sundials_context_impl.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ================================================================= * M A I N I N T E G R A T O R M E M O R Y B L O C K * ================================================================= */ /* Basic IDA constants */ #define HMAX_INV_DEFAULT RCONST(0.0) /* hmax_inv default value */ #define MAXORD_DEFAULT 5 /* maxord default value */ #define MXORDP1 6 /* max. number of N_Vectors in phi */ #define MXSTEP_DEFAULT 500 /* mxstep default value */ /* Return values for lower level routines used by IDASolve and functions provided to the nonlinear solver */ #define IDA_RES_RECVR +1 #define IDA_LSETUP_RECVR +2 #define IDA_LSOLVE_RECVR +3 #define IDA_CONSTR_RECVR +5 #define IDA_NLS_SETUP_RECVR +6 #define IDA_QRHS_RECVR +10 #define IDA_SRES_RECVR +11 #define IDA_QSRHS_RECVR +12 /* itol */ #define IDA_NN 0 #define IDA_SS 1 #define IDA_SV 2 #define IDA_WF 3 #define IDA_EE 4 /* * ---------------------------------------------------------------- * Types: struct IDAMemRec, IDAMem * ---------------------------------------------------------------- * The type IDAMem is type pointer to struct IDAMemRec. * This structure contains fields to keep track of problem state. 
* ---------------------------------------------------------------- */ typedef struct IDAMemRec { SUNContext ida_sunctx; realtype ida_uround; /* machine unit roundoff */ /*-------------------------- Problem Specification Data --------------------------*/ IDAResFn ida_res; /* F(t,y(t),y'(t))=0; the function F */ void *ida_user_data; /* user pointer passed to res */ int ida_itol; /* itol = IDA_SS, IDA_SV, IDA_WF, IDA_NN */ realtype ida_rtol; /* relative tolerance */ realtype ida_Satol; /* scalar absolute tolerance */ N_Vector ida_Vatol; /* vector absolute tolerance */ booleantype ida_atolmin0; /* flag indicating that min(atol) = 0 */ booleantype ida_user_efun; /* SUNTRUE if user provides efun */ IDAEwtFn ida_efun; /* function to set ewt */ void *ida_edata; /* user pointer passed to efun */ booleantype ida_constraintsSet; /* constraints vector present: do constraints calc */ booleantype ida_suppressalg; /* SUNTRUE means suppress algebraic vars in local error tests */ /*----------------------- Quadrature Related Data -----------------------*/ booleantype ida_quadr; IDAQuadRhsFn ida_rhsQ; void *ida_user_dataQ; booleantype ida_errconQ; int ida_itolQ; realtype ida_rtolQ; realtype ida_SatolQ; /* scalar absolute tolerance for quadratures */ N_Vector ida_VatolQ; /* vector absolute tolerance for quadratures */ booleantype ida_atolQmin0; /* flag indicating that min(atolQ) = 0 */ /*------------------------ Sensitivity Related Data ------------------------*/ booleantype ida_sensi; int ida_Ns; int ida_ism; IDASensResFn ida_resS; void *ida_user_dataS; booleantype ida_resSDQ; realtype *ida_p; realtype *ida_pbar; int *ida_plist; int ida_DQtype; realtype ida_DQrhomax; booleantype ida_errconS; /* SUNTRUE if sensitivities in err. control */ int ida_itolS; realtype ida_rtolS; /* relative tolerance for sensitivities */ realtype *ida_SatolS; /* scalar absolute tolerances for sensi. */ N_Vector *ida_VatolS; /* vector absolute tolerances for sensi. 
*/ booleantype *ida_atolSmin0; /* flag indicating that min(atolS[is]) = 0 */ /*----------------------------------- Quadrature Sensitivity Related Data -----------------------------------*/ booleantype ida_quadr_sensi; /* SUNTRUE if computing sensitivities of quadrs. */ IDAQuadSensRhsFn ida_rhsQS; /* fQS = (dfQ/dy)*yS + (dfQ/dp) */ void *ida_user_dataQS; /* data pointer passed to fQS */ booleantype ida_rhsQSDQ; /* SUNTRUE if using internal DQ functions */ booleantype ida_errconQS; /* SUNTRUE if yQS are considered in err. con. */ int ida_itolQS; realtype ida_rtolQS; /* relative tolerance for yQS */ realtype *ida_SatolQS; /* scalar absolute tolerances for yQS */ N_Vector *ida_VatolQS; /* vector absolute tolerances for yQS */ booleantype *ida_atolQSmin0; /* flag indicating that min(atolQS[is]) = 0 */ /*----------------------------------------------- Divided differences array and associated arrays -----------------------------------------------*/ N_Vector ida_phi[MXORDP1]; /* phi = (maxord+1) arrays of divided differences */ realtype ida_psi[MXORDP1]; /* differences in t (sums of recent step sizes) */ realtype ida_alpha[MXORDP1]; /* ratios of current stepsize to psi values */ realtype ida_beta[MXORDP1]; /* ratios of current to previous product of psi's */ realtype ida_sigma[MXORDP1]; /* product successive alpha values and factorial */ realtype ida_gamma[MXORDP1]; /* sum of reciprocals of psi values */ /*------------------------- N_Vectors for integration -------------------------*/ N_Vector ida_ewt; /* error weight vector */ N_Vector ida_yy; /* work space for y vector (= user's yret) */ N_Vector ida_yp; /* work space for y' vector (= user's ypret) */ N_Vector ida_yypredict; /* predicted y vector */ N_Vector ida_yppredict; /* predicted y' vector */ N_Vector ida_delta; /* residual vector */ N_Vector ida_id; /* bit vector for diff./algebraic components */ N_Vector ida_constraints; /* vector of inequality constraint options */ N_Vector ida_savres; /* saved residual vector 
*/ N_Vector ida_ee; /* accumulated corrections to y vector, but set equal to estimated local errors upon successful return */ N_Vector ida_tempv1; /* work space vector */ N_Vector ida_tempv2; /* work space vector */ N_Vector ida_tempv3; /* work space vector */ N_Vector ida_ynew; /* work vector for y in IDACalcIC (= tempv2) */ N_Vector ida_ypnew; /* work vector for yp in IDACalcIC (= ee) */ N_Vector ida_delnew; /* work vector for delta in IDACalcIC (= phi[2]) */ N_Vector ida_dtemp; /* work vector in IDACalcIC (= phi[3]) */ /*---------------------------- Quadrature Related N_Vectors ----------------------------*/ N_Vector ida_phiQ[MXORDP1]; N_Vector ida_yyQ; N_Vector ida_ypQ; N_Vector ida_ewtQ; N_Vector ida_eeQ; /*--------------------------- Sensitivity Related Vectors ---------------------------*/ N_Vector *ida_phiS[MXORDP1]; N_Vector *ida_ewtS; N_Vector *ida_eeS; /* cumulative sensitivity corrections */ N_Vector *ida_yyS; /* allocated and used for: */ N_Vector *ida_ypS; /* ism = SIMULTANEOUS */ N_Vector *ida_yySpredict; /* ism = STAGGERED */ N_Vector *ida_ypSpredict; N_Vector *ida_deltaS; N_Vector ida_tmpS1; /* work space vectors | tmpS1 = tempv1 */ N_Vector ida_tmpS2; /* for resS | tmpS2 = tempv2 */ N_Vector ida_tmpS3; /* | tmpS3 = allocated */ N_Vector *ida_savresS; /* work vector in IDACalcIC for stg (= phiS[2]) */ N_Vector *ida_delnewS; /* work vector in IDACalcIC for stg (= phiS[3]) */ N_Vector *ida_yyS0; /* initial yS, ypS vectors allocated and */ N_Vector *ida_ypS0; /* deallocated in IDACalcIC function */ N_Vector *ida_yyS0new; /* work vector in IDASensLineSrch (= phiS[4]) */ N_Vector *ida_ypS0new; /* work vector in IDASensLineSrch (= eeS) */ /*-------------------------------------- Quadrature Sensitivity Related Vectors --------------------------------------*/ N_Vector *ida_phiQS[MXORDP1];/* Mod. div. diffs. for quadr. 
sensitivities */ N_Vector *ida_ewtQS; /* error weight vectors for sensitivities */ N_Vector *ida_eeQS; /* cumulative quadr.sensi.corrections */ N_Vector *ida_yyQS; /* Unlike yS, yQS is not allocated by the user */ N_Vector *ida_tempvQS; /* temporary storage vector (~ tempv) */ N_Vector ida_savrhsQ; /* saved quadr. rhs (needed for rhsQS calls) */ /*------------------------------ Variables for use by IDACalcIC ------------------------------*/ realtype ida_t0; /* initial t */ N_Vector ida_yy0; /* initial y vector (user-supplied). */ N_Vector ida_yp0; /* initial y' vector (user-supplied). */ int ida_icopt; /* IC calculation user option */ booleantype ida_lsoff; /* IC calculation linesearch turnoff option */ int ida_maxnh; /* max. number of h tries in IC calculation */ int ida_maxnj; /* max. number of J tries in IC calculation */ int ida_maxnit; /* max. number of Netwon iterations in IC calc. */ int ida_nbacktr; /* number of IC linesearch backtrack operations */ int ida_sysindex; /* computed system index (0 or 1) */ int ida_maxbacks; /* max backtracks per Newton step */ realtype ida_epiccon; /* IC nonlinear convergence test constant */ realtype ida_steptol; /* minimum Newton step size in IC calculation */ realtype ida_tscale; /* time scale factor = abs(tout1 - t0) */ /* Tstop information */ booleantype ida_tstopset; realtype ida_tstop; /* Step Data */ int ida_kk; /* current BDF method order */ int ida_kused; /* method order used on last successful step */ int ida_knew; /* order for next step from order decrease decision */ int ida_phase; /* flag to trigger step doubling in first few steps */ int ida_ns; /* counts steps at fixed stepsize and order */ realtype ida_hin; /* initial step */ realtype ida_h0u; /* actual initial stepsize */ realtype ida_hh; /* current step size h */ realtype ida_hused; /* step size used on last successful step */ realtype ida_rr; /* rr = hnext / hused */ realtype ida_tn; /* current internal value of t */ realtype ida_tretlast; /* value of tret 
previously returned by IDASolve */ realtype ida_cj; /* current value of scalar (-alphas/hh) in Jacobian */ realtype ida_cjlast; /* cj value saved from last successful step */ realtype ida_cjold; /* cj value saved from last call to lsetup */ realtype ida_cjratio; /* ratio of cj values: cj/cjold */ realtype ida_ss; /* scalar used in Newton iteration convergence test */ realtype ida_oldnrm; /* norm of previous nonlinear solver update */ realtype ida_epsNewt; /* test constant in Newton convergence test */ realtype ida_epcon; /* coeficient of the Newton covergence test */ realtype ida_toldel; /* tolerance in direct test on Newton corrections */ realtype ida_ssS; /* scalar ss for staggered sensitivities */ /*------ Limits ------*/ int ida_maxncf; /* max numer of convergence failures */ int ida_maxnef; /* max number of error test failures */ int ida_maxord; /* max value of method order k: */ int ida_maxord_alloc; /* value of maxord used when allocating memory */ long int ida_mxstep; /* max number of internal steps for one user call */ realtype ida_hmax_inv; /* inverse of max. step size hmax (default = 0.0) */ /*-------- Counters --------*/ long int ida_nst; /* number of internal steps taken */ long int ida_nre; /* number of function (res) calls */ long int ida_nrQe; long int ida_nrSe; long int ida_nrQSe; /* number of fQS calls */ long int ida_nreS; long int ida_nrQeS; /* number of fQ calls from sensi DQ */ long int ida_ncfn; /* number of corrector convergence failures */ long int ida_ncfnQ; long int ida_ncfnS; long int ida_netf; /* number of error test failures */ long int ida_netfQ; long int ida_netfS; long int ida_netfQS; /* number of quadr. sensi. error test failures */ long int ida_nni; /* number of Newton iterations performed */ long int ida_nniS; long int ida_nsetups; /* number of lsetup calls */ long int ida_nsetupsS; /*------------------ Space requirements ------------------*/ sunindextype ida_lrw1; /* no. 
of realtype words in 1 N_Vector */ sunindextype ida_liw1; /* no. of integer words in 1 N_Vector */ sunindextype ida_lrw1Q; sunindextype ida_liw1Q; long int ida_lrw; /* number of realtype words in IDA work vectors */ long int ida_liw; /* no. of integer words in IDA work vectors */ realtype ida_tolsf; /* tolerance scale factor (saved value) */ /*------------------------------------------- Error handler function and error ouput file -------------------------------------------*/ IDAErrHandlerFn ida_ehfun; /* Error messages are handled by ehfun */ void *ida_eh_data; /* dats pointer passed to ehfun */ FILE *ida_errfp; /* IDA error messages are sent to errfp */ /* Flags to verify correct calling sequence */ booleantype ida_SetupDone; /* set to SUNFALSE by IDAMalloc and IDAReInit set to SUNTRUE by IDACalcIC or IDASolve */ booleantype ida_VatolMallocDone; booleantype ida_constraintsMallocDone; booleantype ida_idMallocDone; booleantype ida_MallocDone; /* set to SUNFALSE by IDACreate set to SUNTRUE by IDAMAlloc tested by IDAReInit and IDASolve */ booleantype ida_VatolQMallocDone; booleantype ida_quadMallocDone; booleantype ida_VatolSMallocDone; booleantype ida_SatolSMallocDone; booleantype ida_sensMallocDone; booleantype ida_VatolQSMallocDone; booleantype ida_SatolQSMallocDone; booleantype ida_quadSensMallocDone; /*--------------------- Nonlinear Solver Data ---------------------*/ SUNNonlinearSolver NLS; /* nonlinear solver object */ booleantype ownNLS; /* flag indicating NLS ownership */ SUNNonlinearSolver NLSsim; /* nonlinear solver object for DAE+Sens solves with the simultaneous corrector option */ booleantype ownNLSsim; /* flag indicating NLS ownership */ SUNNonlinearSolver NLSstg; /* nonlinear solver object for DAE+Sens solves with the staggered corrector option */ booleantype ownNLSstg; /* flag indicating NLS ownership */ /* The following vectors are NVector wrappers for use with the simultaneous and staggered corrector methods: Simult: ypredictSim = [ida_delta, 
ida_deltaS] ycorSim = [ida_ee, ida_eeS] ewtSim = [ida_ewt, ida_ewtS] Stagger: ypredictStg = ida_deltaS ycorStg = ida_eeS ewtStg = ida_ewtS */ N_Vector ypredictSim, ycorSim, ewtSim; N_Vector ypredictStg, ycorStg, ewtStg; /* flags indicating if vector wrappers for the simultaneous and staggered correctors have been allocated */ booleantype simMallocDone; booleantype stgMallocDone; IDAResFn nls_res; /* F(t,y(t),y'(t))=0; used in the nonlinear solver */ /*------------------ Linear Solver Data ------------------*/ /* Linear Solver functions to be called */ int (*ida_linit)(struct IDAMemRec *idamem); int (*ida_lsetup)(struct IDAMemRec *idamem, N_Vector yyp, N_Vector ypp, N_Vector resp, N_Vector tempv1, N_Vector tempv2, N_Vector tempv3); int (*ida_lsolve)(struct IDAMemRec *idamem, N_Vector b, N_Vector weight, N_Vector ycur, N_Vector ypcur, N_Vector rescur); int (*ida_lperf)(struct IDAMemRec *idamem, int perftask); int (*ida_lfree)(struct IDAMemRec *idamem); /* Linear Solver specific memory */ void *ida_lmem; /* Flag to request a call to the setup routine */ booleantype ida_forceSetup; /* Flag to indicate successful ida_linit call */ booleantype ida_linitOK; /*---------------- Rootfinding Data ----------------*/ IDARootFn ida_gfun; /* Function g for roots sought */ int ida_nrtfn; /* number of components of g */ int *ida_iroots; /* array for root information */ int *ida_rootdir; /* array specifying direction of zero-crossing */ realtype ida_tlo; /* nearest endpoint of interval in root search */ realtype ida_thi; /* farthest endpoint of interval in root search */ realtype ida_trout; /* t return value from rootfinder routine */ realtype *ida_glo; /* saved array of g values at t = tlo */ realtype *ida_ghi; /* saved array of g values at t = thi */ realtype *ida_grout; /* array of g values at t = trout */ realtype ida_toutc; /* copy of tout (if NORMAL mode) */ realtype ida_ttol; /* tolerance on root location */ int ida_taskc; /* copy of parameter itask */ int ida_irfnd; /* flag 
showing whether last step had a root */ long int ida_nge; /* counter for g evaluations */ booleantype *ida_gactive; /* array with active/inactive event functions */ int ida_mxgnull; /* number of warning messages about possible g==0 */ /* Arrays for Fused Vector Operations */ /* scalar arrays */ realtype* ida_cvals; realtype ida_dvals[MAXORD_DEFAULT]; /* vector arrays */ N_Vector* ida_Xvecs; N_Vector* ida_Zvecs; /*------------------------ Adjoint sensitivity data ------------------------*/ booleantype ida_adj; /* SUNTRUE if performing ASA */ struct IDAadjMemRec *ida_adj_mem; /* Pointer to adjoint memory structure */ booleantype ida_adjMallocDone; } *IDAMem; /* * ================================================================= * A D J O I N T M O D U L E M E M O R Y B L O C K * ================================================================= */ /* * ----------------------------------------------------------------- * Forward references for pointers to various structures * ----------------------------------------------------------------- */ typedef struct IDAadjMemRec *IDAadjMem; typedef struct CkpntMemRec *CkpntMem; typedef struct DtpntMemRec *DtpntMem; typedef struct IDABMemRec *IDABMem; /* * ----------------------------------------------------------------- * Types for functions provided by an interpolation module * ----------------------------------------------------------------- * IDAAMMallocFn: Type for a function that initializes the content * field of the structures in the dt array * IDAAMFreeFn: Type for a function that deallocates the content * field of the structures in the dt array * IDAAGetYFn: Function type for a function that returns the * interpolated forward solution. 
* IDAAStorePnt: Function type for a function that stores a new * point in the structure d * ----------------------------------------------------------------- */ typedef booleantype (*IDAAMMallocFn)(IDAMem IDA_mem); typedef void (*IDAAMFreeFn)(IDAMem IDA_mem); typedef int (*IDAAGetYFn)(IDAMem IDA_mem, realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS); typedef int (*IDAAStorePntFn)(IDAMem IDA_mem, DtpntMem d); /* * ----------------------------------------------------------------- * Types : struct CkpntMemRec, CkpntMem * ----------------------------------------------------------------- * The type CkpntMem is type pointer to struct CkpntMemRec. * This structure contains fields to store all information at a * check point that is needed to 'hot' start IDAS. * ----------------------------------------------------------------- */ struct CkpntMemRec { /* Integration limits */ realtype ck_t0; realtype ck_t1; /* Modified divided difference array */ N_Vector ck_phi[MXORDP1]; /* Do we need to carry quadratures? */ booleantype ck_quadr; /* Modified divided difference array for quadratures */ N_Vector ck_phiQ[MXORDP1]; /* Do we need to carry sensitivities? */ booleantype ck_sensi; /* number of sensitivities */ int ck_Ns; /* Modified divided difference array for sensitivities */ N_Vector *ck_phiS[MXORDP1]; /* Do we need to carry quadrature sensitivities? */ booleantype ck_quadr_sensi; /* Modified divided difference array for quadrature sensitivities */ N_Vector *ck_phiQS[MXORDP1]; /* Step data */ long int ck_nst; realtype ck_tretlast; int ck_ns; int ck_kk; int ck_kused; int ck_knew; int ck_phase; realtype ck_hh; realtype ck_hused; realtype ck_rr; realtype ck_cj; realtype ck_cjlast; realtype ck_cjold; realtype ck_cjratio; realtype ck_ss; realtype ck_ssS; realtype ck_psi[MXORDP1]; realtype ck_alpha[MXORDP1]; realtype ck_beta[MXORDP1]; realtype ck_sigma[MXORDP1]; realtype ck_gamma[MXORDP1]; /* How many phi, phiS, phiQ and phiQS were allocated? 
*/ int ck_phi_alloc; /* Pointer to next structure in list */ struct CkpntMemRec *ck_next; }; /* * ----------------------------------------------------------------- * Type : struct DtpntMemRec * ----------------------------------------------------------------- * This structure contains fields to store all information at a * data point that is needed to interpolate solution of forward * simulations. Its content field is interpType-dependent. * ----------------------------------------------------------------- */ struct DtpntMemRec { realtype t; /* time */ void *content; /* interpType-dependent content */ }; /* Data for cubic Hermite interpolation */ typedef struct HermiteDataMemRec { N_Vector y; N_Vector yd; N_Vector *yS; N_Vector *ySd; } *HermiteDataMem; /* Data for polynomial interpolation */ typedef struct PolynomialDataMemRec { N_Vector y; N_Vector *yS; /* yd and ySd store the derivative(s) only for the first dt point. NULL otherwise. */ N_Vector yd; N_Vector *ySd; int order; } *PolynomialDataMem; /* * ----------------------------------------------------------------- * Type : struct IDABMemRec * ----------------------------------------------------------------- * The type IDABMemRec is a pointer to a structure which stores all * information for ONE backward problem. * The IDAadjMem struct contains a linked list of IDABMem pointers * ----------------------------------------------------------------- */ struct IDABMemRec { /* Index of this backward problem */ int ida_index; /* Time at which the backward problem is initialized. 
*/ realtype ida_t0; /* Memory for this backward problem */ IDAMem IDA_mem; /* Flags to indicate that this backward problem's RHS or quad RHS * require forward sensitivities */ booleantype ida_res_withSensi; booleantype ida_rhsQ_withSensi; /* Residual function for backward run */ IDAResFnB ida_res; IDAResFnBS ida_resS; /* Right hand side quadrature function (fQB) for backward run */ IDAQuadRhsFnB ida_rhsQ; IDAQuadRhsFnBS ida_rhsQS; /* User user_data */ void *ida_user_data; /* Linear solver's data and functions */ /* Memory block for a linear solver's interface to IDAA */ void *ida_lmem; /* Function to free any memory allocated by the linear solver */ int (*ida_lfree)(IDABMem IDAB_mem); /* Memory block for a preconditioner's module interface to IDAA */ void *ida_pmem; /* Function to free any memory allocated by the preconditioner module */ int (*ida_pfree)(IDABMem IDAB_mem); /* Time at which to extract solution / quadratures */ realtype ida_tout; /* Workspace Nvectors */ N_Vector ida_yy; N_Vector ida_yp; /* Link to next structure in list. */ struct IDABMemRec *ida_next; }; /* * ----------------------------------------------------------------- * Type : struct IDAadjMemRec * ----------------------------------------------------------------- * The type IDAadjMem is type pointer to struct IDAadjMemRec. * This structure contins fields to store all information * necessary for adjoint sensitivity analysis. 
* ----------------------------------------------------------------- */ struct IDAadjMemRec { /* -------------------- * Forward problem data * -------------------- */ /* Integration interval */ realtype ia_tinitial, ia_tfinal; /* Flag for first call to IDASolveF */ booleantype ia_firstIDAFcall; /* Flag if IDASolveF was called with TSTOP */ booleantype ia_tstopIDAFcall; realtype ia_tstopIDAF; /* Flag if IDASolveF was called in IDA_NORMAL_MODE and encountered a root after tout */ booleantype ia_rootret; realtype ia_troot; /* ---------------------- * Backward problems data * ---------------------- */ /* Storage for backward problems */ struct IDABMemRec *IDAB_mem; /* Number of backward problems. */ int ia_nbckpbs; /* Address of current backward problem (iterator). */ struct IDABMemRec *ia_bckpbCrt; /* Flag for first call to IDASolveB */ booleantype ia_firstIDABcall; /* ---------------- * Check point data * ---------------- */ /* Storage for check point information */ struct CkpntMemRec *ck_mem; /* address of the check point structure for which data is available */ struct CkpntMemRec *ia_ckpntData; /* Number of checkpoints. */ int ia_nckpnts; /* ------------------ * Interpolation data * ------------------ */ /* Number of steps between 2 check points */ long int ia_nsteps; /* Last index used in IDAAfindIndex */ long int ia_ilast; /* Storage for data from forward runs */ struct DtpntMemRec **dt_mem; /* Actual number of data points saved in current dt_mem */ /* Commonly, np = nsteps+1 */ long int ia_np; /* Interpolation type */ int ia_interpType; /* Functions set by the interpolation module */ IDAAStorePntFn ia_storePnt; /* store a new interpolation point */ IDAAGetYFn ia_getY; /* interpolate forward solution */ IDAAMMallocFn ia_malloc; /* allocate new data point */ IDAAMFreeFn ia_free; /* destroys data point */ /* Flags controlling the interpolation module */ booleantype ia_mallocDone; /* IM initialized? */ booleantype ia_newData; /* new data available in dt_mem? 
*/ booleantype ia_storeSensi; /* store sensitivities? */ booleantype ia_interpSensi; /* interpolate sensitivities? */ booleantype ia_noInterp; /* interpolations are temporarly */ /* disabled ( IDACalcICB ) */ /* Workspace for polynomial interpolation */ N_Vector ia_Y[MXORDP1]; /* pointers phi[i] */ N_Vector *ia_YS[MXORDP1]; /* pointers phiS[i] */ realtype ia_T[MXORDP1]; /* Workspace for wrapper functions */ N_Vector ia_yyTmp, ia_ypTmp; N_Vector *ia_yySTmp, *ia_ypSTmp; }; /* * ================================================================= * I N T E R F A C E T O L I N E A R S O L V E R S * ================================================================= */ /* * ----------------------------------------------------------------- * int (*ida_linit)(IDAMem IDA_mem); * ----------------------------------------------------------------- * The purpose of ida_linit is to allocate memory for the * solver-specific fields in the structure *(idamem->ida_lmem) and * perform any needed initializations of solver-specific memory, * such as counters/statistics. An (*ida_linit) should return * 0 if it has successfully initialized the IDA linear solver and * a non-zero value otherwise. If an error does occur, an * appropriate message should be sent to the error handler function. * ---------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lsetup)(IDAMem IDA_mem, N_Vector yyp, N_Vector ypp, * N_Vector resp, N_Vector tempv1, * N_Vector tempv2, N_Vector tempv3); * ----------------------------------------------------------------- * The job of ida_lsetup is to prepare the linear solver for * subsequent calls to ida_lsolve. Its parameters are as follows: * * idamem - problem memory pointer of type IDAMem. See the big * typedef earlier in this file. * * yyp - the predicted y vector for the current IDA internal * step. * * ypp - the predicted y' vector for the current IDA internal * step. 
* * resp - F(tn, yyp, ypp). * * tempv1, tempv2, tempv3 - temporary N_Vectors provided for use * by ida_lsetup. * * The ida_lsetup routine should return 0 if successful, * a positive value for a recoverable error, and a negative value * for an unrecoverable error. * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lsolve)(IDAMem IDA_mem, N_Vector b, N_Vector weight, * N_Vector ycur, N_Vector ypcur, N_Vector rescur); * ----------------------------------------------------------------- * ida_lsolve must solve the linear equation P x = b, where * P is some approximation to the system Jacobian * J = (dF/dy) + cj (dF/dy') * evaluated at (tn,ycur,ypcur) and the RHS vector b is input. * The N-vector ycur contains the solver's current approximation * to y(tn), ypcur contains that for y'(tn), and the vector rescur * contains the N-vector residual F(tn,ycur,ypcur). * The solution is to be returned in the vector b. * * The ida_lsolve routine should return 0 if successful, * a positive value for a recoverable error, and a negative value * for an unrecoverable error. * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lperf)(IDAMem IDA_mem, int perftask); * ----------------------------------------------------------------- * ida_lperf is called two places in IDAS where linear solver * performance data is required by IDAS. For perftask = 0, an * initialization of performance variables is performed, while for * perftask = 1, the performance is evaluated. * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*ida_lfree)(IDAMem IDA_mem); * ----------------------------------------------------------------- * ida_lfree should free up any memory allocated by the linear * solver. 
This routine is called once a problem has been * completed and the linear solver is no longer needed. It should * return 0 upon success, nonzero on failure. * ----------------------------------------------------------------- */ /* * ================================================================= * I N T E R N A L F U N C T I O N S * ================================================================= */ /* Prototype of internal ewtSet function */ int IDAEwtSet(N_Vector ycur, N_Vector weight, void *data); /* High level error handler */ void IDAProcessError(IDAMem IDA_mem, int error_code, const char *module, const char *fname, const char *msgfmt, ...); /* Prototype of internal errHandler function */ void IDAErrHandler(int error_code, const char *module, const char *function, char *msg, void *data); /* Norm functions. Also used for IC, so they are global.*/ realtype IDAWrmsNorm(IDAMem IDA_mem, N_Vector x, N_Vector w, booleantype mask); realtype IDASensWrmsNorm(IDAMem IDA_mem, N_Vector *xS, N_Vector *wS, booleantype mask); realtype IDASensWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm, N_Vector *xS, N_Vector *wS, booleantype mask); /* Nonlinear solver initialization */ int idaNlsInit(IDAMem IDA_mem); int idaNlsInitSensSim(IDAMem IDA_mem); int idaNlsInitSensStg(IDAMem IDA_mem); /* Prototype for internal sensitivity residual DQ function */ int IDASensResDQ(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector resval, N_Vector *yyS, N_Vector *ypS, N_Vector *resvalS, void *user_dataS, N_Vector ytemp, N_Vector yptemp, N_Vector restemp); /* * ================================================================= * E R R O R M E S S A G E S * ================================================================= */ #if defined(SUNDIALS_EXTENDED_PRECISION) #define MSG_TIME "t = %Lg, " #define MSG_TIME_H "t = %Lg and h = %Lg, " #define MSG_TIME_INT "t = %Lg is not between tcur - hu = %Lg and tcur = %Lg." 
#define MSG_TIME_TOUT "tout = %Lg" #define MSG_TIME_TSTOP "tstop = %Lg" #elif defined(SUNDIALS_DOUBLE_PRECISION) #define MSG_TIME "t = %lg, " #define MSG_TIME_H "t = %lg and h = %lg, " #define MSG_TIME_INT "t = %lg is not between tcur - hu = %lg and tcur = %lg." #define MSG_TIME_TOUT "tout = %lg" #define MSG_TIME_TSTOP "tstop = %lg" #else #define MSG_TIME "t = %g, " #define MSG_TIME_H "t = %g and h = %g, " #define MSG_TIME_INT "t = %g is not between tcur - hu = %g and tcur = %g." #define MSG_TIME_TOUT "tout = %g" #define MSG_TIME_TSTOP "tstop = %g" #endif /* General errors */ #define MSG_MEM_FAIL "A memory request failed." #define MSG_NULL_SUNCTX "sunctx = NULL illegal." #define MSG_NO_MEM "ida_mem = NULL illegal." #define MSG_NO_MALLOC "Attempt to call before IDAMalloc." #define MSG_BAD_NVECTOR "A required vector operation is not implemented." /* Initialization errors */ #define MSG_Y0_NULL "y0 = NULL illegal." #define MSG_YP0_NULL "yp0 = NULL illegal." #define MSG_BAD_ITOL "Illegal value for itol. The legal values are IDA_SS, IDA_SV, and IDA_WF." #define MSG_RES_NULL "res = NULL illegal." #define MSG_BAD_RTOL "rtol < 0 illegal." #define MSG_ATOL_NULL "atol = NULL illegal." #define MSG_BAD_ATOL "Some atol component < 0.0 illegal." #define MSG_ROOT_FUNC_NULL "g = NULL illegal." #define MSG_MISSING_ID "id = NULL but suppressalg option on." #define MSG_NO_TOLS "No integration tolerances have been specified." #define MSG_FAIL_EWT "The user-provide EwtSet function failed." #define MSG_BAD_EWT "Some initial ewt component = 0.0 illegal." #define MSG_Y0_FAIL_CONSTR "y0 fails to satisfy constraints." #define MSG_BAD_ISM_CONSTR "Constraints can not be enforced while forward sensitivity is used with simultaneous method." #define MSG_LSOLVE_NULL "The linear solver's solve routine is NULL." #define MSG_LINIT_FAIL "The linear solver's init routine failed." #define MSG_NLS_INIT_FAIL "The nonlinear solver's init routine failed." 
#define MSG_NO_QUAD "Illegal attempt to call before calling IDAQuadInit." #define MSG_BAD_EWTQ "Initial ewtQ has component(s) equal to zero (illegal)." #define MSG_BAD_ITOLQ "Illegal value for itolQ. The legal values are IDA_SS and IDA_SV." #define MSG_NO_TOLQ "No integration tolerances for quadrature variables have been specified." #define MSG_NULL_ATOLQ "atolQ = NULL illegal." #define MSG_BAD_RTOLQ "rtolQ < 0 illegal." #define MSG_BAD_ATOLQ "atolQ has negative component(s) (illegal)." #define MSG_NO_SENSI "Illegal attempt to call before calling IDASensInit." #define MSG_BAD_EWTS "Initial ewtS has component(s) equal to zero (illegal)." #define MSG_BAD_ITOLS "Illegal value for itolS. The legal values are IDA_SS, IDA_SV, and IDA_EE." #define MSG_NULL_ATOLS "atolS = NULL illegal." #define MSG_BAD_RTOLS "rtolS < 0 illegal." #define MSG_BAD_ATOLS "atolS has negative component(s) (illegal)." #define MSG_BAD_PBAR "pbar has zero component(s) (illegal)." #define MSG_BAD_PLIST "plist has negative component(s) (illegal)." #define MSG_BAD_NS "NS <= 0 illegal." #define MSG_NULL_YYS0 "yyS0 = NULL illegal." #define MSG_NULL_YPS0 "ypS0 = NULL illegal." #define MSG_BAD_ISM "Illegal value for ism. Legal values are: IDA_SIMULTANEOUS and IDA_STAGGERED." #define MSG_BAD_IS "Illegal value for is." #define MSG_NULL_DKYA "dkyA = NULL illegal." #define MSG_BAD_DQTYPE "Illegal value for DQtype. Legal values are: IDA_CENTERED and IDA_FORWARD." #define MSG_BAD_DQRHO "DQrhomax < 0 illegal." #define MSG_NULL_ABSTOLQS "abstolQS = NULL illegal parameter." #define MSG_BAD_RELTOLQS "reltolQS < 0 illegal parameter." #define MSG_BAD_ABSTOLQS "abstolQS has negative component(s) (illegal)." #define MSG_NO_QUADSENSI "Forward sensitivity analysis for quadrature variables was not activated." #define MSG_NULL_YQS0 "yQS0 = NULL illegal parameter." /* IDACalcIC error messages */ #define MSG_IC_BAD_ICOPT "icopt has an illegal value." #define MSG_IC_BAD_MAXBACKS "maxbacks <= 0 illegal." 
#define MSG_IC_MISSING_ID "id = NULL conflicts with icopt." #define MSG_IC_TOO_CLOSE "tout1 too close to t0 to attempt initial condition calculation." #define MSG_IC_BAD_ID "id has illegal values." #define MSG_IC_BAD_EWT "Some initial ewt component = 0.0 illegal." #define MSG_IC_RES_NONREC "The residual function failed unrecoverably. " #define MSG_IC_RES_FAIL "The residual function failed at the first call. " #define MSG_IC_SETUP_FAIL "The linear solver setup failed unrecoverably." #define MSG_IC_SOLVE_FAIL "The linear solver solve failed unrecoverably." #define MSG_IC_NO_RECOVERY "The residual routine or the linear setup or solve routine had a recoverable error, but IDACalcIC was unable to recover." #define MSG_IC_FAIL_CONSTR "Unable to satisfy the inequality constraints." #define MSG_IC_FAILED_LINS "The linesearch algorithm failed: step too small or too many backtracks." #define MSG_IC_CONV_FAILED "Newton/Linesearch algorithm failed to converge." /* IDASolve error messages */ #define MSG_YRET_NULL "yret = NULL illegal." #define MSG_YPRET_NULL "ypret = NULL illegal." #define MSG_TRET_NULL "tret = NULL illegal." #define MSG_BAD_ITASK "itask has an illegal value." #define MSG_TOO_CLOSE "tout too close to t0 to start integration." #define MSG_BAD_HINIT "Initial step is not towards tout." #define MSG_BAD_TSTOP "The value " MSG_TIME_TSTOP " is behind current " MSG_TIME "in the direction of integration." #define MSG_CLOSE_ROOTS "Root found at and very near " MSG_TIME "." #define MSG_MAX_STEPS "At " MSG_TIME ", mxstep steps taken before reaching tout." #define MSG_EWT_NOW_FAIL "At " MSG_TIME "the user-provide EwtSet function failed." #define MSG_EWT_NOW_BAD "At " MSG_TIME "some ewt component has become <= 0.0." #define MSG_TOO_MUCH_ACC "At " MSG_TIME "too much accuracy requested." #define MSG_BAD_T "Illegal value for t. " MSG_TIME_INT #define MSG_BAD_TOUT "Trouble interpolating at " MSG_TIME_TOUT ". tout too far back in direction of integration." 
#define MSG_BAD_K "Illegal value for k." #define MSG_NULL_DKY "dky = NULL illegal." #define MSG_NULL_DKYP "dkyp = NULL illegal." #define MSG_ERR_FAILS "At " MSG_TIME_H "the error test failed repeatedly or with |h| = hmin." #define MSG_CONV_FAILS "At " MSG_TIME_H "the corrector convergence failed repeatedly or with |h| = hmin." #define MSG_SETUP_FAILED "At " MSG_TIME "the linear solver setup failed unrecoverably." #define MSG_SOLVE_FAILED "At " MSG_TIME "the linear solver solve failed unrecoverably." #define MSG_REP_RES_ERR "At " MSG_TIME "repeated recoverable residual errors." #define MSG_RES_NONRECOV "At " MSG_TIME "the residual function failed unrecoverably." #define MSG_FAILED_CONSTR "At " MSG_TIME "unable to satisfy inequality constraints." #define MSG_RTFUNC_FAILED "At " MSG_TIME ", the rootfinding routine failed in an unrecoverable manner." #define MSG_NO_ROOT "Rootfinding was not initialized." #define MSG_INACTIVE_ROOTS "At the end of the first step, there are still some root functions identically 0. This warning will not be issued again." #define MSG_NLS_INPUT_NULL "At " MSG_TIME ", the nonlinear solver was passed a NULL input." #define MSG_NLS_SETUP_FAILED "At " MSG_TIME ", the nonlinear solver setup failed unrecoverably." #define MSG_NLS_FAIL "At " MSG_TIME ", the nonlinear solver failed in an unrecoverable manner." #define MSG_EWTQ_NOW_BAD "At " MSG_TIME ", a component of ewtQ has become <= 0." #define MSG_QRHSFUNC_FAILED "At " MSG_TIME ", the quadrature right-hand side routine failed in an unrecoverable manner." #define MSG_QRHSFUNC_UNREC "At " MSG_TIME ", the quadrature right-hand side failed in a recoverable manner, but no recovery is possible." #define MSG_QRHSFUNC_REPTD "At " MSG_TIME "repeated recoverable quadrature right-hand side function errors." #define MSG_QRHSFUNC_FIRST "The quadrature right-hand side routine failed at the first call." #define MSG_NULL_P "p = NULL when using internal DQ for sensitivity residual is illegal." 
#define MSG_EWTS_NOW_BAD "At " MSG_TIME ", a component of ewtS has become <= 0." #define MSG_SRES_FAILED "At " MSG_TIME ", the sensitivity residual routine failed in an unrecoverable manner." #define MSG_SRES_UNREC "At " MSG_TIME ", the sensitivity residual failed in a recoverable manner, but no recovery is possible." #define MSG_SRES_REPTD "At " MSG_TIME "repeated recoverable sensitivity residual function errors." #define MSG_NO_TOLQS "No integration tolerances for quadrature sensitivity variables have been specified." #define MSG_NULL_RHSQ "IDAS is expected to use DQ to evaluate the RHS of quad. sensi., but quadratures were not initialized." #define MSG_BAD_EWTQS "Initial ewtQS has component(s) equal to zero (illegal)." #define MSG_EWTQS_NOW_BAD "At " MSG_TIME ", a component of ewtQS has become <= 0." #define MSG_QSRHSFUNC_FAILED "At " MSG_TIME ", the sensitivity quadrature right-hand side routine failed in an unrecoverable manner." #define MSG_QSRHSFUNC_REPTD "At " MSG_TIME "repeated recoverable sensitivity quadrature right-hand side function errors." #define MSG_QSRHSFUNC_FIRST "The quadrature right-hand side routine failed at the first call." /* IDASet* / IDAGet* error messages */ #define MSG_NEG_MAXORD "maxord<=0 illegal." #define MSG_BAD_MAXORD "Illegal attempt to increase maximum order." #define MSG_NEG_HMAX "hmax < 0 illegal." #define MSG_NEG_EPCON "epcon <= 0.0 illegal." #define MSG_BAD_CONSTR "Illegal values in constraints vector." #define MSG_BAD_EPICCON "epiccon <= 0.0 illegal." #define MSG_BAD_MAXNH "maxnh <= 0 illegal." #define MSG_BAD_MAXNJ "maxnj <= 0 illegal." #define MSG_BAD_MAXNIT "maxnit <= 0 illegal." #define MSG_BAD_STEPTOL "steptol <= 0.0 illegal." #define MSG_TOO_LATE "IDAGetConsistentIC can only be called before IDASolve." 
/* * ================================================================= * I D A A E R R O R M E S S A G E S * ================================================================= */ #define MSGAM_NULL_IDAMEM "ida_mem = NULL illegal." #define MSGAM_NO_ADJ "Illegal attempt to call before calling IDAadjInit." #define MSGAM_BAD_INTERP "Illegal value for interp." #define MSGAM_BAD_STEPS "Steps nonpositive illegal." #define MSGAM_BAD_WHICH "Illegal value for which." #define MSGAM_NO_BCK "No backward problems have been defined yet." #define MSGAM_NO_FWD "Illegal attempt to call before calling IDASolveF." #define MSGAM_BAD_TB0 "The initial time tB0 is outside the interval over which the forward problem was solved." #define MSGAM_BAD_SENSI "At least one backward problem requires sensitivities, but they were not stored for interpolation." #define MSGAM_BAD_ITASKB "Illegal value for itaskB. Legal values are IDA_NORMAL and IDA_ONE_STEP." #define MSGAM_BAD_TBOUT "The final time tBout is outside the interval over which the forward problem was solved." #define MSGAM_BACK_ERROR "Error occured while integrating backward problem # %d" #define MSGAM_BAD_TINTERP "Bad t = %g for interpolation." #define MSGAM_BAD_T "Bad t for interpolation." #define MSGAM_WRONG_INTERP "This function cannot be called for the specified interp type." #define MSGAM_MEM_FAIL "A memory request failed." #define MSGAM_NO_INITBS "Illegal attempt to call before calling IDAInitBS." #ifdef __cplusplus } #endif #endif StanHeaders/src/idas/idas_io.c0000644000176200001440000013325414645137106015754 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Radu Serban and Cosmin Petra @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the optional inputs and * outputs for the IDAS solver. * -----------------------------------------------------------------*/ #include #include #include "idas_impl.h" #include "sundials/sundials_types.h" #include "sundials/sundials_math.h" #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWOPT5 RCONST(2.5) /* * ================================================================= * IDA optional input functions * ================================================================= */ int IDASetErrHandlerFn(void *ida_mem, IDAErrHandlerFn ehfun, void *eh_data) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetErrHandlerFn", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_ehfun = ehfun; IDA_mem->ida_eh_data = eh_data; return(IDA_SUCCESS); } int IDASetErrFile(void *ida_mem, FILE *errfp) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetErrFile", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_errfp = errfp; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetUserData(void *ida_mem, void *user_data) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetUserData", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_user_data = user_data; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxOrd(void *ida_mem, int maxord) { IDAMem IDA_mem; int maxord_alloc; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxOrd", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (maxord <= 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", 
"IDASetMaxOrd", MSG_NEG_MAXORD); return(IDA_ILL_INPUT); } /* Cannot increase maximum order beyond the value that was used when allocating memory */ maxord_alloc = IDA_mem->ida_maxord_alloc; if (maxord > maxord_alloc) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetMaxOrd", MSG_BAD_MAXORD); return(IDA_ILL_INPUT); } IDA_mem->ida_maxord = SUNMIN(maxord,MAXORD_DEFAULT); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxNumSteps(void *ida_mem, long int mxsteps) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxNumSteps", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Passing mxsteps=0 sets the default. Passing mxsteps<0 disables the test. */ if (mxsteps == 0) IDA_mem->ida_mxstep = MXSTEP_DEFAULT; else IDA_mem->ida_mxstep = mxsteps; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetInitStep(void *ida_mem, realtype hin) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetInitStep", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_hin = hin; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxStep(void *ida_mem, realtype hmax) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxStep", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (hmax < 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetMaxStep", MSG_NEG_HMAX); return(IDA_ILL_INPUT); } /* Passing 0 sets hmax = infinity */ if (hmax == ZERO) { IDA_mem->ida_hmax_inv = HMAX_INV_DEFAULT; return(IDA_SUCCESS); } IDA_mem->ida_hmax_inv = ONE/hmax; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetStopTime(void *ida_mem, realtype tstop) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, 
"IDAS", "IDASetStopTime", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* If IDASolve was called at least once, test if tstop is legal * (i.e. if it was not already passed). * If IDASetStopTime is called before the first call to IDASolve, * tstop will be checked in IDASolve. */ if (IDA_mem->ida_nst > 0) { if ( (tstop - IDA_mem->ida_tn) * IDA_mem->ida_hh < ZERO ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDA", "IDASetStopTime", MSG_BAD_TSTOP, tstop, IDA_mem->ida_tn); return(IDA_ILL_INPUT); } } IDA_mem->ida_tstop = tstop; IDA_mem->ida_tstopset = SUNTRUE; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetNonlinConvCoef(void *ida_mem, realtype epcon) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetNonlinConvCoef", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (epcon <= ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinConvCoef", MSG_NEG_EPCON); return(IDA_ILL_INPUT); } IDA_mem->ida_epcon = epcon; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxErrTestFails(void *ida_mem, int maxnef) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxErrTestFails", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_maxnef = maxnef; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxConvFails(void *ida_mem, int maxncf) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxConvFails", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_maxncf = maxncf; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxNonlinIters(void *ida_mem, int maxcor) { IDAMem IDA_mem; booleantype sensi_sim; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, 
"IDAS", "IDASetMaxNonlinIters", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Are we computing sensitivities with the simultaneous approach? */ sensi_sim = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)); if (sensi_sim) { /* check that the NLS is non-NULL */ if (IDA_mem->NLSsim == NULL) { IDAProcessError(NULL, IDA_MEM_FAIL, "IDAS", "IDASetMaxNonlinIters", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } return(SUNNonlinSolSetMaxIters(IDA_mem->NLSsim, maxcor)); } else { /* check that the NLS is non-NULL */ if (IDA_mem->NLS == NULL) { IDAProcessError(NULL, IDA_MEM_FAIL, "IDAS", "IDASetMaxNonlinIters", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } return(SUNNonlinSolSetMaxIters(IDA_mem->NLS, maxcor)); } } /*-----------------------------------------------------------------*/ int IDASetSuppressAlg(void *ida_mem, booleantype suppressalg) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetSuppressAlg", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_suppressalg = suppressalg; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetId(void *ida_mem, N_Vector id) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetId", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (id == NULL) { if (IDA_mem->ida_idMallocDone) { N_VDestroy(IDA_mem->ida_id); IDA_mem->ida_lrw -= IDA_mem->ida_lrw1; IDA_mem->ida_liw -= IDA_mem->ida_liw1; } IDA_mem->ida_idMallocDone = SUNFALSE; return(IDA_SUCCESS); } if ( !(IDA_mem->ida_idMallocDone) ) { IDA_mem->ida_id = N_VClone(id); IDA_mem->ida_lrw += IDA_mem->ida_lrw1; IDA_mem->ida_liw += IDA_mem->ida_liw1; IDA_mem->ida_idMallocDone = SUNTRUE; } /* Load the id vector */ N_VScale(ONE, id, IDA_mem->ida_id); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetConstraints(void *ida_mem, N_Vector constraints) { IDAMem 
IDA_mem; realtype temptest; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetConstraints", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (constraints == NULL) { if (IDA_mem->ida_constraintsMallocDone) { N_VDestroy(IDA_mem->ida_constraints); IDA_mem->ida_lrw -= IDA_mem->ida_lrw1; IDA_mem->ida_liw -= IDA_mem->ida_liw1; } IDA_mem->ida_constraintsMallocDone = SUNFALSE; IDA_mem->ida_constraintsSet = SUNFALSE; return(IDA_SUCCESS); } /* Test if required vector ops. are defined */ if (constraints->ops->nvdiv == NULL || constraints->ops->nvmaxnorm == NULL || constraints->ops->nvcompare == NULL || constraints->ops->nvconstrmask == NULL || constraints->ops->nvminquotient == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetConstraints", MSG_BAD_NVECTOR); return(IDA_ILL_INPUT); } /* Check the constraints vector */ temptest = N_VMaxNorm(constraints); if((temptest > TWOPT5) || (temptest < HALF)){ IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetConstraints", MSG_BAD_CONSTR); return(IDA_ILL_INPUT); } if ( !(IDA_mem->ida_constraintsMallocDone) ) { IDA_mem->ida_constraints = N_VClone(constraints); IDA_mem->ida_lrw += IDA_mem->ida_lrw1; IDA_mem->ida_liw += IDA_mem->ida_liw1; IDA_mem->ida_constraintsMallocDone = SUNTRUE; } /* Load the constraints vector */ N_VScale(ONE, constraints, IDA_mem->ida_constraints); IDA_mem->ida_constraintsSet = SUNTRUE; return(IDA_SUCCESS); } /* * IDASetRootDirection * * Specifies the direction of zero-crossings to be monitored. * The default is to monitor both crossings. 
*/ int IDASetRootDirection(void *ida_mem, int *rootdir) { IDAMem IDA_mem; int i, nrt; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetRootDirection", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; nrt = IDA_mem->ida_nrtfn; if (nrt==0) { IDAProcessError(NULL, IDA_ILL_INPUT, "IDAS", "IDASetRootDirection", MSG_NO_ROOT); return(IDA_ILL_INPUT); } for(i=0; iida_rootdir[i] = rootdir[i]; return(IDA_SUCCESS); } /* * IDASetNoInactiveRootWarn * * Disables issuing a warning if some root function appears * to be identically zero at the beginning of the integration */ int IDASetNoInactiveRootWarn(void *ida_mem) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetNoInactiveRootWarn", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_mxgnull = 0; return(IDA_SUCCESS); } /* * ================================================================= * IDA IC optional input functions * ================================================================= */ int IDASetNonlinConvCoefIC(void *ida_mem, realtype epiccon) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetNonlinConvCoefIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (epiccon <= ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetNonlinConvCoefIC", MSG_BAD_EPICCON); return(IDA_ILL_INPUT); } IDA_mem->ida_epiccon = epiccon; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxNumStepsIC(void *ida_mem, int maxnh) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxNumStepsIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (maxnh <= 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetMaxNumStepsIC", MSG_BAD_MAXNH); return(IDA_ILL_INPUT); } IDA_mem->ida_maxnh = maxnh; return(IDA_SUCCESS); } 
/*-----------------------------------------------------------------*/ int IDASetMaxNumJacsIC(void *ida_mem, int maxnj) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxNumJacsIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (maxnj <= 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetMaxNumJacsIC", MSG_BAD_MAXNJ); return(IDA_ILL_INPUT); } IDA_mem->ida_maxnj = maxnj; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxNumItersIC(void *ida_mem, int maxnit) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetMaxNumItersIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (maxnit <= 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetMaxNumItersIC", MSG_BAD_MAXNIT); return(IDA_ILL_INPUT); } IDA_mem->ida_maxnit = maxnit; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetMaxBacksIC(void *ida_mem, int maxbacks) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDA", "IDASetMaxBacksIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (maxbacks <= 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDA", "IDASetMaxBacksIC", MSG_IC_BAD_MAXBACKS); return(IDA_ILL_INPUT); } IDA_mem->ida_maxbacks = maxbacks; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetLineSearchOffIC(void *ida_mem, booleantype lsoff) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetLineSearchOffIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_lsoff = lsoff; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetStepToleranceIC(void *ida_mem, realtype steptol) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", 
"IDASetStepToleranceIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (steptol <= ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetStepToleranceIC", MSG_BAD_STEPTOL); return(IDA_ILL_INPUT); } IDA_mem->ida_steptol = steptol; return(IDA_SUCCESS); } /* * ================================================================= * Quadrature optional input functions * ================================================================= */ /*-----------------------------------------------------------------*/ int IDASetQuadErrCon(void *ida_mem, booleantype errconQ) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetQuadErrCon", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadMallocDone == SUNFALSE) { IDAProcessError(NULL, IDA_NO_QUAD, "IDAS", "IDASetQuadErrCon", MSG_NO_QUAD); return(IDA_NO_QUAD); } IDA_mem->ida_errconQ = errconQ; return (IDA_SUCCESS); } /* * ================================================================= * FSA optional input functions * ================================================================= */ int IDASetSensDQMethod(void *ida_mem, int DQtype, realtype DQrhomax) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetSensDQMethod", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if ( (DQtype != IDA_CENTERED) && (DQtype != IDA_FORWARD) ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetSensDQMethod", MSG_BAD_DQTYPE); return(IDA_ILL_INPUT); } if (DQrhomax < ZERO ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASetSensDQMethod", MSG_BAD_DQRHO); return(IDA_ILL_INPUT); } IDA_mem->ida_DQtype = DQtype; IDA_mem->ida_DQrhomax = DQrhomax; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetSensErrCon(void *ida_mem, booleantype errconS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetSensErrCon", 
MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; IDA_mem->ida_errconS = errconS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDASetSensMaxNonlinIters(void *ida_mem, int maxcorS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetSensMaxNonlinIters", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* check that the NLS is non-NULL */ if (IDA_mem->NLSstg == NULL) { IDAProcessError(NULL, IDA_MEM_FAIL, "IDAS", "IDASetSensMaxNonlinIters", MSG_MEM_FAIL); return(IDA_MEM_FAIL); } return(SUNNonlinSolSetMaxIters(IDA_mem->NLSstg, maxcorS)); } /*-----------------------------------------------------------------*/ int IDASetSensParams(void *ida_mem, realtype *p, realtype *pbar, int *plist) { IDAMem IDA_mem; int Ns, is; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetSensParams", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Was sensitivity initialized? */ if (IDA_mem->ida_sensMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDASetSensParams", MSG_NO_SENSI); return(IDA_NO_SENS); } Ns = IDA_mem->ida_Ns; /* Parameters */ IDA_mem->ida_p = p; /* pbar */ if (pbar != NULL) for (is=0; isida_pbar[is] = SUNRabs(pbar[is]); } else for (is=0; isida_pbar[is] = ONE; /* plist */ if (plist != NULL) for (is=0; isida_plist[is] = plist[is]; } else for (is=0; isida_plist[is] = is; return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Function: IDASetQuadSensErrCon * ----------------------------------------------------------------- * IDASetQuadSensErrCon specifies if quadrature sensitivity variables * are considered or not in the error control. 
* ----------------------------------------------------------------- */ int IDASetQuadSensErrCon(void *ida_mem, booleantype errconQS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASetQuadSensErrCon", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Was sensitivity initialized? */ if (IDA_mem->ida_sensMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDASetQuadSensErrCon", MSG_NO_SENSI); return(IDA_NO_SENS); } /* Was quadrature sensitivity initialized? */ if (IDA_mem->ida_quadSensMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDASetQuadSensErrCon", MSG_NO_SENSI); return(IDA_NO_QUADSENS); } IDA_mem->ida_errconQS = errconQS; return(IDA_SUCCESS); } /* * ================================================================= * IDA optional output functions * ================================================================= */ int IDAGetNumSteps(void *ida_mem, long int *nsteps) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumSteps", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nsteps = IDA_mem->ida_nst; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetNumResEvals(void *ida_mem, long int *nrevals) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumResEvals", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nrevals = IDA_mem->ida_nre; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetNumLinSolvSetups(void *ida_mem, long int *nlinsetups) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumLinSolvSetups", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nlinsetups = IDA_mem->ida_nsetups; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int 
IDAGetNumErrTestFails(void *ida_mem, long int *netfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumErrTestFails", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *netfails = IDA_mem->ida_netf; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetNumBacktrackOps(void *ida_mem, long int *nbacktracks) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumBacktrackOps", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nbacktracks = IDA_mem->ida_nbacktr; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetConsistentIC(void *ida_mem, N_Vector yy0, N_Vector yp0) { IDAMem IDA_mem; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetConsistentIC", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_kused != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAGetConsistentIC", MSG_TOO_LATE); return(IDA_ILL_INPUT); } if(yy0 != NULL) N_VScale(ONE, IDA_mem->ida_phi[0], yy0); if(yp0 != NULL) N_VScale(ONE, IDA_mem->ida_phi[1], yp0); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetLastOrder(void *ida_mem, int *klast) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetLastOrder", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *klast = IDA_mem->ida_kused; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetCurrentOrder(void *ida_mem, int *kcur) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentOrder", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *kcur = IDA_mem->ida_kk; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int 
IDAGetCurrentCj(void *ida_mem, realtype *cj) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentCjRatio", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *cj = IDA_mem->ida_cj; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetCurrentY(void *ida_mem, N_Vector *ycur) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentY", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *ycur = IDA_mem->ida_yy; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetCurrentYSens(void *ida_mem, N_Vector **yS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentYSens", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *yS = IDA_mem->ida_yyS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetCurrentYp(void *ida_mem, N_Vector *ypcur) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentYp", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *ypcur = IDA_mem->ida_yp; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetCurrentYpSens(void *ida_mem, N_Vector **ypS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentYpSens", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *ypS = IDA_mem->ida_ypS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetActualInitStep(void *ida_mem, realtype *hinused) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetActualInitStep", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *hinused = IDA_mem->ida_h0u; return(IDA_SUCCESS); } 
/*-----------------------------------------------------------------*/ int IDAGetLastStep(void *ida_mem, realtype *hlast) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetLastStep", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *hlast = IDA_mem->ida_hused; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetCurrentStep(void *ida_mem, realtype *hcur) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentStep", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *hcur = IDA_mem->ida_hh; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetCurrentTime(void *ida_mem, realtype *tcur) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetCurrentTime", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *tcur = IDA_mem->ida_tn; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetTolScaleFactor(void *ida_mem, realtype *tolsfact) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetTolScaleFactor", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *tolsfact = IDA_mem->ida_tolsf; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetErrWeights(void *ida_mem, N_Vector eweight) { IDAMem IDA_mem; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetErrWeights", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; N_VScale(ONE, IDA_mem->ida_ewt, eweight); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetEstLocalErrors(void *ida_mem, N_Vector ele) { IDAMem IDA_mem; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetEstLocalErrors", MSG_NO_MEM); 
return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; N_VScale(ONE, IDA_mem->ida_ee, ele); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetWorkSpace(void *ida_mem, long int *lenrw, long int *leniw) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetWorkSpace", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *leniw = IDA_mem->ida_liw; *lenrw = IDA_mem->ida_lrw; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetIntegratorStats(void *ida_mem, long int *nsteps, long int *nrevals, long int *nlinsetups, long int *netfails, int *klast, int *kcur, realtype *hinused, realtype *hlast, realtype *hcur, realtype *tcur) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetIntegratorStats", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nsteps = IDA_mem->ida_nst; *nrevals = IDA_mem->ida_nre; *nlinsetups = IDA_mem->ida_nsetups; *netfails = IDA_mem->ida_netf; *klast = IDA_mem->ida_kused; *kcur = IDA_mem->ida_kk; *hinused = IDA_mem->ida_h0u; *hlast = IDA_mem->ida_hused; *hcur = IDA_mem->ida_hh; *tcur = IDA_mem->ida_tn; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetNumGEvals(void *ida_mem, long int *ngevals) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumGEvals", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *ngevals = IDA_mem->ida_nge; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetRootInfo(void *ida_mem, int *rootsfound) { IDAMem IDA_mem; int i, nrt; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetRootInfo", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; nrt = IDA_mem->ida_nrtfn; for (i=0; iida_iroots[i]; return(IDA_SUCCESS); } 
/*-----------------------------------------------------------------*/ int IDAGetNumNonlinSolvIters(void *ida_mem, long int *nniters) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumNonlinSolvIters", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nniters = IDA_mem->ida_nni; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetNumNonlinSolvConvFails(void *ida_mem, long int *nncfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumNonlinSolvConvFails", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nncfails = IDA_mem->ida_ncfn; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetNonlinSolvStats(void *ida_mem, long int *nniters, long int *nncfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNonlinSolvStats", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; *nniters = IDA_mem->ida_nni; *nncfails = IDA_mem->ida_ncfn; return(IDA_SUCCESS); } /* * ================================================================= * Quadrature optional output functions * ================================================================= */ /*-----------------------------------------------------------------*/ int IDAGetQuadNumRhsEvals(void *ida_mem, long int *nrQevals) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadNumRhsEvals", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAGetQuadNumRhsEvals", MSG_NO_QUAD); return(IDA_NO_QUAD); } *nrQevals = IDA_mem->ida_nrQe; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetQuadNumErrTestFails(void *ida_mem, long int *nQetfails) { IDAMem IDA_mem; if (ida_mem==NULL) { 
IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadNumErrTestFails", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAGetQuadNumErrTestFails", MSG_NO_QUAD); return(IDA_NO_QUAD); } *nQetfails = IDA_mem->ida_netfQ; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetQuadErrWeights(void *ida_mem, N_Vector eQweight) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadErrWeights", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAGetQuadErrWeights", MSG_NO_QUAD); return(IDA_NO_QUAD); } if(IDA_mem->ida_errconQ) N_VScale(ONE, IDA_mem->ida_ewtQ, eQweight); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetQuadStats(void *ida_mem, long int *nrQevals, long int *nQetfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadStats", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAGetQuadStats", MSG_NO_QUAD); return(IDA_NO_QUAD); } *nrQevals = IDA_mem->ida_nrQe; *nQetfails = IDA_mem->ida_netfQ; return(IDA_SUCCESS); } /* * ================================================================= * Quadrature FSA optional output functions * ================================================================= */ /*-----------------------------------------------------------------*/ int IDAGetQuadSensNumRhsEvals(void *ida_mem, long int *nrhsQSevals) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSensNumRhsEvals", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr_sensi == SUNFALSE) { IDAProcessError(IDA_mem, 
IDA_NO_QUADSENS, "IDAS", "IDAGetQuadSensNumRhsEvals", MSG_NO_QUADSENSI); return(IDA_NO_QUADSENS); } *nrhsQSevals = IDA_mem->ida_nrQSe; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetQuadSensNumErrTestFails(void *ida_mem, long int *nQSetfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSensNumErrTestFails", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr_sensi == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAGetQuadSensNumErrTestFails", MSG_NO_QUADSENSI); return(IDA_NO_QUADSENS); } *nQSetfails = IDA_mem->ida_netfQS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetQuadSensErrWeights(void *ida_mem, N_Vector *eQSweight) { IDAMem IDA_mem; int is, Ns; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSensErrWeights", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr_sensi == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAGetQuadSensErrWeights", MSG_NO_QUADSENSI); return(IDA_NO_QUADSENS); } Ns = IDA_mem->ida_Ns; if (IDA_mem->ida_errconQS) for (is=0; isida_ewtQS[is], eQSweight[is]); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetQuadSensStats(void *ida_mem, long int *nrhsQSevals, long int *nQSetfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSensStats", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadr_sensi == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAGetQuadSensStats", MSG_NO_QUADSENSI); return(IDA_NO_QUADSENS); } *nrhsQSevals = IDA_mem->ida_nrQSe; *nQSetfails = IDA_mem->ida_netfQS; return(IDA_SUCCESS); } /* * ================================================================= * FSA optional output 
functions * ================================================================= */ /*-----------------------------------------------------------------*/ int IDAGetSensConsistentIC(void *ida_mem, N_Vector *yyS0, N_Vector *ypS0) { IDAMem IDA_mem; int is; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensConsistentIC", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensConsistentIC", MSG_NO_SENSI); return(IDA_NO_SENS); } if (IDA_mem->ida_kused != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAGetSensConsistentIC", MSG_TOO_LATE); return(IDA_ILL_INPUT); } if(yyS0 != NULL) { for (is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_phiS[0][is], yyS0[is]); } if(ypS0 != NULL) { for (is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_phiS[1][is], ypS0[is]); } return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetSensNumResEvals(void *ida_mem, long int *nrSevals) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGeSensNumResEvals", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensNumResEvals", MSG_NO_SENSI); return(IDA_NO_SENS); } *nrSevals = IDA_mem->ida_nrSe; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetNumResEvalsSens(void *ida_mem, long int *nrevalsS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetNumResEvalsSens", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetNumResEvalsSens", MSG_NO_SENSI); return(IDA_NO_SENS); } *nrevalsS = IDA_mem->ida_nreS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ 
int IDAGetSensNumErrTestFails(void *ida_mem, long int *nSetfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensNumErrTestFails", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensNumErrTestFails", MSG_NO_SENSI); return(IDA_NO_SENS); } *nSetfails = IDA_mem->ida_netfS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetSensNumLinSolvSetups(void *ida_mem, long int *nlinsetupsS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensNumLinSolvSetups", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensNumLinSolvSetups", MSG_NO_SENSI); return(IDA_NO_SENS); } *nlinsetupsS = IDA_mem->ida_nsetupsS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetSensErrWeights(void *ida_mem, N_Vector_S eSweight) { IDAMem IDA_mem; int is; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensErrWeights", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensErrWeights", MSG_NO_SENSI); return(IDA_NO_SENS); } for (is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_ewtS[is], eSweight[is]); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetSensStats(void *ida_mem, long int *nrSevals, long int *nrevalsS, long int *nSetfails, long int *nlinsetupsS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensStats", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensStats", 
MSG_NO_SENSI); return(IDA_NO_SENS); } *nrSevals = IDA_mem->ida_nrSe; *nrevalsS = IDA_mem->ida_nreS; *nSetfails = IDA_mem->ida_netfS; *nlinsetupsS = IDA_mem->ida_nsetupsS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetSensNumNonlinSolvIters(void *ida_mem, long int *nSniters) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensNumNonlinSolvIters", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensNumNonlinSolvIters", MSG_NO_SENSI); return(IDA_NO_SENS); } *nSniters = IDA_mem->ida_nniS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetSensNumNonlinSolvConvFails(void *ida_mem, long int *nSncfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensNumNonlinSolvConvFails", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensNumNonlinSolvConvFails", MSG_NO_SENSI); return(IDA_NO_SENS); } *nSncfails = IDA_mem->ida_ncfnS; return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ int IDAGetSensNonlinSolvStats(void *ida_mem, long int *nSniters, long int *nSncfails) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensNonlinSolvstats", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensNonlinSolvStats", MSG_NO_SENSI); return(IDA_NO_SENS); } *nSniters = IDA_mem->ida_nniS; *nSncfails = IDA_mem->ida_ncfnS; return(IDA_SUCCESS); } /* * ================================================================= * IDAGetReturnFlagName * ================================================================= */ char 
*IDAGetReturnFlagName(long int flag) { char *name; name = (char *)malloc(24*sizeof(char)); switch(flag) { case IDA_SUCCESS: sprintf(name,"IDA_SUCCESS"); break; case IDA_TSTOP_RETURN: sprintf(name,"IDA_TSTOP_RETURN"); break; case IDA_ROOT_RETURN: sprintf(name,"IDA_ROOT_RETURN"); break; case IDA_TOO_MUCH_WORK: sprintf(name,"IDA_TOO_MUCH_WORK"); break; case IDA_TOO_MUCH_ACC: sprintf(name,"IDA_TOO_MUCH_ACC"); break; case IDA_ERR_FAIL: sprintf(name,"IDA_ERR_FAIL"); break; case IDA_CONV_FAIL: sprintf(name,"IDA_CONV_FAIL"); break; case IDA_LINIT_FAIL: sprintf(name,"IDA_LINIT_FAIL"); break; case IDA_LSETUP_FAIL: sprintf(name,"IDA_LSETUP_FAIL"); break; case IDA_LSOLVE_FAIL: sprintf(name,"IDA_LSOLVE_FAIL"); break; case IDA_CONSTR_FAIL: sprintf(name,"IDA_CONSTR_FAIL"); break; case IDA_RES_FAIL: sprintf(name,"IDA_RES_FAIL"); break; case IDA_FIRST_RES_FAIL: sprintf(name,"IDA_FIRST_RES_FAIL"); break; case IDA_REP_RES_ERR: sprintf(name,"IDA_REP_RES_ERR"); break; case IDA_RTFUNC_FAIL: sprintf(name,"IDA_RTFUNC_FAIL"); break; case IDA_MEM_FAIL: sprintf(name,"IDA_MEM_FAIL"); break; case IDA_MEM_NULL: sprintf(name,"IDA_MEM_NULL"); break; case IDA_ILL_INPUT: sprintf(name,"IDA_ILL_INPUT"); break; case IDA_NO_MALLOC: sprintf(name,"IDA_NO_MALLOC"); break; case IDA_BAD_T: sprintf(name,"IDA_BAD_T"); break; case IDA_BAD_K: sprintf(name,"IDA_BAD_K"); break; case IDA_BAD_DKY: sprintf(name,"IDA_BAD_DKY"); break; case IDA_BAD_EWT: sprintf(name,"IDA_BAD_EWT"); break; case IDA_NO_RECOVERY: sprintf(name,"IDA_NO_RECOVERY"); break; case IDA_LINESEARCH_FAIL: sprintf(name,"IDA_LINESEARCH_FAIL"); break; case IDA_NO_SENS: sprintf(name,"IDA_NO_SENS"); break; case IDA_SRES_FAIL: sprintf(name, "IDA_SRES_FAIL"); break; case IDA_REP_SRES_ERR: sprintf(name, "IDA_REP_SRES_ERR"); break; case IDA_BAD_IS: sprintf(name,"IDA_BAD_IS"); break; case IDA_NO_QUAD: sprintf(name,"IDA_NO_QUAD"); break; case IDA_NO_QUADSENS: sprintf(name, "IDA_NO_QUADSENS"); break; case IDA_QRHS_FAIL: sprintf(name,"IDA_QRHS_FAIL"); break; 
case IDA_REP_QRHS_ERR: sprintf(name,"IDA_REP_QRHS_ERR"); break; case IDA_QSRHS_FAIL: sprintf(name, "IDA_QSRHS_FAIL"); break; case IDA_REP_QSRHS_ERR: sprintf(name,"IDA_REP_QSRHS_ERR"); break; /* IDAA flags follow below. */ case IDA_NO_ADJ: sprintf(name, "IDA_NO_ADJ"); break; case IDA_BAD_TB0: sprintf(name, "IDA_BAD_TB0"); break; case IDA_REIFWD_FAIL: sprintf(name, "IDA_REIFWD_FAIL"); break; case IDA_FWD_FAIL: sprintf(name, "IDA_FWD_FAIL"); break; case IDA_GETY_BADT: sprintf(name, "IDA_GETY_BADT"); break; case IDA_NO_BCK: sprintf(name, "IDA_NO_BCK"); break; case IDA_NO_FWD: sprintf(name,"IDA_NO_FWD"); break; case IDA_NLS_SETUP_FAIL: sprintf(name,"IDA_NLS_SETUP_FAIL"); break; case IDA_NLS_FAIL: sprintf(name,"IDA_NLS_FAIL"); break; default: sprintf(name,"NONE"); } return(name); } StanHeaders/src/idas/LICENSE0000644000176200001440000000305014645137106015174 0ustar liggesusersBSD 3-Clause License Copyright (c) 2002-2022, Lawrence Livermore National Security and Southern Methodist University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. StanHeaders/src/idas/fmod/0000755000176200001440000000000014511135055015107 5ustar liggesusersStanHeaders/src/idas/fmod/fidas_mod.c0000644000176200001440000026373414645137106017226 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } enum { SWIG_MEM_OWN = 0x01, SWIG_MEM_RVALUE = 0x02, SWIG_MEM_CONST = 0x04 }; #define SWIG_check_mutable(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL) \ if ((SWIG_CLASS_WRAPPER).cmemflags & SWIG_MEM_CONST) { \ SWIG_exception_impl(FUNCNAME, SWIG_TypeError, \ "Cannot pass const " TYPENAME " (class " FNAME ") " \ "as a mutable reference", \ RETURNNULL); \ } #define SWIG_check_nonnull(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL) \ if (!(SWIG_CLASS_WRAPPER).cptr) { \ SWIG_exception_impl(FUNCNAME, SWIG_TypeError, \ "Cannot pass null " TYPENAME " (class " FNAME ") " \ "as a reference", RETURNNULL); \ } #define SWIG_check_mutable_nonnull(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL) \ SWIG_check_nonnull(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL); \ SWIG_check_mutable(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL); #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. 
*/ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "idas/idas.h" #include "idas/idas_bbdpre.h" #include "idas/idas_ls.h" #include #ifdef _MSC_VER # ifndef strtoull # define strtoull _strtoui64 # endif # ifndef strtoll # define strtoll _strtoi64 # endif #endif typedef struct { void* data; size_t size; } SwigArrayWrapper; SWIGINTERN SwigArrayWrapper SwigArrayWrapper_uninitialized() { SwigArrayWrapper result; result.data = NULL; result.size = 0; return result; } #include typedef struct { void* cptr; int cmemflags; } SwigClassWrapper; SWIGINTERN SwigClassWrapper SwigClassWrapper_uninitialized() { SwigClassWrapper result; result.cptr = NULL; result.cmemflags = 0; return result; } SWIGINTERN void SWIG_assign(SwigClassWrapper* self, SwigClassWrapper other) { if (self->cptr == NULL) { /* LHS is unassigned */ if (other.cmemflags & SWIG_MEM_RVALUE) { /* Capture pointer from RHS, clear 'moving' flag */ self->cptr = other.cptr; self->cmemflags = other.cmemflags & (~SWIG_MEM_RVALUE); } else { /* Become a reference to the other object */ self->cptr = other.cptr; self->cmemflags = other.cmemflags & (~SWIG_MEM_OWN); } } else if (other.cptr == NULL) { /* Replace LHS with a null pointer */ free(self->cptr); *self = SwigClassWrapper_uninitialized(); } else { if (self->cmemflags & SWIG_MEM_OWN) { free(self->cptr); } self->cptr = other.cptr; if (other.cmemflags & SWIG_MEM_RVALUE) { /* Capture RHS */ self->cmemflags = other.cmemflags & ~SWIG_MEM_RVALUE; } else { /* Point to RHS */ self->cmemflags = other.cmemflags & ~SWIG_MEM_OWN; } } } SWIGEXPORT void * _wrap_FIDACreate(void *farg1) { void * fresult ; SUNContext arg1 = (SUNContext) 0 ; void *result = 0 ; arg1 = (SUNContext)(farg1); result = (void 
*)IDACreate(arg1); fresult = result; return fresult; } SWIGEXPORT int _wrap_FIDAInit(void *farg1, IDAResFn farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; IDAResFn arg2 = (IDAResFn) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDAResFn)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); result = (int)IDAInit(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAReInit(void *farg1, double const *farg2, N_Vector farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); result = (int)IDAReInit(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASStolerances(void *farg1, double const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDASStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASVtolerances(void *farg1, double const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); result = (int)IDASVtolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAWFtolerances(void *farg1, IDAEwtFn farg2) { int fresult ; void *arg1 = (void *) 0 ; IDAEwtFn arg2 = (IDAEwtFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDAEwtFn)(farg2); result = (int)IDAWFtolerances(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDACalcIC(void 
*farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDACalcIC(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNonlinConvCoefIC(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetNonlinConvCoefIC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxNumStepsIC(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxNumStepsIC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxNumJacsIC(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxNumJacsIC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxNumItersIC(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxNumItersIC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetLineSearchOffIC(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetLineSearchOffIC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetStepToleranceIC(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetStepToleranceIC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FIDASetMaxBacksIC(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxBacksIC(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetErrHandlerFn(void *farg1, IDAErrHandlerFn farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; IDAErrHandlerFn arg2 = (IDAErrHandlerFn) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDAErrHandlerFn)(farg2); arg3 = (void *)(farg3); result = (int)IDASetErrHandlerFn(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetErrFile(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (FILE *)(farg2); result = (int)IDASetErrFile(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetUserData(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (void *)(farg2); result = (int)IDASetUserData(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxOrd(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxOrd(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxNumSteps(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)IDASetMaxNumSteps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetInitStep(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetInitStep(arg1,arg2); fresult = (int)(result); return fresult; } 
SWIGEXPORT int _wrap_FIDASetMaxStep(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetMaxStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetStopTime(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetStopTime(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNonlinConvCoef(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetNonlinConvCoef(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxErrTestFails(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxNonlinIters(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxNonlinIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxConvFails(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetMaxConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetSuppressAlg(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetSuppressAlg(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetId(void *farg1, N_Vector farg2) { int 
fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)IDASetId(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetConstraints(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)IDASetConstraints(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNonlinearSolver(void *farg1, SUNNonlinearSolver farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNNonlinearSolver arg2 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNNonlinearSolver)(farg2); result = (int)IDASetNonlinearSolver(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNlsResFn(void *farg1, IDAResFn farg2) { int fresult ; void *arg1 = (void *) 0 ; IDAResFn arg2 = (IDAResFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDAResFn)(farg2); result = (int)IDASetNlsResFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDARootInit(void *farg1, int const *farg2, IDARootFn farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDARootFn arg3 = (IDARootFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDARootFn)(farg3); result = (int)IDARootInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetRootDirection(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)IDASetRootDirection(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNoInactiveRootWarn(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)IDASetNoInactiveRootWarn(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FIDASolve(void *farg1, double const *farg2, double *farg3, N_Vector farg4, N_Vector farg5, int const *farg6) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int arg6 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); arg6 = (int)(*farg6); result = (int)IDASolve(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAComputeY(void *farg1, N_Vector farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)IDAComputeY(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAComputeYp(void *farg1, N_Vector farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)IDAComputeYp(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAComputeYSens(void *farg1, void *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); result = (int)IDAComputeYSens(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAComputeYpSens(void *farg1, void *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); result = (int)IDAComputeYpSens(arg1,arg2,arg3); fresult = (int)(result); return fresult; } 
SWIGEXPORT int _wrap_FIDAGetDky(void *farg1, double const *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)IDAGetDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)IDAGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumSteps(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumSteps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumResEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumResEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumLinSolvSetups(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumLinSolvSetups(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumBacktrackOps(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long 
*arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumBacktrackOps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetConsistentIC(void *farg1, N_Vector farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)IDAGetConsistentIC(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetLastOrder(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)IDAGetLastOrder(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetCurrentOrder(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)IDAGetCurrentOrder(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetCurrentCj(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)IDAGetCurrentCj(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetCurrentY(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)IDAGetCurrentY(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetCurrentYSens(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector **arg2 = (N_Vector **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector **)(farg2); result = (int)IDAGetCurrentYSens(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FIDAGetCurrentYp(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)IDAGetCurrentYp(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetCurrentYpSens(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector **arg2 = (N_Vector **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector **)(farg2); result = (int)IDAGetCurrentYpSens(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetActualInitStep(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)IDAGetActualInitStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetLastStep(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)IDAGetLastStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetCurrentStep(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)IDAGetCurrentStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetCurrentTime(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)IDAGetCurrentTime(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetTolScaleFactor(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = 
(int)IDAGetTolScaleFactor(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetErrWeights(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)IDAGetErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetEstLocalErrors(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)IDAGetEstLocalErrors(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumGEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumGEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetRootInfo(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)IDAGetRootInfo(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetIntegratorStats(void *farg1, long *farg2, long *farg3, long *farg4, long *farg5, int *farg6, int *farg7, double *farg8, double *farg9, double *farg10, double *farg11) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; long *arg4 = (long *) 0 ; long *arg5 = (long *) 0 ; int *arg6 = (int *) 0 ; int *arg7 = (int *) 0 ; realtype *arg8 = (realtype *) 0 ; realtype *arg9 = (realtype *) 0 ; realtype *arg10 = (realtype *) 0 ; realtype *arg11 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); arg4 = (long *)(farg4); arg5 = (long *)(farg5); arg6 = (int *)(farg6); arg7 = (int *)(farg7); arg8 = (realtype *)(farg8); arg9 = (realtype *)(farg9); arg10 = (realtype 
*)(farg10); arg11 = (realtype *)(farg11); result = (int)IDAGetIntegratorStats(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNonlinearSystemData(void *farg1, double *farg2, void *farg3, void *farg4, void *farg5, void *farg6, void *farg7, double *farg8, void *farg9) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; N_Vector *arg7 = (N_Vector *) 0 ; realtype *arg8 = (realtype *) 0 ; void **arg9 = (void **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); arg7 = (N_Vector *)(farg7); arg8 = (realtype *)(farg8); arg9 = (void **)(farg9); result = (int)IDAGetNonlinearSystemData(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNonlinearSystemDataSens(void *farg1, double *farg2, void *farg3, void *farg4, void *farg5, void *farg6, double *farg7, void *farg8) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector **arg3 = (N_Vector **) 0 ; N_Vector **arg4 = (N_Vector **) 0 ; N_Vector **arg5 = (N_Vector **) 0 ; N_Vector **arg6 = (N_Vector **) 0 ; realtype *arg7 = (realtype *) 0 ; void **arg8 = (void **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector **)(farg3); arg4 = (N_Vector **)(farg4); arg5 = (N_Vector **)(farg5); arg6 = (N_Vector **)(farg6); arg7 = (realtype *)(farg7); arg8 = (void **)(farg8); result = (int)IDAGetNonlinearSystemDataSens(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumNonlinSolvIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int 
result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumNonlinSolvIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumNonlinSolvConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumNonlinSolvConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNonlinSolvStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)IDAGetNonlinSolvStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT SwigArrayWrapper _wrap_FIDAGetReturnFlagName(long const *farg1) { SwigArrayWrapper fresult ; long arg1 ; char *result = 0 ; arg1 = (long)(*farg1); result = (char *)IDAGetReturnFlagName(arg1); fresult.size = strlen((const char*)(result)); fresult.data = (char *)(result); return fresult; } SWIGEXPORT void _wrap_FIDAFree(void *farg1) { void **arg1 = (void **) 0 ; arg1 = (void **)(farg1); IDAFree(arg1); } SWIGEXPORT int _wrap_FIDASetJacTimesResFn(void *farg1, IDAResFn farg2) { int fresult ; void *arg1 = (void *) 0 ; IDAResFn arg2 = (IDAResFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDAResFn)(farg2); result = (int)IDASetJacTimesResFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadInit(void *farg1, IDAQuadRhsFn farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; IDAQuadRhsFn arg2 = (IDAQuadRhsFn) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDAQuadRhsFn)(farg2); arg3 = (N_Vector)(farg3); result = (int)IDAQuadInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadReInit(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 
; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)IDAQuadReInit(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSStolerances(void *farg1, double const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDAQuadSStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSVtolerances(void *farg1, double const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); result = (int)IDAQuadSVtolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetQuadErrCon(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetQuadErrCon(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuad(void *farg1, double *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); result = (int)IDAGetQuad(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadDky(void *farg1, double const *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)IDAGetQuadDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadNumRhsEvals(void *farg1, long 
*farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetQuadNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetQuadNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadErrWeights(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)IDAGetQuadErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)IDAGetQuadStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FIDAQuadFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); IDAQuadFree(arg1); } SWIGEXPORT int _wrap_FIDASensInit(void *farg1, int const *farg2, int const *farg3, IDASensResFn farg4, void *farg5, void *farg6) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; IDASensResFn arg4 = (IDASensResFn) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (IDASensResFn)(farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)IDASensInit(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASensReInit(void *farg1, int const *farg2, void *farg3, void *farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; 
N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = (int)IDASensReInit(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASensSStolerances(void *farg1, double const *farg2, double *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype *arg3 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype *)(farg3); result = (int)IDASensSStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASensSVtolerances(void *farg1, double const *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)IDASensSVtolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASensEEtolerances(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)IDASensEEtolerances(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensConsistentIC(void *farg1, void *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); result = (int)IDAGetSensConsistentIC(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetSensDQMethod(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDASetSensDQMethod(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetSensErrCon(void 
*farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetSensErrCon(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetSensMaxNonlinIters(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetSensMaxNonlinIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetSensParams(void *farg1, double *farg2, double *farg3, int *farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; realtype *arg3 = (realtype *) 0 ; int *arg4 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (realtype *)(farg3); arg4 = (int *)(farg4); result = (int)IDASetSensParams(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNonlinearSolverSensSim(void *farg1, SUNNonlinearSolver farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNNonlinearSolver arg2 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNNonlinearSolver)(farg2); result = (int)IDASetNonlinearSolverSensSim(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNonlinearSolverSensStg(void *farg1, SUNNonlinearSolver farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNNonlinearSolver arg2 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNNonlinearSolver)(farg2); result = (int)IDASetNonlinearSolverSensStg(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASensToggleOff(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)IDASensToggleOff(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSens(void *farg1, double *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = 
(realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); result = (int)IDAGetSens(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSens1(void *farg1, double *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)IDAGetSens1(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensDky(void *farg1, double const *farg2, int const *farg3, void *farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector *)(farg4); result = (int)IDAGetSensDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensDky1(void *farg1, double const *farg2, int const *farg3, int const *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; int arg4 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (N_Vector)(farg5); result = (int)IDAGetSensDky1(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensNumResEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetSensNumResEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumResEvalsSens(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long 
*)(farg2); result = (int)IDAGetNumResEvalsSens(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetSensNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensNumLinSolvSetups(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetSensNumLinSolvSetups(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensErrWeights(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector_S arg2 = (N_Vector_S) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector_S)(farg2); result = (int)IDAGetSensErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensStats(void *farg1, long *farg2, long *farg3, long *farg4, long *farg5) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; long *arg4 = (long *) 0 ; long *arg5 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); arg4 = (long *)(farg4); arg5 = (long *)(farg5); result = (int)IDAGetSensStats(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensNumNonlinSolvIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetSensNumNonlinSolvIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensNumNonlinSolvConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = 
(int)IDAGetSensNumNonlinSolvConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetSensNonlinSolvStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)IDAGetSensNonlinSolvStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FIDASensFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); IDASensFree(arg1); } SWIGEXPORT int _wrap_FIDAQuadSensInit(void *farg1, IDAQuadSensRhsFn farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; IDAQuadSensRhsFn arg2 = (IDAQuadSensRhsFn) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDAQuadSensRhsFn)(farg2); arg3 = (N_Vector *)(farg3); result = (int)IDAQuadSensInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSensReInit(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)IDAQuadSensReInit(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSensSStolerances(void *farg1, double const *farg2, double *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype *arg3 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype *)(farg3); result = (int)IDAQuadSensSStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSensSVtolerances(void *farg1, double const *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)IDAQuadSensSVtolerances(arg1,arg2,arg3); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSensEEtolerances(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)IDAQuadSensEEtolerances(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetQuadSensErrCon(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetQuadSensErrCon(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSens(void *farg1, double *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); result = (int)IDAGetQuadSens(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSens1(void *farg1, double *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)IDAGetQuadSens1(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSensDky(void *farg1, double const *farg2, int const *farg3, void *farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector *)(farg4); result = (int)IDAGetQuadSensDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSensDky1(void *farg1, double const *farg2, int const *farg3, int const *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; int arg4 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 
= (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (N_Vector)(farg5); result = (int)IDAGetQuadSensDky1(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSensNumRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetQuadSensNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSensNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetQuadSensNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSensErrWeights(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)IDAGetQuadSensErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadSensStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)IDAGetQuadSensStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FIDAQuadSensFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); IDAQuadSensFree(arg1); } SWIGEXPORT int _wrap_FIDAAdjInit(void *farg1, long const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); arg3 = (int)(*farg3); result = (int)IDAAdjInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAAdjReInit(void *farg1) { int fresult ; void *arg1 
= (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)IDAAdjReInit(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FIDAAdjFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); IDAAdjFree(arg1); } SWIGEXPORT int _wrap_FIDACreateB(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)IDACreateB(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAInitB(void *farg1, int const *farg2, IDAResFnB farg3, double const *farg4, N_Vector farg5, N_Vector farg6) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDAResFnB arg3 = (IDAResFnB) 0 ; realtype arg4 ; N_Vector arg5 = (N_Vector) 0 ; N_Vector arg6 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDAResFnB)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector)(farg5); arg6 = (N_Vector)(farg6); result = (int)IDAInitB(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAInitBS(void *farg1, int const *farg2, IDAResFnBS farg3, double const *farg4, N_Vector farg5, N_Vector farg6) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDAResFnBS arg3 = (IDAResFnBS) 0 ; realtype arg4 ; N_Vector arg5 = (N_Vector) 0 ; N_Vector arg6 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDAResFnBS)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector)(farg5); arg6 = (N_Vector)(farg6); result = (int)IDAInitBS(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAReInitB(void *farg1, int const *farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = 
(N_Vector)(farg5); result = (int)IDAReInitB(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASStolerancesB(void *farg1, int const *farg2, double const *farg3, double const *farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; realtype arg4 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (realtype)(*farg4); result = (int)IDASStolerancesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASVtolerancesB(void *farg1, int const *farg2, double const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); result = (int)IDASVtolerancesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadInitB(void *farg1, int const *farg2, IDAQuadRhsFnB farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDAQuadRhsFnB arg3 = (IDAQuadRhsFnB) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDAQuadRhsFnB)(farg3); arg4 = (N_Vector)(farg4); result = (int)IDAQuadInitB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadInitBS(void *farg1, int const *farg2, IDAQuadRhsFnBS farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDAQuadRhsFnBS arg3 = (IDAQuadRhsFnBS) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDAQuadRhsFnBS)(farg3); arg4 = (N_Vector)(farg4); result = (int)IDAQuadInitBS(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadReInitB(void *farg1, int const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = 
(void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); result = (int)IDAQuadReInitB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSStolerancesB(void *farg1, int const *farg2, double const *farg3, double const *farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; realtype arg4 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (realtype)(*farg4); result = (int)IDAQuadSStolerancesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAQuadSVtolerancesB(void *farg1, int const *farg2, double const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); result = (int)IDAQuadSVtolerancesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDACalcICB(void *farg1, int const *farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); result = (int)IDACalcICB(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDACalcICBS(void *farg1, int const *farg2, double const *farg3, N_Vector farg4, N_Vector farg5, void *farg6, void *farg7) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; N_Vector *arg7 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); arg6 = (N_Vector *)(farg6); arg7 = (N_Vector 
*)(farg7); result = (int)IDACalcICBS(arg1,arg2,arg3,arg4,arg5,arg6,arg7); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASolveF(void *farg1, double const *farg2, double *farg3, N_Vector farg4, N_Vector farg5, int const *farg6, int *farg7) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int arg6 ; int *arg7 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); arg6 = (int)(*farg6); arg7 = (int *)(farg7); result = (int)IDASolveF(arg1,arg2,arg3,arg4,arg5,arg6,arg7); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASolveB(void *farg1, double const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); result = (int)IDASolveB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAAdjSetNoSensi(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)IDAAdjSetNoSensi(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetUserDataB(void *farg1, int const *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; void *arg3 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (void *)(farg3); result = (int)IDASetUserDataB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxOrdB(void *farg1, int const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)IDASetMaxOrdB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxNumStepsB(void *farg1, int const *farg2, 
long const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; long arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (long)(*farg3); result = (int)IDASetMaxNumStepsB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetInitStepB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDASetInitStepB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetMaxStepB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDASetMaxStepB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetSuppressAlgB(void *farg1, int const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)IDASetSuppressAlgB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetIdB(void *farg1, int const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); result = (int)IDASetIdB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetConstraintsB(void *farg1, int const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); result = (int)IDASetConstraintsB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetQuadErrConB(void *farg1, int const 
*farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)IDASetQuadErrConB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetNonlinearSolverB(void *farg1, int const *farg2, SUNNonlinearSolver farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; SUNNonlinearSolver arg3 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (SUNNonlinearSolver)(farg3); result = (int)IDASetNonlinearSolverB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetB(void *farg1, int const *farg2, double *farg3, N_Vector farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); result = (int)IDAGetB(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetQuadB(void *farg1, int const *farg2, double *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); result = (int)IDAGetQuadB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT void * _wrap_FIDAGetAdjIDABmem(void *farg1, int const *farg2) { void * fresult ; void *arg1 = (void *) 0 ; int arg2 ; void *result = 0 ; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (void *)IDAGetAdjIDABmem(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT int _wrap_FIDAGetConsistentICB(void *farg1, int const *farg2, N_Vector farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; 
N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); result = (int)IDAGetConsistentICB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetAdjY(void *farg1, double const *farg2, N_Vector farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); result = (int)IDAGetAdjY(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_IDAadjCheckPointRec_my_addr_set(SwigClassWrapper const *farg1, void *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; void *arg2 = (void *) 0 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::my_addr", return ); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); arg2 = (void *)(farg2); if (arg1) (arg1)->my_addr = arg2; } SWIGEXPORT void * _wrap_IDAadjCheckPointRec_my_addr_get(SwigClassWrapper const *farg1) { void * fresult ; IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; void *result = 0 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::my_addr", return 0); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); result = (void *) ((arg1)->my_addr); fresult = result; return fresult; } SWIGEXPORT void _wrap_IDAadjCheckPointRec_next_addr_set(SwigClassWrapper const *farg1, void *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; void *arg2 = (void *) 0 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::next_addr", return ); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); arg2 = (void *)(farg2); if (arg1) (arg1)->next_addr = arg2; } SWIGEXPORT void * 
_wrap_IDAadjCheckPointRec_next_addr_get(SwigClassWrapper const *farg1) { void * fresult ; IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; void *result = 0 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::next_addr", return 0); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); result = (void *) ((arg1)->next_addr); fresult = result; return fresult; } SWIGEXPORT void _wrap_IDAadjCheckPointRec_t0_set(SwigClassWrapper const *farg1, double const *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; realtype arg2 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::t0", return ); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); arg2 = (realtype)(*farg2); if (arg1) (arg1)->t0 = arg2; } SWIGEXPORT double _wrap_IDAadjCheckPointRec_t0_get(SwigClassWrapper const *farg1) { double fresult ; IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; realtype result; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::t0", return 0); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); result = (realtype) ((arg1)->t0); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_IDAadjCheckPointRec_t1_set(SwigClassWrapper const *farg1, double const *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; realtype arg2 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::t1", return ); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); arg2 = (realtype)(*farg2); if (arg1) (arg1)->t1 = arg2; } SWIGEXPORT double _wrap_IDAadjCheckPointRec_t1_get(SwigClassWrapper const *farg1) { double fresult ; IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; realtype result; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::t1", return 0); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); result = (realtype) 
((arg1)->t1); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_IDAadjCheckPointRec_nstep_set(SwigClassWrapper const *farg1, long const *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; long arg2 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::nstep", return ); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); arg2 = (long)(*farg2); if (arg1) (arg1)->nstep = arg2; } SWIGEXPORT long _wrap_IDAadjCheckPointRec_nstep_get(SwigClassWrapper const *farg1) { long fresult ; IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; long result; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::nstep", return 0); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); result = (long) ((arg1)->nstep); fresult = (long)(result); return fresult; } SWIGEXPORT void _wrap_IDAadjCheckPointRec_order_set(SwigClassWrapper const *farg1, int const *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; int arg2 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::order", return ); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); arg2 = (int)(*farg2); if (arg1) (arg1)->order = arg2; } SWIGEXPORT int _wrap_IDAadjCheckPointRec_order_get(SwigClassWrapper const *farg1) { int fresult ; IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; int result; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::order", return 0); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); result = (int) ((arg1)->order); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_IDAadjCheckPointRec_step_set(SwigClassWrapper const *farg1, double const *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; realtype arg2 ; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::step", return ); arg1 = 
(IDAadjCheckPointRec *)(farg1->cptr); arg2 = (realtype)(*farg2); if (arg1) (arg1)->step = arg2; } SWIGEXPORT double _wrap_IDAadjCheckPointRec_step_get(SwigClassWrapper const *farg1) { double fresult ; IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; realtype result; SWIG_check_mutable_nonnull(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::step", return 0); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); result = (realtype) ((arg1)->step); fresult = (realtype)(result); return fresult; } SWIGEXPORT SwigClassWrapper _wrap_new_IDAadjCheckPointRec() { SwigClassWrapper fresult ; IDAadjCheckPointRec *result = 0 ; result = (IDAadjCheckPointRec *)calloc(1, sizeof(IDAadjCheckPointRec)); fresult.cptr = result; fresult.cmemflags = SWIG_MEM_RVALUE | (1 ? SWIG_MEM_OWN : 0); return fresult; } SWIGEXPORT void _wrap_delete_IDAadjCheckPointRec(SwigClassWrapper *farg1) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; SWIG_check_mutable(*farg1, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAadjCheckPointRec::~IDAadjCheckPointRec()", return ); arg1 = (IDAadjCheckPointRec *)(farg1->cptr); free((char *) arg1); } SWIGEXPORT void _wrap_IDAadjCheckPointRec_op_assign__(SwigClassWrapper *farg1, SwigClassWrapper const *farg2) { IDAadjCheckPointRec *arg1 = (IDAadjCheckPointRec *) 0 ; IDAadjCheckPointRec *arg2 = 0 ; (void)sizeof(arg1); (void)sizeof(arg2); SWIG_assign(farg1, *farg2); } SWIGEXPORT int _wrap_FIDAGetAdjCheckPointsInfo(void *farg1, SwigClassWrapper const *farg2) { int fresult ; void *arg1 = (void *) 0 ; IDAadjCheckPointRec *arg2 = (IDAadjCheckPointRec *) 0 ; int result; arg1 = (void *)(farg1); SWIG_check_mutable(*farg2, "IDAadjCheckPointRec *", "IDAadjCheckPointRec", "IDAGetAdjCheckPointsInfo(void *,IDAadjCheckPointRec *)", return 0); arg2 = (IDAadjCheckPointRec *)(farg2->cptr); result = (int)IDAGetAdjCheckPointsInfo(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetJacTimesResFnB(void *farg1, int const 
*farg2, IDAResFn farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDAResFn arg3 = (IDAResFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDAResFn)(farg3); result = (int)IDASetJacTimesResFnB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetAdjDataPointHermite(void *farg1, int const *farg2, double *farg3, N_Vector farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); result = (int)IDAGetAdjDataPointHermite(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetAdjDataPointPolynomial(void *farg1, int const *farg2, double *farg3, int *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; int *arg4 = (int *) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (int *)(farg4); arg5 = (N_Vector)(farg5); result = (int)IDAGetAdjDataPointPolynomial(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetAdjCurrentCheckPoint(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; void **arg2 = (void **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (void **)(farg2); result = (int)IDAGetAdjCurrentCheckPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDABBDPrecInit(void *farg1, int64_t const *farg2, int64_t const *farg3, int64_t const *farg4, int64_t const *farg5, int64_t const *farg6, double const *farg7, IDABBDLocalFn farg8, IDABBDCommFn farg9) { int fresult ; void *arg1 = (void *) 0 ; sunindextype arg2 ; sunindextype arg3 ; sunindextype arg4 ; sunindextype arg5 ; sunindextype arg6 ; realtype 
arg7 ; IDABBDLocalFn arg8 = (IDABBDLocalFn) 0 ; IDABBDCommFn arg9 = (IDABBDCommFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (sunindextype)(*farg5); arg6 = (sunindextype)(*farg6); arg7 = (realtype)(*farg7); arg8 = (IDABBDLocalFn)(farg8); arg9 = (IDABBDCommFn)(farg9); result = (int)IDABBDPrecInit(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDABBDPrecReInit(void *farg1, int64_t const *farg2, int64_t const *farg3, double const *farg4) { int fresult ; void *arg1 = (void *) 0 ; sunindextype arg2 ; sunindextype arg3 ; realtype arg4 ; int result; arg1 = (void *)(farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (realtype)(*farg4); result = (int)IDABBDPrecReInit(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDABBDPrecGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)IDABBDPrecGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDABBDPrecGetNumGfnEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDABBDPrecGetNumGfnEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDABBDPrecInitB(void *farg1, int const *farg2, int64_t const *farg3, int64_t const *farg4, int64_t const *farg5, int64_t const *farg6, int64_t const *farg7, double const *farg8, IDABBDLocalFnB farg9, IDABBDCommFnB farg10) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; sunindextype arg3 ; sunindextype arg4 ; sunindextype arg5 ; sunindextype arg6 ; sunindextype arg7 ; realtype arg8 ; 
IDABBDLocalFnB arg9 = (IDABBDLocalFnB) 0 ; IDABBDCommFnB arg10 = (IDABBDCommFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (sunindextype)(*farg5); arg6 = (sunindextype)(*farg6); arg7 = (sunindextype)(*farg7); arg8 = (realtype)(*farg8); arg9 = (IDABBDLocalFnB)(farg9); arg10 = (IDABBDCommFnB)(farg10); result = (int)IDABBDPrecInitB(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDABBDPrecReInitB(void *farg1, int const *farg2, int64_t const *farg3, int64_t const *farg4, double const *farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; sunindextype arg3 ; sunindextype arg4 ; realtype arg5 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (realtype)(*farg5); result = (int)IDABBDPrecReInitB(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetLinearSolver(void *farg1, SUNLinearSolver farg2, SUNMatrix farg3) { int fresult ; void *arg1 = (void *) 0 ; SUNLinearSolver arg2 = (SUNLinearSolver) 0 ; SUNMatrix arg3 = (SUNMatrix) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNLinearSolver)(farg2); arg3 = (SUNMatrix)(farg3); result = (int)IDASetLinearSolver(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetJacFn(void *farg1, IDALsJacFn farg2) { int fresult ; void *arg1 = (void *) 0 ; IDALsJacFn arg2 = (IDALsJacFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDALsJacFn)(farg2); result = (int)IDASetJacFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetPreconditioner(void *farg1, IDALsPrecSetupFn farg2, IDALsPrecSolveFn farg3) { int fresult ; void *arg1 = (void *) 0 ; IDALsPrecSetupFn arg2 = (IDALsPrecSetupFn) 0 ; IDALsPrecSolveFn arg3 = (IDALsPrecSolveFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = 
(IDALsPrecSetupFn)(farg2); arg3 = (IDALsPrecSolveFn)(farg3); result = (int)IDASetPreconditioner(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetJacTimes(void *farg1, IDALsJacTimesSetupFn farg2, IDALsJacTimesVecFn farg3) { int fresult ; void *arg1 = (void *) 0 ; IDALsJacTimesSetupFn arg2 = (IDALsJacTimesSetupFn) 0 ; IDALsJacTimesVecFn arg3 = (IDALsJacTimesVecFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (IDALsJacTimesSetupFn)(farg2); arg3 = (IDALsJacTimesVecFn)(farg3); result = (int)IDASetJacTimes(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetEpsLin(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetEpsLin(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetLSNormFactor(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetLSNormFactor(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetLinearSolutionScaling(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)IDASetLinearSolutionScaling(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetIncrementFactor(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)IDASetIncrementFactor(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetLinWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); 
result = (int)IDAGetLinWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumJacEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumJacEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumPrecEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumPrecEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumPrecSolves(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumPrecSolves(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumLinIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumLinIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumLinConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumLinConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumJTSetupEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumJTSetupEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumJtimesEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); 
result = (int)IDAGetNumJtimesEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetNumLinResEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetNumLinResEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDAGetLastLinFlag(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)IDAGetLastLinFlag(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT SwigArrayWrapper _wrap_FIDAGetLinReturnFlagName(long const *farg1) { SwigArrayWrapper fresult ; long arg1 ; char *result = 0 ; arg1 = (long)(*farg1); result = (char *)IDAGetLinReturnFlagName(arg1); fresult.size = strlen((const char*)(result)); fresult.data = (char *)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetLinearSolverB(void *farg1, int const *farg2, SUNLinearSolver farg3, SUNMatrix farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; SUNLinearSolver arg3 = (SUNLinearSolver) 0 ; SUNMatrix arg4 = (SUNMatrix) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (SUNLinearSolver)(farg3); arg4 = (SUNMatrix)(farg4); result = (int)IDASetLinearSolverB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetJacFnB(void *farg1, int const *farg2, IDALsJacFnB farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDALsJacFnB arg3 = (IDALsJacFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDALsJacFnB)(farg3); result = (int)IDASetJacFnB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetJacFnBS(void *farg1, int const *farg2, IDALsJacFnBS farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDALsJacFnBS arg3 = (IDALsJacFnBS) 0 ; int result; arg1 = (void *)(farg1); arg2 = 
(int)(*farg2); arg3 = (IDALsJacFnBS)(farg3); result = (int)IDASetJacFnBS(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetEpsLinB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDASetEpsLinB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetLSNormFactorB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDASetLSNormFactorB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetLinearSolutionScalingB(void *farg1, int const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)IDASetLinearSolutionScalingB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetIncrementFactorB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)IDASetIncrementFactorB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetPreconditionerB(void *farg1, int const *farg2, IDALsPrecSetupFnB farg3, IDALsPrecSolveFnB farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDALsPrecSetupFnB arg3 = (IDALsPrecSetupFnB) 0 ; IDALsPrecSolveFnB arg4 = (IDALsPrecSolveFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDALsPrecSetupFnB)(farg3); arg4 = (IDALsPrecSolveFnB)(farg4); result = (int)IDASetPreconditionerB(arg1,arg2,arg3,arg4); fresult = (int)(result); return 
fresult; } SWIGEXPORT int _wrap_FIDASetPreconditionerBS(void *farg1, int const *farg2, IDALsPrecSetupFnBS farg3, IDALsPrecSolveFnBS farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDALsPrecSetupFnBS arg3 = (IDALsPrecSetupFnBS) 0 ; IDALsPrecSolveFnBS arg4 = (IDALsPrecSolveFnBS) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDALsPrecSetupFnBS)(farg3); arg4 = (IDALsPrecSolveFnBS)(farg4); result = (int)IDASetPreconditionerBS(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetJacTimesB(void *farg1, int const *farg2, IDALsJacTimesSetupFnB farg3, IDALsJacTimesVecFnB farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDALsJacTimesSetupFnB arg3 = (IDALsJacTimesSetupFnB) 0 ; IDALsJacTimesVecFnB arg4 = (IDALsJacTimesVecFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDALsJacTimesSetupFnB)(farg3); arg4 = (IDALsJacTimesVecFnB)(farg4); result = (int)IDASetJacTimesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FIDASetJacTimesBS(void *farg1, int const *farg2, IDALsJacTimesSetupFnBS farg3, IDALsJacTimesVecFnBS farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; IDALsJacTimesSetupFnBS arg3 = (IDALsJacTimesSetupFnBS) 0 ; IDALsJacTimesVecFnBS arg4 = (IDALsJacTimesVecFnBS) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (IDALsJacTimesSetupFnBS)(farg3); arg4 = (IDALsJacTimesVecFnBS)(farg4); result = (int)IDASetJacTimesBS(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } StanHeaders/src/idas/fmod/fidas_mod.f900000644000176200001440000052703414645137106017376 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! 
--------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fidas_mod use, intrinsic :: ISO_C_BINDING use fsundials_types_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_linearsolver_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_nonlinearsolver_mod implicit none private ! DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: IDA_NORMAL = 1_C_INT integer(C_INT), parameter, public :: IDA_ONE_STEP = 2_C_INT integer(C_INT), parameter, public :: IDA_YA_YDP_INIT = 1_C_INT integer(C_INT), parameter, public :: IDA_Y_INIT = 2_C_INT integer(C_INT), parameter, public :: IDA_SIMULTANEOUS = 1_C_INT integer(C_INT), parameter, public :: IDA_STAGGERED = 2_C_INT integer(C_INT), parameter, public :: IDA_CENTERED = 1_C_INT integer(C_INT), parameter, public :: IDA_FORWARD = 2_C_INT integer(C_INT), parameter, public :: IDA_HERMITE = 1_C_INT integer(C_INT), parameter, public :: IDA_POLYNOMIAL = 2_C_INT integer(C_INT), parameter, public :: IDA_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: IDA_TSTOP_RETURN = 1_C_INT integer(C_INT), parameter, public :: IDA_ROOT_RETURN = 2_C_INT integer(C_INT), parameter, public :: IDA_WARNING = 99_C_INT integer(C_INT), parameter, public :: IDA_TOO_MUCH_WORK = -1_C_INT integer(C_INT), parameter, public :: IDA_TOO_MUCH_ACC = -2_C_INT integer(C_INT), parameter, public :: IDA_ERR_FAIL = -3_C_INT integer(C_INT), parameter, public :: IDA_CONV_FAIL = -4_C_INT integer(C_INT), 
parameter, public :: IDA_LINIT_FAIL = -5_C_INT integer(C_INT), parameter, public :: IDA_LSETUP_FAIL = -6_C_INT integer(C_INT), parameter, public :: IDA_LSOLVE_FAIL = -7_C_INT integer(C_INT), parameter, public :: IDA_RES_FAIL = -8_C_INT integer(C_INT), parameter, public :: IDA_REP_RES_ERR = -9_C_INT integer(C_INT), parameter, public :: IDA_RTFUNC_FAIL = -10_C_INT integer(C_INT), parameter, public :: IDA_CONSTR_FAIL = -11_C_INT integer(C_INT), parameter, public :: IDA_FIRST_RES_FAIL = -12_C_INT integer(C_INT), parameter, public :: IDA_LINESEARCH_FAIL = -13_C_INT integer(C_INT), parameter, public :: IDA_NO_RECOVERY = -14_C_INT integer(C_INT), parameter, public :: IDA_NLS_INIT_FAIL = -15_C_INT integer(C_INT), parameter, public :: IDA_NLS_SETUP_FAIL = -16_C_INT integer(C_INT), parameter, public :: IDA_NLS_FAIL = -17_C_INT integer(C_INT), parameter, public :: IDA_MEM_NULL = -20_C_INT integer(C_INT), parameter, public :: IDA_MEM_FAIL = -21_C_INT integer(C_INT), parameter, public :: IDA_ILL_INPUT = -22_C_INT integer(C_INT), parameter, public :: IDA_NO_MALLOC = -23_C_INT integer(C_INT), parameter, public :: IDA_BAD_EWT = -24_C_INT integer(C_INT), parameter, public :: IDA_BAD_K = -25_C_INT integer(C_INT), parameter, public :: IDA_BAD_T = -26_C_INT integer(C_INT), parameter, public :: IDA_BAD_DKY = -27_C_INT integer(C_INT), parameter, public :: IDA_VECTOROP_ERR = -28_C_INT integer(C_INT), parameter, public :: IDA_CONTEXT_ERR = -29_C_INT integer(C_INT), parameter, public :: IDA_NO_QUAD = -30_C_INT integer(C_INT), parameter, public :: IDA_QRHS_FAIL = -31_C_INT integer(C_INT), parameter, public :: IDA_FIRST_QRHS_ERR = -32_C_INT integer(C_INT), parameter, public :: IDA_REP_QRHS_ERR = -33_C_INT integer(C_INT), parameter, public :: IDA_NO_SENS = -40_C_INT integer(C_INT), parameter, public :: IDA_SRES_FAIL = -41_C_INT integer(C_INT), parameter, public :: IDA_REP_SRES_ERR = -42_C_INT integer(C_INT), parameter, public :: IDA_BAD_IS = -43_C_INT integer(C_INT), parameter, public :: 
IDA_NO_QUADSENS = -50_C_INT integer(C_INT), parameter, public :: IDA_QSRHS_FAIL = -51_C_INT integer(C_INT), parameter, public :: IDA_FIRST_QSRHS_ERR = -52_C_INT integer(C_INT), parameter, public :: IDA_REP_QSRHS_ERR = -53_C_INT integer(C_INT), parameter, public :: IDA_UNRECOGNIZED_ERROR = -99_C_INT integer(C_INT), parameter, public :: IDA_NO_ADJ = -101_C_INT integer(C_INT), parameter, public :: IDA_NO_FWD = -102_C_INT integer(C_INT), parameter, public :: IDA_NO_BCK = -103_C_INT integer(C_INT), parameter, public :: IDA_BAD_TB0 = -104_C_INT integer(C_INT), parameter, public :: IDA_REIFWD_FAIL = -105_C_INT integer(C_INT), parameter, public :: IDA_FWD_FAIL = -106_C_INT integer(C_INT), parameter, public :: IDA_GETY_BADT = -107_C_INT public :: FIDACreate public :: FIDAInit public :: FIDAReInit public :: FIDASStolerances public :: FIDASVtolerances public :: FIDAWFtolerances public :: FIDACalcIC public :: FIDASetNonlinConvCoefIC public :: FIDASetMaxNumStepsIC public :: FIDASetMaxNumJacsIC public :: FIDASetMaxNumItersIC public :: FIDASetLineSearchOffIC public :: FIDASetStepToleranceIC public :: FIDASetMaxBacksIC public :: FIDASetErrHandlerFn public :: FIDASetErrFile public :: FIDASetUserData public :: FIDASetMaxOrd public :: FIDASetMaxNumSteps public :: FIDASetInitStep public :: FIDASetMaxStep public :: FIDASetStopTime public :: FIDASetNonlinConvCoef public :: FIDASetMaxErrTestFails public :: FIDASetMaxNonlinIters public :: FIDASetMaxConvFails public :: FIDASetSuppressAlg public :: FIDASetId public :: FIDASetConstraints public :: FIDASetNonlinearSolver public :: FIDASetNlsResFn public :: FIDARootInit public :: FIDASetRootDirection public :: FIDASetNoInactiveRootWarn public :: FIDASolve public :: FIDAComputeY public :: FIDAComputeYp public :: FIDAComputeYSens public :: FIDAComputeYpSens public :: FIDAGetDky public :: FIDAGetWorkSpace public :: FIDAGetNumSteps public :: FIDAGetNumResEvals public :: FIDAGetNumLinSolvSetups public :: FIDAGetNumErrTestFails public :: 
FIDAGetNumBacktrackOps public :: FIDAGetConsistentIC public :: FIDAGetLastOrder public :: FIDAGetCurrentOrder public :: FIDAGetCurrentCj public :: FIDAGetCurrentY public :: FIDAGetCurrentYSens public :: FIDAGetCurrentYp public :: FIDAGetCurrentYpSens public :: FIDAGetActualInitStep public :: FIDAGetLastStep public :: FIDAGetCurrentStep public :: FIDAGetCurrentTime public :: FIDAGetTolScaleFactor public :: FIDAGetErrWeights public :: FIDAGetEstLocalErrors public :: FIDAGetNumGEvals public :: FIDAGetRootInfo public :: FIDAGetIntegratorStats public :: FIDAGetNonlinearSystemData public :: FIDAGetNonlinearSystemDataSens public :: FIDAGetNumNonlinSolvIters public :: FIDAGetNumNonlinSolvConvFails public :: FIDAGetNonlinSolvStats type, bind(C) :: SwigArrayWrapper type(C_PTR), public :: data = C_NULL_PTR integer(C_SIZE_T), public :: size = 0 end type public :: FIDAGetReturnFlagName public :: FIDAFree public :: FIDASetJacTimesResFn public :: FIDAQuadInit public :: FIDAQuadReInit public :: FIDAQuadSStolerances public :: FIDAQuadSVtolerances public :: FIDASetQuadErrCon public :: FIDAGetQuad public :: FIDAGetQuadDky public :: FIDAGetQuadNumRhsEvals public :: FIDAGetQuadNumErrTestFails public :: FIDAGetQuadErrWeights public :: FIDAGetQuadStats public :: FIDAQuadFree public :: FIDASensInit public :: FIDASensReInit public :: FIDASensSStolerances public :: FIDASensSVtolerances public :: FIDASensEEtolerances public :: FIDAGetSensConsistentIC public :: FIDASetSensDQMethod public :: FIDASetSensErrCon public :: FIDASetSensMaxNonlinIters public :: FIDASetSensParams public :: FIDASetNonlinearSolverSensSim public :: FIDASetNonlinearSolverSensStg public :: FIDASensToggleOff public :: FIDAGetSens public :: FIDAGetSens1 public :: FIDAGetSensDky public :: FIDAGetSensDky1 public :: FIDAGetSensNumResEvals public :: FIDAGetNumResEvalsSens public :: FIDAGetSensNumErrTestFails public :: FIDAGetSensNumLinSolvSetups public :: FIDAGetSensErrWeights public :: FIDAGetSensStats public :: 
FIDAGetSensNumNonlinSolvIters public :: FIDAGetSensNumNonlinSolvConvFails public :: FIDAGetSensNonlinSolvStats public :: FIDASensFree public :: FIDAQuadSensInit public :: FIDAQuadSensReInit public :: FIDAQuadSensSStolerances public :: FIDAQuadSensSVtolerances public :: FIDAQuadSensEEtolerances public :: FIDASetQuadSensErrCon public :: FIDAGetQuadSens public :: FIDAGetQuadSens1 public :: FIDAGetQuadSensDky public :: FIDAGetQuadSensDky1 public :: FIDAGetQuadSensNumRhsEvals public :: FIDAGetQuadSensNumErrTestFails public :: FIDAGetQuadSensErrWeights public :: FIDAGetQuadSensStats public :: FIDAQuadSensFree public :: FIDAAdjInit public :: FIDAAdjReInit public :: FIDAAdjFree public :: FIDACreateB public :: FIDAInitB public :: FIDAInitBS public :: FIDAReInitB public :: FIDASStolerancesB public :: FIDASVtolerancesB public :: FIDAQuadInitB public :: FIDAQuadInitBS public :: FIDAQuadReInitB public :: FIDAQuadSStolerancesB public :: FIDAQuadSVtolerancesB public :: FIDACalcICB public :: FIDACalcICBS public :: FIDASolveF public :: FIDASolveB public :: FIDAAdjSetNoSensi public :: FIDASetUserDataB public :: FIDASetMaxOrdB public :: FIDASetMaxNumStepsB public :: FIDASetInitStepB public :: FIDASetMaxStepB public :: FIDASetSuppressAlgB public :: FIDASetIdB public :: FIDASetConstraintsB public :: FIDASetQuadErrConB public :: FIDASetNonlinearSolverB public :: FIDAGetB public :: FIDAGetQuadB public :: FIDAGetAdjIDABmem public :: FIDAGetConsistentICB public :: FIDAGetAdjY integer, parameter :: swig_cmem_own_bit = 0 integer, parameter :: swig_cmem_rvalue_bit = 1 integer, parameter :: swig_cmem_const_bit = 2 type, bind(C) :: SwigClassWrapper type(C_PTR), public :: cptr = C_NULL_PTR integer(C_INT), public :: cmemflags = 0 end type ! 
struct IDAadjCheckPointRec type, public :: IDAadjCheckPointRec type(SwigClassWrapper), public :: swigdata contains procedure :: set_my_addr => swigf_IDAadjCheckPointRec_my_addr_set procedure :: get_my_addr => swigf_IDAadjCheckPointRec_my_addr_get procedure :: set_next_addr => swigf_IDAadjCheckPointRec_next_addr_set procedure :: get_next_addr => swigf_IDAadjCheckPointRec_next_addr_get procedure :: set_t0 => swigf_IDAadjCheckPointRec_t0_set procedure :: get_t0 => swigf_IDAadjCheckPointRec_t0_get procedure :: set_t1 => swigf_IDAadjCheckPointRec_t1_set procedure :: get_t1 => swigf_IDAadjCheckPointRec_t1_get procedure :: set_nstep => swigf_IDAadjCheckPointRec_nstep_set procedure :: get_nstep => swigf_IDAadjCheckPointRec_nstep_get procedure :: set_order => swigf_IDAadjCheckPointRec_order_set procedure :: get_order => swigf_IDAadjCheckPointRec_order_get procedure :: set_step => swigf_IDAadjCheckPointRec_step_set procedure :: get_step => swigf_IDAadjCheckPointRec_step_get procedure :: release => swigf_release_IDAadjCheckPointRec procedure, private :: swigf_IDAadjCheckPointRec_op_assign__ generic :: assignment(=) => swigf_IDAadjCheckPointRec_op_assign__ end type IDAadjCheckPointRec interface IDAadjCheckPointRec module procedure swigf_create_IDAadjCheckPointRec end interface public :: FIDAGetAdjCheckPointsInfo public :: FIDASetJacTimesResFnB public :: FIDAGetAdjDataPointHermite public :: FIDAGetAdjDataPointPolynomial public :: FIDAGetAdjCurrentCheckPoint public :: FIDABBDPrecInit public :: FIDABBDPrecReInit public :: FIDABBDPrecGetWorkSpace public :: FIDABBDPrecGetNumGfnEvals public :: FIDABBDPrecInitB public :: FIDABBDPrecReInitB integer(C_INT), parameter, public :: IDALS_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: IDALS_MEM_NULL = -1_C_INT integer(C_INT), parameter, public :: IDALS_LMEM_NULL = -2_C_INT integer(C_INT), parameter, public :: IDALS_ILL_INPUT = -3_C_INT integer(C_INT), parameter, public :: IDALS_MEM_FAIL = -4_C_INT integer(C_INT), parameter, public 
:: IDALS_PMEM_NULL = -5_C_INT integer(C_INT), parameter, public :: IDALS_JACFUNC_UNRECVR = -6_C_INT integer(C_INT), parameter, public :: IDALS_JACFUNC_RECVR = -7_C_INT integer(C_INT), parameter, public :: IDALS_SUNMAT_FAIL = -8_C_INT integer(C_INT), parameter, public :: IDALS_SUNLS_FAIL = -9_C_INT integer(C_INT), parameter, public :: IDALS_NO_ADJ = -101_C_INT integer(C_INT), parameter, public :: IDALS_LMEMB_NULL = -102_C_INT public :: FIDASetLinearSolver public :: FIDASetJacFn public :: FIDASetPreconditioner public :: FIDASetJacTimes public :: FIDASetEpsLin public :: FIDASetLSNormFactor public :: FIDASetLinearSolutionScaling public :: FIDASetIncrementFactor public :: FIDAGetLinWorkSpace public :: FIDAGetNumJacEvals public :: FIDAGetNumPrecEvals public :: FIDAGetNumPrecSolves public :: FIDAGetNumLinIters public :: FIDAGetNumLinConvFails public :: FIDAGetNumJTSetupEvals public :: FIDAGetNumJtimesEvals public :: FIDAGetNumLinResEvals public :: FIDAGetLastLinFlag public :: FIDAGetLinReturnFlagName public :: FIDASetLinearSolverB public :: FIDASetJacFnB public :: FIDASetJacFnBS public :: FIDASetEpsLinB public :: FIDASetLSNormFactorB public :: FIDASetLinearSolutionScalingB public :: FIDASetIncrementFactorB public :: FIDASetPreconditionerB public :: FIDASetPreconditionerBS public :: FIDASetJacTimesB public :: FIDASetJacTimesBS ! 
WRAPPER DECLARATIONS interface function swigc_FIDACreate(farg1) & bind(C, name="_wrap_FIDACreate") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FIDAInit(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDAInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDAReInit(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASStolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAWFtolerances(farg1, farg2) & bind(C, name="_wrap_FIDAWFtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDACalcIC(farg1, farg2, farg3) & bind(C, name="_wrap_FIDACalcIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetNonlinConvCoefIC(farg1, farg2) & bind(C, 
name="_wrap_FIDASetNonlinConvCoefIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxNumStepsIC(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxNumStepsIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxNumJacsIC(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxNumJacsIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxNumItersIC(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxNumItersIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetLineSearchOffIC(farg1, farg2) & bind(C, name="_wrap_FIDASetLineSearchOffIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetStepToleranceIC(farg1, farg2) & bind(C, name="_wrap_FIDASetStepToleranceIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxBacksIC(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxBacksIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetErrHandlerFn(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetErrHandlerFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetErrFile(farg1, farg2) 
& bind(C, name="_wrap_FIDASetErrFile") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetUserData(farg1, farg2) & bind(C, name="_wrap_FIDASetUserData") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxOrd(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxOrd") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxNumSteps(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxNumSteps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetInitStep(farg1, farg2) & bind(C, name="_wrap_FIDASetInitStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxStep(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetStopTime(farg1, farg2) & bind(C, name="_wrap_FIDASetStopTime") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetNonlinConvCoef(farg1, farg2) & bind(C, name="_wrap_FIDASetNonlinConvCoef") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxErrTestFails(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxErrTestFails") & result(fresult) use, intrinsic :: 
ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxNonlinIters(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxNonlinIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetMaxConvFails(farg1, farg2) & bind(C, name="_wrap_FIDASetMaxConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetSuppressAlg(farg1, farg2) & bind(C, name="_wrap_FIDASetSuppressAlg") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetId(farg1, farg2) & bind(C, name="_wrap_FIDASetId") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetConstraints(farg1, farg2) & bind(C, name="_wrap_FIDASetConstraints") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetNonlinearSolver(farg1, farg2) & bind(C, name="_wrap_FIDASetNonlinearSolver") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetNlsResFn(farg1, farg2) & bind(C, name="_wrap_FIDASetNlsResFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDARootInit(farg1, farg2, farg3) & bind(C, name="_wrap_FIDARootInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 
type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetRootDirection(farg1, farg2) & bind(C, name="_wrap_FIDASetRootDirection") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetNoInactiveRootWarn(farg1) & bind(C, name="_wrap_FIDASetNoInactiveRootWarn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FIDASolve(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FIDASolve") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT), intent(in) :: farg6 integer(C_INT) :: fresult end function function swigc_FIDAComputeY(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAComputeY") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAComputeYp(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAComputeYp") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAComputeYSens(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAComputeYSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAComputeYpSens(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAComputeYpSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetDky(farg1, farg2, 
farg3, farg4) & bind(C, name="_wrap_FIDAGetDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetNumSteps(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumSteps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumResEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumResEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumLinSolvSetups(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumLinSolvSetups") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumErrTestFails(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumBacktrackOps(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumBacktrackOps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetConsistentIC(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetConsistentIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end 
function function swigc_FIDAGetLastOrder(farg1, farg2) & bind(C, name="_wrap_FIDAGetLastOrder") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentOrder(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentOrder") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentCj(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentCj") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentY(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentY") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentYSens(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentYSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentYp(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentYp") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentYpSens(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentYpSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetActualInitStep(farg1, farg2) & bind(C, name="_wrap_FIDAGetActualInitStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetLastStep(farg1, farg2) & bind(C, name="_wrap_FIDAGetLastStep") & result(fresult) 
use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentStep(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetCurrentTime(farg1, farg2) & bind(C, name="_wrap_FIDAGetCurrentTime") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetTolScaleFactor(farg1, farg2) & bind(C, name="_wrap_FIDAGetTolScaleFactor") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetErrWeights(farg1, farg2) & bind(C, name="_wrap_FIDAGetErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetEstLocalErrors(farg1, farg2) & bind(C, name="_wrap_FIDAGetEstLocalErrors") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumGEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumGEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetRootInfo(farg1, farg2) & bind(C, name="_wrap_FIDAGetRootInfo") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetIntegratorStats(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10, farg11) & bind(C, name="_wrap_FIDAGetIntegratorStats") & result(fresult) use, intrinsic :: ISO_C_BINDING 
type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 type(C_PTR), value :: farg8 type(C_PTR), value :: farg9 type(C_PTR), value :: farg10 type(C_PTR), value :: farg11 integer(C_INT) :: fresult end function function swigc_FIDAGetNonlinearSystemData(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) & bind(C, name="_wrap_FIDAGetNonlinearSystemData") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 type(C_PTR), value :: farg8 type(C_PTR), value :: farg9 integer(C_INT) :: fresult end function function swigc_FIDAGetNonlinearSystemDataSens(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8) & bind(C, name="_wrap_FIDAGetNonlinearSystemDataSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 type(C_PTR), value :: farg8 integer(C_INT) :: fresult end function function swigc_FIDAGetNumNonlinSolvIters(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumNonlinSolvIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumNonlinSolvConvFails(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumNonlinSolvConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNonlinSolvStats(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetNonlinSolvStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value 
:: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine SWIG_free(cptr) & bind(C, name="free") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: cptr end subroutine function swigc_FIDAGetReturnFlagName(farg1) & bind(C, name="_wrap_FIDAGetReturnFlagName") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper integer(C_LONG), intent(in) :: farg1 type(SwigArrayWrapper) :: fresult end function subroutine swigc_FIDAFree(farg1) & bind(C, name="_wrap_FIDAFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FIDASetJacTimesResFn(farg1, farg2) & bind(C, name="_wrap_FIDASetJacTimesResFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAQuadInit(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAQuadInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAQuadReInit(farg1, farg2) & bind(C, name="_wrap_FIDAQuadReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAQuadSStolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAQuadSStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAQuadSVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAQuadSVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetQuadErrCon(farg1, farg2) & bind(C, 
name="_wrap_FIDASetQuadErrCon") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuad(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetQuad") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadDky(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetQuadDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetQuadNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadNumErrTestFails(farg1, farg2) & bind(C, name="_wrap_FIDAGetQuadNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadErrWeights(farg1, farg2) & bind(C, name="_wrap_FIDAGetQuadErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadStats(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetQuadStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine swigc_FIDAQuadFree(farg1) & bind(C, name="_wrap_FIDAQuadFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FIDASensInit(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, 
name="_wrap_FIDASensInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_FUNPTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FIDASensReInit(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASensReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASensSStolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASensSStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASensSVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASensSVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASensEEtolerances(farg1) & bind(C, name="_wrap_FIDASensEEtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FIDAGetSensConsistentIC(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetSensConsistentIC") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetSensDQMethod(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetSensDQMethod") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetSensErrCon(farg1, farg2) & 
bind(C, name="_wrap_FIDASetSensErrCon") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetSensMaxNonlinIters(farg1, farg2) & bind(C, name="_wrap_FIDASetSensMaxNonlinIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetSensParams(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASetSensParams") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASetNonlinearSolverSensSim(farg1, farg2) & bind(C, name="_wrap_FIDASetNonlinearSolverSensSim") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetNonlinearSolverSensStg(farg1, farg2) & bind(C, name="_wrap_FIDASetNonlinearSolverSensStg") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASensToggleOff(farg1) & bind(C, name="_wrap_FIDASensToggleOff") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FIDAGetSens(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetSens1(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetSens1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 
integer(C_INT) :: fresult end function function swigc_FIDAGetSensDky(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetSensDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAGetSensDky1(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDAGetSensDky1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDAGetSensNumResEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetSensNumResEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumResEvalsSens(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumResEvalsSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetSensNumErrTestFails(farg1, farg2) & bind(C, name="_wrap_FIDAGetSensNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetSensNumLinSolvSetups(farg1, farg2) & bind(C, name="_wrap_FIDAGetSensNumLinSolvSetups") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetSensErrWeights(farg1, farg2) & bind(C, name="_wrap_FIDAGetSensErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetSensStats(farg1, farg2, 
farg3, farg4, farg5) & bind(C, name="_wrap_FIDAGetSensStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDAGetSensNumNonlinSolvIters(farg1, farg2) & bind(C, name="_wrap_FIDAGetSensNumNonlinSolvIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetSensNumNonlinSolvConvFails(farg1, farg2) & bind(C, name="_wrap_FIDAGetSensNumNonlinSolvConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetSensNonlinSolvStats(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetSensNonlinSolvStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine swigc_FIDASensFree(farg1) & bind(C, name="_wrap_FIDASensFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FIDAQuadSensInit(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAQuadSensInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAQuadSensReInit(farg1, farg2) & bind(C, name="_wrap_FIDAQuadSensReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAQuadSensSStolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAQuadSensSStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value 
:: farg3 integer(C_INT) :: fresult end function function swigc_FIDAQuadSensSVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAQuadSensSVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAQuadSensEEtolerances(farg1) & bind(C, name="_wrap_FIDAQuadSensEEtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FIDASetQuadSensErrCon(farg1, farg2) & bind(C, name="_wrap_FIDASetQuadSensErrCon") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSens(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetQuadSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSens1(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetQuadSens1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSensDky(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetQuadSensDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSensDky1(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDAGetQuadSensDky1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 
type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSensNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetQuadSensNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSensNumErrTestFails(farg1, farg2) & bind(C, name="_wrap_FIDAGetQuadSensNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSensErrWeights(farg1, farg2) & bind(C, name="_wrap_FIDAGetQuadSensErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadSensStats(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetQuadSensStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine swigc_FIDAQuadSensFree(farg1) & bind(C, name="_wrap_FIDAQuadSensFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FIDAAdjInit(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAAdjInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAAdjReInit(farg1) & bind(C, name="_wrap_FIDAAdjReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function subroutine swigc_FIDAAdjFree(farg1) & bind(C, name="_wrap_FIDAAdjFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FIDACreateB(farg1, farg2) & bind(C, name="_wrap_FIDACreateB") & result(fresult) use, intrinsic :: ISO_C_BINDING 
type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAInitB(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FIDAInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FIDAInitBS(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FIDAInitBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FIDAReInitB(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDAReInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDASStolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASStolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 real(C_DOUBLE), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASVtolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASVtolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAQuadInitB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAQuadInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), 
value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAQuadInitBS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAQuadInitBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAQuadReInitB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAQuadReInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAQuadSStolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAQuadSStolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 real(C_DOUBLE), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAQuadSVtolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAQuadSVtolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDACalcICB(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDACalcICB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDACalcICBS(farg1, farg2, farg3, farg4, farg5, farg6, farg7) & bind(C, name="_wrap_FIDACalcICBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: 
farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 integer(C_INT) :: fresult end function function swigc_FIDASolveF(farg1, farg2, farg3, farg4, farg5, farg6, farg7) & bind(C, name="_wrap_FIDASolveF") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT), intent(in) :: farg6 type(C_PTR), value :: farg7 integer(C_INT) :: fresult end function function swigc_FIDASolveB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASolveB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAAdjSetNoSensi(farg1) & bind(C, name="_wrap_FIDAAdjSetNoSensi") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FIDASetUserDataB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetUserDataB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetMaxOrdB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetMaxOrdB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetMaxNumStepsB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetMaxNumStepsB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_LONG), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetInitStepB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetInitStepB") & result(fresult) use, 
intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetMaxStepB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetMaxStepB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetSuppressAlgB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetSuppressAlgB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetIdB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetIdB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetConstraintsB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetConstraintsB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetQuadErrConB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetQuadErrConB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetNonlinearSolverB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetNonlinearSolverB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetB(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDAGetB") & result(fresult) use, intrinsic :: ISO_C_BINDING 
type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDAGetQuadB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetQuadB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAGetAdjIDABmem(farg1, farg2) & bind(C, name="_wrap_FIDAGetAdjIDABmem") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR) :: fresult end function function swigc_FIDAGetConsistentICB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetConsistentICB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDAGetAdjY(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDAGetAdjY") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function subroutine swigc_IDAadjCheckPointRec_my_addr_set(farg1, farg2) & bind(C, name="_wrap_IDAadjCheckPointRec_my_addr_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_IDAadjCheckPointRec_my_addr_get(farg1) & bind(C, name="_wrap_IDAadjCheckPointRec_my_addr_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR) :: fresult end function subroutine swigc_IDAadjCheckPointRec_next_addr_set(farg1, farg2) & bind(C, name="_wrap_IDAadjCheckPointRec_next_addr_set") use, intrinsic :: 
ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_IDAadjCheckPointRec_next_addr_get(farg1) & bind(C, name="_wrap_IDAadjCheckPointRec_next_addr_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR) :: fresult end function subroutine swigc_IDAadjCheckPointRec_t0_set(farg1, farg2) & bind(C, name="_wrap_IDAadjCheckPointRec_t0_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE), intent(in) :: farg2 end subroutine function swigc_IDAadjCheckPointRec_t0_get(farg1) & bind(C, name="_wrap_IDAadjCheckPointRec_t0_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_IDAadjCheckPointRec_t1_set(farg1, farg2) & bind(C, name="_wrap_IDAadjCheckPointRec_t1_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE), intent(in) :: farg2 end subroutine function swigc_IDAadjCheckPointRec_t1_get(farg1) & bind(C, name="_wrap_IDAadjCheckPointRec_t1_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_IDAadjCheckPointRec_nstep_set(farg1, farg2) & bind(C, name="_wrap_IDAadjCheckPointRec_nstep_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_LONG), intent(in) :: farg2 end subroutine function swigc_IDAadjCheckPointRec_nstep_get(farg1) & bind(C, name="_wrap_IDAadjCheckPointRec_nstep_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_LONG) :: fresult end function subroutine swigc_IDAadjCheckPointRec_order_set(farg1, farg2) & bind(C, 
name="_wrap_IDAadjCheckPointRec_order_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_INT), intent(in) :: farg2 end subroutine function swigc_IDAadjCheckPointRec_order_get(farg1) & bind(C, name="_wrap_IDAadjCheckPointRec_order_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_INT) :: fresult end function subroutine swigc_IDAadjCheckPointRec_step_set(farg1, farg2) & bind(C, name="_wrap_IDAadjCheckPointRec_step_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE), intent(in) :: farg2 end subroutine function swigc_IDAadjCheckPointRec_step_get(farg1) & bind(C, name="_wrap_IDAadjCheckPointRec_step_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: fresult end function function swigc_new_IDAadjCheckPointRec() & bind(C, name="_wrap_new_IDAadjCheckPointRec") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: fresult end function subroutine swigc_delete_IDAadjCheckPointRec(farg1) & bind(C, name="_wrap_delete_IDAadjCheckPointRec") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper), intent(inout) :: farg1 end subroutine subroutine swigc_IDAadjCheckPointRec_op_assign__(farg1, farg2) & bind(C, name="_wrap_IDAadjCheckPointRec_op_assign__") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper), intent(inout) :: farg1 type(SwigClassWrapper) :: farg2 end subroutine function swigc_FIDAGetAdjCheckPointsInfo(farg1, farg2) & bind(C, name="_wrap_FIDAGetAdjCheckPointsInfo") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(C_PTR), value :: farg1 type(SwigClassWrapper) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetJacTimesResFnB(farg1, farg2, farg3) & 
bind(C, name="_wrap_FIDASetJacTimesResFnB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetAdjDataPointHermite(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDAGetAdjDataPointHermite") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDAGetAdjDataPointPolynomial(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDAGetAdjDataPointPolynomial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FIDAGetAdjCurrentCheckPoint(farg1, farg2) & bind(C, name="_wrap_FIDAGetAdjCurrentCheckPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDABBDPrecInit(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) & bind(C, name="_wrap_FIDABBDPrecInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 integer(C_INT64_T), intent(in) :: farg5 integer(C_INT64_T), intent(in) :: farg6 real(C_DOUBLE), intent(in) :: farg7 type(C_FUNPTR), value :: farg8 type(C_FUNPTR), value :: farg9 integer(C_INT) :: fresult end function function swigc_FIDABBDPrecReInit(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDABBDPrecReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 
integer(C_INT64_T), intent(in) :: farg3 real(C_DOUBLE), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FIDABBDPrecGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FIDABBDPrecGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDABBDPrecGetNumGfnEvals(farg1, farg2) & bind(C, name="_wrap_FIDABBDPrecGetNumGfnEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDABBDPrecInitB(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10) & bind(C, name="_wrap_FIDABBDPrecInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 integer(C_INT64_T), intent(in) :: farg5 integer(C_INT64_T), intent(in) :: farg6 integer(C_INT64_T), intent(in) :: farg7 real(C_DOUBLE), intent(in) :: farg8 type(C_FUNPTR), value :: farg9 type(C_FUNPTR), value :: farg10 integer(C_INT) :: fresult end function function swigc_FIDABBDPrecReInitB(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FIDABBDPrecReInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FIDASetLinearSolver(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetLinearSolver") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetJacFn(farg1, farg2) & bind(C, name="_wrap_FIDASetJacFn") & result(fresult) use, 
intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetPreconditioner(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetPreconditioner") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetJacTimes(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetJacTimes") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetEpsLin(farg1, farg2) & bind(C, name="_wrap_FIDASetEpsLin") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetLSNormFactor(farg1, farg2) & bind(C, name="_wrap_FIDASetLSNormFactor") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetLinearSolutionScaling(farg1, farg2) & bind(C, name="_wrap_FIDASetLinearSolutionScaling") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDASetIncrementFactor(farg1, farg2) & bind(C, name="_wrap_FIDASetIncrementFactor") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetLinWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FIDAGetLinWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDAGetNumJacEvals(farg1, farg2) & 
bind(C, name="_wrap_FIDAGetNumJacEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumPrecEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumPrecEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumPrecSolves(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumPrecSolves") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumLinIters(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumLinIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumLinConvFails(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumLinConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumJTSetupEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumJTSetupEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumJtimesEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumJtimesEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetNumLinResEvals(farg1, farg2) & bind(C, name="_wrap_FIDAGetNumLinResEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetLastLinFlag(farg1, farg2) & bind(C, name="_wrap_FIDAGetLastLinFlag") & result(fresult) use, intrinsic :: 
ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FIDAGetLinReturnFlagName(farg1) & bind(C, name="_wrap_FIDAGetLinReturnFlagName") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper integer(C_LONG), intent(in) :: farg1 type(SwigArrayWrapper) :: fresult end function function swigc_FIDASetLinearSolverB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASetLinearSolverB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASetJacFnB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetJacFnB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetJacFnBS(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetJacFnBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetEpsLinB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetEpsLinB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetLSNormFactorB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetLSNormFactorB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetLinearSolutionScalingB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetLinearSolutionScalingB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 
integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetIncrementFactorB(farg1, farg2, farg3) & bind(C, name="_wrap_FIDASetIncrementFactorB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FIDASetPreconditionerB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASetPreconditionerB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASetPreconditionerBS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASetPreconditionerBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASetJacTimesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASetJacTimesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FIDASetJacTimesBS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FIDASetJacTimesBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FIDACreate(sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = sunctx fresult = swigc_FIDACreate(farg1) swig_result = fresult end function function FIDAInit(ida_mem, res, t0, yy0, yp0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: res real(C_DOUBLE), intent(in) :: t0 type(N_Vector), target, intent(inout) :: yy0 type(N_Vector), target, intent(inout) :: yp0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = res farg3 = t0 farg4 = c_loc(yy0) farg5 = c_loc(yp0) fresult = swigc_FIDAInit(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDAReInit(ida_mem, t0, yy0, yp0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t0 type(N_Vector), target, intent(inout) :: yy0 type(N_Vector), target, intent(inout) :: yp0 integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = t0 farg3 = c_loc(yy0) farg4 = c_loc(yp0) fresult = swigc_FIDAReInit(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDASStolerances(ida_mem, reltol, abstol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltol real(C_DOUBLE), intent(in) :: abstol integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = reltol farg3 = abstol fresult = swigc_FIDASStolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDASVtolerances(ida_mem, reltol, abstol) & result(swig_result) use, intrinsic :: 
ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltol type(N_Vector), target, intent(inout) :: abstol integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = reltol farg3 = c_loc(abstol) fresult = swigc_FIDASVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDAWFtolerances(ida_mem, efun) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: efun integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = ida_mem farg2 = efun fresult = swigc_FIDAWFtolerances(farg1, farg2) swig_result = fresult end function function FIDACalcIC(ida_mem, icopt, tout1) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: icopt real(C_DOUBLE), intent(in) :: tout1 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = icopt farg3 = tout1 fresult = swigc_FIDACalcIC(farg1, farg2, farg3) swig_result = fresult end function function FIDASetNonlinConvCoefIC(ida_mem, epiccon) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: epiccon integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = epiccon fresult = swigc_FIDASetNonlinConvCoefIC(farg1, farg2) swig_result = fresult end function function FIDASetMaxNumStepsIC(ida_mem, maxnh) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxnh integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxnh fresult = swigc_FIDASetMaxNumStepsIC(farg1, farg2) swig_result = fresult end function function 
FIDASetMaxNumJacsIC(ida_mem, maxnj) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxnj integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxnj fresult = swigc_FIDASetMaxNumJacsIC(farg1, farg2) swig_result = fresult end function function FIDASetMaxNumItersIC(ida_mem, maxnit) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxnit integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxnit fresult = swigc_FIDASetMaxNumItersIC(farg1, farg2) swig_result = fresult end function function FIDASetLineSearchOffIC(ida_mem, lsoff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: lsoff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = lsoff fresult = swigc_FIDASetLineSearchOffIC(farg1, farg2) swig_result = fresult end function function FIDASetStepToleranceIC(ida_mem, steptol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: steptol integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = steptol fresult = swigc_FIDASetStepToleranceIC(farg1, farg2) swig_result = fresult end function function FIDASetMaxBacksIC(ida_mem, maxbacks) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxbacks integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxbacks fresult = swigc_FIDASetMaxBacksIC(farg1, farg2) swig_result = fresult end function function FIDASetErrHandlerFn(ida_mem, ehfun, eh_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING 
integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: ehfun type(C_PTR) :: eh_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = ehfun farg3 = eh_data fresult = swigc_FIDASetErrHandlerFn(farg1, farg2, farg3) swig_result = fresult end function function FIDASetErrFile(ida_mem, errfp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: errfp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = errfp fresult = swigc_FIDASetErrFile(farg1, farg2) swig_result = fresult end function function FIDASetUserData(ida_mem, user_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: user_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = user_data fresult = swigc_FIDASetUserData(farg1, farg2) swig_result = fresult end function function FIDASetMaxOrd(ida_mem, maxord) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxord integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxord fresult = swigc_FIDASetMaxOrd(farg1, farg2) swig_result = fresult end function function FIDASetMaxNumSteps(ida_mem, mxsteps) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), intent(in) :: mxsteps integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = ida_mem farg2 = mxsteps fresult = swigc_FIDASetMaxNumSteps(farg1, farg2) swig_result = fresult end function function FIDASetInitStep(ida_mem, hin) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: hin integer(C_INT) :: 
fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = hin fresult = swigc_FIDASetInitStep(farg1, farg2) swig_result = fresult end function function FIDASetMaxStep(ida_mem, hmax) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: hmax integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = hmax fresult = swigc_FIDASetMaxStep(farg1, farg2) swig_result = fresult end function function FIDASetStopTime(ida_mem, tstop) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: tstop integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = tstop fresult = swigc_FIDASetStopTime(farg1, farg2) swig_result = fresult end function function FIDASetNonlinConvCoef(ida_mem, epcon) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: epcon integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = epcon fresult = swigc_FIDASetNonlinConvCoef(farg1, farg2) swig_result = fresult end function function FIDASetMaxErrTestFails(ida_mem, maxnef) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxnef integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxnef fresult = swigc_FIDASetMaxErrTestFails(farg1, farg2) swig_result = fresult end function function FIDASetMaxNonlinIters(ida_mem, maxcor) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxcor integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxcor fresult = swigc_FIDASetMaxNonlinIters(farg1, farg2) 
swig_result = fresult end function function FIDASetMaxConvFails(ida_mem, maxncf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxncf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxncf fresult = swigc_FIDASetMaxConvFails(farg1, farg2) swig_result = fresult end function function FIDASetSuppressAlg(ida_mem, suppressalg) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: suppressalg integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = suppressalg fresult = swigc_FIDASetSuppressAlg(farg1, farg2) swig_result = fresult end function function FIDASetId(ida_mem, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: id integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(id) fresult = swigc_FIDASetId(farg1, farg2) swig_result = fresult end function function FIDASetConstraints(ida_mem, constraints) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: constraints integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(constraints) fresult = swigc_FIDASetConstraints(farg1, farg2) swig_result = fresult end function function FIDASetNonlinearSolver(ida_mem, nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nls) fresult = swigc_FIDASetNonlinearSolver(farg1, farg2) swig_result = fresult end function function FIDASetNlsResFn(ida_mem, 
res) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: res integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = ida_mem farg2 = res fresult = swigc_FIDASetNlsResFn(farg1, farg2) swig_result = fresult end function function FIDARootInit(ida_mem, nrtfn, g) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: nrtfn type(C_FUNPTR), intent(in), value :: g integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = ida_mem farg2 = nrtfn farg3 = g fresult = swigc_FIDARootInit(farg1, farg2, farg3) swig_result = fresult end function function FIDASetRootDirection(ida_mem, rootdir) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), dimension(*), target, intent(inout) :: rootdir integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(rootdir(1)) fresult = swigc_FIDASetRootDirection(farg1, farg2) swig_result = fresult end function function FIDASetNoInactiveRootWarn(ida_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = ida_mem fresult = swigc_FIDASetNoInactiveRootWarn(farg1) swig_result = fresult end function function FIDASolve(ida_mem, tout, tret, yret, ypret, itask) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: tout real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(N_Vector), target, intent(inout) :: yret type(N_Vector), target, intent(inout) :: ypret integer(C_INT), intent(in) :: itask integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 
type(C_PTR) :: farg5 integer(C_INT) :: farg6 farg1 = ida_mem farg2 = tout farg3 = c_loc(tret(1)) farg4 = c_loc(yret) farg5 = c_loc(ypret) farg6 = itask fresult = swigc_FIDASolve(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FIDAComputeY(ida_mem, ycor, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: ycor type(N_Vector), target, intent(inout) :: y integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(ycor) farg3 = c_loc(y) fresult = swigc_FIDAComputeY(farg1, farg2, farg3) swig_result = fresult end function function FIDAComputeYp(ida_mem, ycor, yp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: ycor type(N_Vector), target, intent(inout) :: yp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(ycor) farg3 = c_loc(yp) fresult = swigc_FIDAComputeYp(farg1, farg2, farg3) swig_result = fresult end function function FIDAComputeYSens(ida_mem, ycor, yys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: ycor type(C_PTR) :: yys integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = ycor farg3 = yys fresult = swigc_FIDAComputeYSens(farg1, farg2, farg3) swig_result = fresult end function function FIDAComputeYpSens(ida_mem, ycor, yps) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: ycor type(C_PTR) :: yps integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = ycor farg3 = yps fresult = swigc_FIDAComputeYpSens(farg1, farg2, farg3) swig_result = fresult 
end function function FIDAGetDky(ida_mem, t, k, dky) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(N_Vector), target, intent(inout) :: dky integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = t farg3 = k farg4 = c_loc(dky) fresult = swigc_FIDAGetDky(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetWorkSpace(ida_mem, lenrw, leniw) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: lenrw integer(C_LONG), dimension(*), target, intent(inout) :: leniw integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(lenrw(1)) farg3 = c_loc(leniw(1)) fresult = swigc_FIDAGetWorkSpace(farg1, farg2, farg3) swig_result = fresult end function function FIDAGetNumSteps(ida_mem, nsteps) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsteps integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nsteps(1)) fresult = swigc_FIDAGetNumSteps(farg1, farg2) swig_result = fresult end function function FIDAGetNumResEvals(ida_mem, nrevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nrevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nrevals(1)) fresult = swigc_FIDAGetNumResEvals(farg1, farg2) swig_result = fresult end function function FIDAGetNumLinSolvSetups(ida_mem, nlinsetups) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: 
swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetups integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nlinsetups(1)) fresult = swigc_FIDAGetNumLinSolvSetups(farg1, farg2) swig_result = fresult end function function FIDAGetNumErrTestFails(ida_mem, netfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: netfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(netfails(1)) fresult = swigc_FIDAGetNumErrTestFails(farg1, farg2) swig_result = fresult end function function FIDAGetNumBacktrackOps(ida_mem, nbacktr) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nbacktr integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nbacktr(1)) fresult = swigc_FIDAGetNumBacktrackOps(farg1, farg2) swig_result = fresult end function function FIDAGetConsistentIC(ida_mem, yy0_mod, yp0_mod) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: yy0_mod type(N_Vector), target, intent(inout) :: yp0_mod integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(yy0_mod) farg3 = c_loc(yp0_mod) fresult = swigc_FIDAGetConsistentIC(farg1, farg2, farg3) swig_result = fresult end function function FIDAGetLastOrder(ida_mem, klast) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), dimension(*), target, intent(inout) :: klast integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(klast(1)) fresult = 
swigc_FIDAGetLastOrder(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentOrder(ida_mem, kcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), dimension(*), target, intent(inout) :: kcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(kcur(1)) fresult = swigc_FIDAGetCurrentOrder(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentCj(ida_mem, cj) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: cj integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(cj(1)) fresult = swigc_FIDAGetCurrentCj(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentY(ida_mem, ycur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: ycur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = ycur fresult = swigc_FIDAGetCurrentY(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentYSens(ida_mem, ys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR), target, intent(inout) :: ys integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(ys) fresult = swigc_FIDAGetCurrentYSens(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentYp(ida_mem, ypcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: ypcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = ypcur fresult = swigc_FIDAGetCurrentYp(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentYpSens(ida_mem, yps) & 
result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR), target, intent(inout) :: yps integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(yps) fresult = swigc_FIDAGetCurrentYpSens(farg1, farg2) swig_result = fresult end function function FIDAGetActualInitStep(ida_mem, hinused) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: hinused integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(hinused(1)) fresult = swigc_FIDAGetActualInitStep(farg1, farg2) swig_result = fresult end function function FIDAGetLastStep(ida_mem, hlast) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: hlast integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(hlast(1)) fresult = swigc_FIDAGetLastStep(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentStep(ida_mem, hcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: hcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(hcur(1)) fresult = swigc_FIDAGetCurrentStep(farg1, farg2) swig_result = fresult end function function FIDAGetCurrentTime(ida_mem, tcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(tcur(1)) fresult = swigc_FIDAGetCurrentTime(farg1, farg2) swig_result = fresult end function function FIDAGetTolScaleFactor(ida_mem, 
tolsfact) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tolsfact integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(tolsfact(1)) fresult = swigc_FIDAGetTolScaleFactor(farg1, farg2) swig_result = fresult end function function FIDAGetErrWeights(ida_mem, eweight) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: eweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(eweight) fresult = swigc_FIDAGetErrWeights(farg1, farg2) swig_result = fresult end function function FIDAGetEstLocalErrors(ida_mem, ele) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: ele integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(ele) fresult = swigc_FIDAGetEstLocalErrors(farg1, farg2) swig_result = fresult end function function FIDAGetNumGEvals(ida_mem, ngevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: ngevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(ngevals(1)) fresult = swigc_FIDAGetNumGEvals(farg1, farg2) swig_result = fresult end function function FIDAGetRootInfo(ida_mem, rootsfound) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), dimension(*), target, intent(inout) :: rootsfound integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(rootsfound(1)) fresult = swigc_FIDAGetRootInfo(farg1, farg2) swig_result = fresult end function function 
FIDAGetIntegratorStats(ida_mem, nsteps, nrevals, nlinsetups, netfails, qlast, qcur, hinused, hlast, hcur, tcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsteps integer(C_LONG), dimension(*), target, intent(inout) :: nrevals integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetups integer(C_LONG), dimension(*), target, intent(inout) :: netfails integer(C_INT), dimension(*), target, intent(inout) :: qlast integer(C_INT), dimension(*), target, intent(inout) :: qcur real(C_DOUBLE), dimension(*), target, intent(inout) :: hinused real(C_DOUBLE), dimension(*), target, intent(inout) :: hlast real(C_DOUBLE), dimension(*), target, intent(inout) :: hcur real(C_DOUBLE), dimension(*), target, intent(inout) :: tcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 type(C_PTR) :: farg8 type(C_PTR) :: farg9 type(C_PTR) :: farg10 type(C_PTR) :: farg11 farg1 = ida_mem farg2 = c_loc(nsteps(1)) farg3 = c_loc(nrevals(1)) farg4 = c_loc(nlinsetups(1)) farg5 = c_loc(netfails(1)) farg6 = c_loc(qlast(1)) farg7 = c_loc(qcur(1)) farg8 = c_loc(hinused(1)) farg9 = c_loc(hlast(1)) farg10 = c_loc(hcur(1)) farg11 = c_loc(tcur(1)) fresult = swigc_FIDAGetIntegratorStats(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10, farg11) swig_result = fresult end function function FIDAGetNonlinearSystemData(ida_mem, tcur, yypred, yppred, yyn, ypn, res, cj, user_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tcur type(C_PTR) :: yypred type(C_PTR) :: yppred type(C_PTR) :: yyn type(C_PTR) :: ypn type(C_PTR) :: res real(C_DOUBLE), dimension(*), target, intent(inout) :: cj type(C_PTR), target, intent(inout) :: user_data 
integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 type(C_PTR) :: farg8 type(C_PTR) :: farg9 farg1 = ida_mem farg2 = c_loc(tcur(1)) farg3 = yypred farg4 = yppred farg5 = yyn farg6 = ypn farg7 = res farg8 = c_loc(cj(1)) farg9 = c_loc(user_data) fresult = swigc_FIDAGetNonlinearSystemData(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) swig_result = fresult end function function FIDAGetNonlinearSystemDataSens(ida_mem, tcur, yyspred, ypspred, yysn, ypsn, cj, user_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tcur type(C_PTR), target, intent(inout) :: yyspred type(C_PTR), target, intent(inout) :: ypspred type(C_PTR), target, intent(inout) :: yysn type(C_PTR), target, intent(inout) :: ypsn real(C_DOUBLE), dimension(*), target, intent(inout) :: cj type(C_PTR), target, intent(inout) :: user_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 type(C_PTR) :: farg8 farg1 = ida_mem farg2 = c_loc(tcur(1)) farg3 = c_loc(yyspred) farg4 = c_loc(ypspred) farg5 = c_loc(yysn) farg6 = c_loc(ypsn) farg7 = c_loc(cj(1)) farg8 = c_loc(user_data) fresult = swigc_FIDAGetNonlinearSystemDataSens(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8) swig_result = fresult end function function FIDAGetNumNonlinSolvIters(ida_mem, nniters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nniters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nniters(1)) fresult = swigc_FIDAGetNumNonlinSolvIters(farg1, farg2) swig_result = fresult end function function 
FIDAGetNumNonlinSolvConvFails(ida_mem, nncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nncfails(1)) fresult = swigc_FIDAGetNumNonlinSolvConvFails(farg1, farg2) swig_result = fresult end function function FIDAGetNonlinSolvStats(ida_mem, nniters, nncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nniters integer(C_LONG), dimension(*), target, intent(inout) :: nncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(nniters(1)) farg3 = c_loc(nncfails(1)) fresult = swigc_FIDAGetNonlinSolvStats(farg1, farg2, farg3) swig_result = fresult end function subroutine SWIG_chararray_to_string(wrap, string) use, intrinsic :: ISO_C_BINDING type(SwigArrayWrapper), intent(IN) :: wrap character(kind=C_CHAR, len=:), allocatable, intent(OUT) :: string character(kind=C_CHAR), dimension(:), pointer :: chars integer(kind=C_SIZE_T) :: i call c_f_pointer(wrap%data, chars, [wrap%size]) allocate(character(kind=C_CHAR, len=wrap%size) :: string) do i=1, wrap%size string(i:i) = chars(i) end do end subroutine function FIDAGetReturnFlagName(flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=:), allocatable :: swig_result integer(C_LONG), intent(in) :: flag type(SwigArrayWrapper) :: fresult integer(C_LONG) :: farg1 farg1 = flag fresult = swigc_FIDAGetReturnFlagName(farg1) call SWIG_chararray_to_string(fresult, swig_result) if (.false.) 
call SWIG_free(fresult%data) end function subroutine FIDAFree(ida_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR), target, intent(inout) :: ida_mem type(C_PTR) :: farg1 farg1 = c_loc(ida_mem) call swigc_FIDAFree(farg1) end subroutine function FIDASetJacTimesResFn(ida_mem, jtimesresfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: jtimesresfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = ida_mem farg2 = jtimesresfn fresult = swigc_FIDASetJacTimesResFn(farg1, farg2) swig_result = fresult end function function FIDAQuadInit(ida_mem, rhsq, yq0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: rhsq type(N_Vector), target, intent(inout) :: yq0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = rhsq farg3 = c_loc(yq0) fresult = swigc_FIDAQuadInit(farg1, farg2, farg3) swig_result = fresult end function function FIDAQuadReInit(ida_mem, yq0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: yq0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(yq0) fresult = swigc_FIDAQuadReInit(farg1, farg2) swig_result = fresult end function function FIDAQuadSStolerances(ida_mem, reltolq, abstolq) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltolq real(C_DOUBLE), intent(in) :: abstolq integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = reltolq farg3 = abstolq fresult = swigc_FIDAQuadSStolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDAQuadSVtolerances(ida_mem, reltolq, 
abstolq) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltolq type(N_Vector), target, intent(inout) :: abstolq integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = reltolq farg3 = c_loc(abstolq) fresult = swigc_FIDAQuadSVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDASetQuadErrCon(ida_mem, errconq) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: errconq integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = errconq fresult = swigc_FIDASetQuadErrCon(farg1, farg2) swig_result = fresult end function function FIDAGetQuad(ida_mem, t, yqout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: t type(N_Vector), target, intent(inout) :: yqout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(t(1)) farg3 = c_loc(yqout) fresult = swigc_FIDAGetQuad(farg1, farg2, farg3) swig_result = fresult end function function FIDAGetQuadDky(ida_mem, t, k, dky) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(N_Vector), target, intent(inout) :: dky integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = t farg3 = k farg4 = c_loc(dky) fresult = swigc_FIDAGetQuadDky(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetQuadNumRhsEvals(ida_mem, nrhsqevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem 
integer(C_LONG), dimension(*), target, intent(inout) :: nrhsqevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nrhsqevals(1)) fresult = swigc_FIDAGetQuadNumRhsEvals(farg1, farg2) swig_result = fresult end function function FIDAGetQuadNumErrTestFails(ida_mem, nqetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nqetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nqetfails(1)) fresult = swigc_FIDAGetQuadNumErrTestFails(farg1, farg2) swig_result = fresult end function function FIDAGetQuadErrWeights(ida_mem, eqweight) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(N_Vector), target, intent(inout) :: eqweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(eqweight) fresult = swigc_FIDAGetQuadErrWeights(farg1, farg2) swig_result = fresult end function function FIDAGetQuadStats(ida_mem, nrhsqevals, nqetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nrhsqevals integer(C_LONG), dimension(*), target, intent(inout) :: nqetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(nrhsqevals(1)) farg3 = c_loc(nqetfails(1)) fresult = swigc_FIDAGetQuadStats(farg1, farg2, farg3) swig_result = fresult end function subroutine FIDAQuadFree(ida_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: ida_mem type(C_PTR) :: farg1 farg1 = ida_mem call swigc_FIDAQuadFree(farg1) end subroutine function FIDASensInit(ida_mem, ns, ism, ress, ys0, yps0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem 
integer(C_INT), intent(in) :: ns integer(C_INT), intent(in) :: ism type(C_FUNPTR), intent(in), value :: ress type(C_PTR) :: ys0 type(C_PTR) :: yps0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_FUNPTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = ida_mem farg2 = ns farg3 = ism farg4 = ress farg5 = ys0 farg6 = yps0 fresult = swigc_FIDASensInit(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FIDASensReInit(ida_mem, ism, ys0, yps0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: ism type(C_PTR) :: ys0 type(C_PTR) :: yps0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = ism farg3 = ys0 farg4 = yps0 fresult = swigc_FIDASensReInit(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDASensSStolerances(ida_mem, reltols, abstols) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltols real(C_DOUBLE), dimension(*), target, intent(inout) :: abstols integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = reltols farg3 = c_loc(abstols(1)) fresult = swigc_FIDASensSStolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDASensSVtolerances(ida_mem, reltols, abstols) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltols type(C_PTR) :: abstols integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = reltols farg3 = abstols fresult = swigc_FIDASensSVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDASensEEtolerances(ida_mem) & 
result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = ida_mem fresult = swigc_FIDASensEEtolerances(farg1) swig_result = fresult end function function FIDAGetSensConsistentIC(ida_mem, yys0, yps0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: yys0 type(C_PTR) :: yps0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = yys0 farg3 = yps0 fresult = swigc_FIDAGetSensConsistentIC(farg1, farg2, farg3) swig_result = fresult end function function FIDASetSensDQMethod(ida_mem, dqtype, dqrhomax) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: dqtype real(C_DOUBLE), intent(in) :: dqrhomax integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = dqtype farg3 = dqrhomax fresult = swigc_FIDASetSensDQMethod(farg1, farg2, farg3) swig_result = fresult end function function FIDASetSensErrCon(ida_mem, errcons) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: errcons integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = errcons fresult = swigc_FIDASetSensErrCon(farg1, farg2) swig_result = fresult end function function FIDASetSensMaxNonlinIters(ida_mem, maxcors) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: maxcors integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = maxcors fresult = swigc_FIDASetSensMaxNonlinIters(farg1, farg2) swig_result = fresult end function function FIDASetSensParams(ida_mem, p, pbar, plist) & result(swig_result) use, 
intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: p real(C_DOUBLE), dimension(*), target, intent(inout) :: pbar integer(C_INT), dimension(*), target, intent(inout) :: plist integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = c_loc(p(1)) farg3 = c_loc(pbar(1)) farg4 = c_loc(plist(1)) fresult = swigc_FIDASetSensParams(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDASetNonlinearSolverSensSim(ida_mem, nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nls) fresult = swigc_FIDASetNonlinearSolverSensSim(farg1, farg2) swig_result = fresult end function function FIDASetNonlinearSolverSensStg(ida_mem, nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nls) fresult = swigc_FIDASetNonlinearSolverSensStg(farg1, farg2) swig_result = fresult end function function FIDASensToggleOff(ida_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = ida_mem fresult = swigc_FIDASensToggleOff(farg1) swig_result = fresult end function function FIDAGetSens(ida_mem, tret, yysout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(C_PTR) :: yysout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 
= c_loc(tret(1)) farg3 = yysout fresult = swigc_FIDAGetSens(farg1, farg2, farg3) swig_result = fresult end function function FIDAGetSens1(ida_mem, tret, is, yysret) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: yysret integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = c_loc(tret(1)) farg3 = is farg4 = c_loc(yysret) fresult = swigc_FIDAGetSens1(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetSensDky(ida_mem, t, k, dkys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(C_PTR) :: dkys integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = t farg3 = k farg4 = dkys fresult = swigc_FIDAGetSensDky(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetSensDky1(ida_mem, t, k, is, dkys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: dkys integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = t farg3 = k farg4 = is farg5 = c_loc(dkys) fresult = swigc_FIDAGetSensDky1(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDAGetSensNumResEvals(ida_mem, nressevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, 
intent(inout) :: nressevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nressevals(1)) fresult = swigc_FIDAGetSensNumResEvals(farg1, farg2) swig_result = fresult end function function FIDAGetNumResEvalsSens(ida_mem, nresevalss) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nresevalss integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nresevalss(1)) fresult = swigc_FIDAGetNumResEvalsSens(farg1, farg2) swig_result = fresult end function function FIDAGetSensNumErrTestFails(ida_mem, nsetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nsetfails(1)) fresult = swigc_FIDAGetSensNumErrTestFails(farg1, farg2) swig_result = fresult end function function FIDAGetSensNumLinSolvSetups(ida_mem, nlinsetupss) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetupss integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nlinsetupss(1)) fresult = swigc_FIDAGetSensNumLinSolvSetups(farg1, farg2) swig_result = fresult end function function FIDAGetSensErrWeights(ida_mem, esweight) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: esweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = esweight fresult = swigc_FIDAGetSensErrWeights(farg1, farg2) swig_result = fresult end function function FIDAGetSensStats(ida_mem, nressevals, nresevalss, nsetfails, nlinsetupss) & 
result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nressevals integer(C_LONG), dimension(*), target, intent(inout) :: nresevalss integer(C_LONG), dimension(*), target, intent(inout) :: nsetfails integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetupss integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = c_loc(nressevals(1)) farg3 = c_loc(nresevalss(1)) farg4 = c_loc(nsetfails(1)) farg5 = c_loc(nlinsetupss(1)) fresult = swigc_FIDAGetSensStats(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDAGetSensNumNonlinSolvIters(ida_mem, nsniters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsniters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nsniters(1)) fresult = swigc_FIDAGetSensNumNonlinSolvIters(farg1, farg2) swig_result = fresult end function function FIDAGetSensNumNonlinSolvConvFails(ida_mem, nsncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nsncfails(1)) fresult = swigc_FIDAGetSensNumNonlinSolvConvFails(farg1, farg2) swig_result = fresult end function function FIDAGetSensNonlinSolvStats(ida_mem, nsniters, nsncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsniters integer(C_LONG), dimension(*), target, intent(inout) :: nsncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 
type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(nsniters(1)) farg3 = c_loc(nsncfails(1)) fresult = swigc_FIDAGetSensNonlinSolvStats(farg1, farg2, farg3) swig_result = fresult end function subroutine FIDASensFree(ida_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: ida_mem type(C_PTR) :: farg1 farg1 = ida_mem call swigc_FIDASensFree(farg1) end subroutine function FIDAQuadSensInit(ida_mem, resqs, yqs0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: resqs type(C_PTR) :: yqs0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = resqs farg3 = yqs0 fresult = swigc_FIDAQuadSensInit(farg1, farg2, farg3) swig_result = fresult end function function FIDAQuadSensReInit(ida_mem, yqs0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: yqs0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = yqs0 fresult = swigc_FIDAQuadSensReInit(farg1, farg2) swig_result = fresult end function function FIDAQuadSensSStolerances(ida_mem, reltolqs, abstolqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltolqs real(C_DOUBLE), dimension(*), target, intent(inout) :: abstolqs integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = reltolqs farg3 = c_loc(abstolqs(1)) fresult = swigc_FIDAQuadSensSStolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDAQuadSensSVtolerances(ida_mem, reltolqs, abstolqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: reltolqs type(C_PTR) :: abstolqs integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: 
farg3 farg1 = ida_mem farg2 = reltolqs farg3 = abstolqs fresult = swigc_FIDAQuadSensSVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FIDAQuadSensEEtolerances(ida_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = ida_mem fresult = swigc_FIDAQuadSensEEtolerances(farg1) swig_result = fresult end function function FIDASetQuadSensErrCon(ida_mem, errconqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: errconqs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = errconqs fresult = swigc_FIDASetQuadSensErrCon(farg1, farg2) swig_result = fresult end function function FIDAGetQuadSens(ida_mem, tret, yyqsout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(C_PTR) :: yyqsout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(tret(1)) farg3 = yyqsout fresult = swigc_FIDAGetQuadSens(farg1, farg2, farg3) swig_result = fresult end function function FIDAGetQuadSens1(ida_mem, tret, is, yyqsret) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: yyqsret integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = c_loc(tret(1)) farg3 = is farg4 = c_loc(yyqsret) fresult = swigc_FIDAGetQuadSens1(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetQuadSensDky(ida_mem, t, k, dkyqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING 
integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(C_PTR) :: dkyqs integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = t farg3 = k farg4 = dkyqs fresult = swigc_FIDAGetQuadSensDky(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetQuadSensDky1(ida_mem, t, k, is, dkyqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: dkyqs integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = t farg3 = k farg4 = is farg5 = c_loc(dkyqs) fresult = swigc_FIDAGetQuadSensDky1(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDAGetQuadSensNumRhsEvals(ida_mem, nrhsqsevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nrhsqsevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nrhsqsevals(1)) fresult = swigc_FIDAGetQuadSensNumRhsEvals(farg1, farg2) swig_result = fresult end function function FIDAGetQuadSensNumErrTestFails(ida_mem, nqsetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nqsetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nqsetfails(1)) fresult = swigc_FIDAGetQuadSensNumErrTestFails(farg1, farg2) swig_result = fresult end function function FIDAGetQuadSensErrWeights(ida_mem, eqsweight) & result(swig_result) use, intrinsic :: 
ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR) :: eqsweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = eqsweight fresult = swigc_FIDAGetQuadSensErrWeights(farg1, farg2) swig_result = fresult end function function FIDAGetQuadSensStats(ida_mem, nrhsqsevals, nqsetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nrhsqsevals integer(C_LONG), dimension(*), target, intent(inout) :: nqsetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(nrhsqsevals(1)) farg3 = c_loc(nqsetfails(1)) fresult = swigc_FIDAGetQuadSensStats(farg1, farg2, farg3) swig_result = fresult end function subroutine FIDAQuadSensFree(ida_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: ida_mem type(C_PTR) :: farg1 farg1 = ida_mem call swigc_FIDAQuadSensFree(farg1) end subroutine function FIDAAdjInit(ida_mem, steps, interp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), intent(in) :: steps integer(C_INT), intent(in) :: interp integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 integer(C_INT) :: farg3 farg1 = ida_mem farg2 = steps farg3 = interp fresult = swigc_FIDAAdjInit(farg1, farg2, farg3) swig_result = fresult end function function FIDAAdjReInit(ida_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = ida_mem fresult = swigc_FIDAAdjReInit(farg1) swig_result = fresult end function subroutine FIDAAdjFree(ida_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: ida_mem type(C_PTR) :: farg1 farg1 = ida_mem call swigc_FIDAAdjFree(farg1) end subroutine function FIDACreateB(ida_mem, which) & result(swig_result) use, intrinsic :: 
ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), dimension(*), target, intent(inout) :: which integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(which(1)) fresult = swigc_FIDACreateB(farg1, farg2) swig_result = fresult end function function FIDAInitB(ida_mem, which, resb, tb0, yyb0, ypb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: resb real(C_DOUBLE), intent(in) :: tb0 type(N_Vector), target, intent(inout) :: yyb0 type(N_Vector), target, intent(inout) :: ypb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = ida_mem farg2 = which farg3 = resb farg4 = tb0 farg5 = c_loc(yyb0) farg6 = c_loc(ypb0) fresult = swigc_FIDAInitB(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FIDAInitBS(ida_mem, which, ress, tb0, yyb0, ypb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: ress real(C_DOUBLE), intent(in) :: tb0 type(N_Vector), target, intent(inout) :: yyb0 type(N_Vector), target, intent(inout) :: ypb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = ida_mem farg2 = which farg3 = ress farg4 = tb0 farg5 = c_loc(yyb0) farg6 = c_loc(ypb0) fresult = swigc_FIDAInitBS(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FIDAReInitB(ida_mem, which, tb0, yyb0, ypb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: 
tb0 type(N_Vector), target, intent(inout) :: yyb0 type(N_Vector), target, intent(inout) :: ypb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = which farg3 = tb0 farg4 = c_loc(yyb0) farg5 = c_loc(ypb0) fresult = swigc_FIDAReInitB(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDASStolerancesB(ida_mem, which, reltolb, abstolb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: reltolb real(C_DOUBLE), intent(in) :: abstolb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 real(C_DOUBLE) :: farg4 farg1 = ida_mem farg2 = which farg3 = reltolb farg4 = abstolb fresult = swigc_FIDASStolerancesB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDASVtolerancesB(ida_mem, which, reltolb, abstolb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: reltolb type(N_Vector), target, intent(inout) :: abstolb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = reltolb farg4 = c_loc(abstolb) fresult = swigc_FIDASVtolerancesB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAQuadInitB(ida_mem, which, rhsqb, yqb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: rhsqb type(N_Vector), target, intent(inout) :: yqb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = rhsqb farg4 = 
c_loc(yqb0) fresult = swigc_FIDAQuadInitB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAQuadInitBS(ida_mem, which, rhsqs, yqb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: rhsqs type(N_Vector), target, intent(inout) :: yqb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = rhsqs farg4 = c_loc(yqb0) fresult = swigc_FIDAQuadInitBS(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAQuadReInitB(ida_mem, which, yqb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(N_Vector), target, intent(inout) :: yqb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = c_loc(yqb0) fresult = swigc_FIDAQuadReInitB(farg1, farg2, farg3) swig_result = fresult end function function FIDAQuadSStolerancesB(ida_mem, which, reltolqb, abstolqb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: reltolqb real(C_DOUBLE), intent(in) :: abstolqb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 real(C_DOUBLE) :: farg4 farg1 = ida_mem farg2 = which farg3 = reltolqb farg4 = abstolqb fresult = swigc_FIDAQuadSStolerancesB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAQuadSVtolerancesB(ida_mem, which, reltolqb, abstolqb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: reltolqb type(N_Vector), target, intent(inout) :: 
abstolqb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = reltolqb farg4 = c_loc(abstolqb) fresult = swigc_FIDAQuadSVtolerancesB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDACalcICB(ida_mem, which, tout1, yy0, yp0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: tout1 type(N_Vector), target, intent(inout) :: yy0 type(N_Vector), target, intent(inout) :: yp0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = which farg3 = tout1 farg4 = c_loc(yy0) farg5 = c_loc(yp0) fresult = swigc_FIDACalcICB(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDACalcICBS(ida_mem, which, tout1, yy0, yp0, yys0, yps0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: tout1 type(N_Vector), target, intent(inout) :: yy0 type(N_Vector), target, intent(inout) :: yp0 type(C_PTR) :: yys0 type(C_PTR) :: yps0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 farg1 = ida_mem farg2 = which farg3 = tout1 farg4 = c_loc(yy0) farg5 = c_loc(yp0) farg6 = yys0 farg7 = yps0 fresult = swigc_FIDACalcICBS(farg1, farg2, farg3, farg4, farg5, farg6, farg7) swig_result = fresult end function function FIDASolveF(ida_mem, tout, tret, yret, ypret, itask, ncheckptr) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: tout real(C_DOUBLE), dimension(*), target, intent(inout) :: tret 
type(N_Vector), target, intent(inout) :: yret type(N_Vector), target, intent(inout) :: ypret integer(C_INT), intent(in) :: itask integer(C_INT), dimension(*), target, intent(inout) :: ncheckptr integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 integer(C_INT) :: farg6 type(C_PTR) :: farg7 farg1 = ida_mem farg2 = tout farg3 = c_loc(tret(1)) farg4 = c_loc(yret) farg5 = c_loc(ypret) farg6 = itask farg7 = c_loc(ncheckptr(1)) fresult = swigc_FIDASolveF(farg1, farg2, farg3, farg4, farg5, farg6, farg7) swig_result = fresult end function function FIDASolveB(ida_mem, tbout, itaskb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: tbout integer(C_INT), intent(in) :: itaskb integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 farg1 = ida_mem farg2 = tbout farg3 = itaskb fresult = swigc_FIDASolveB(farg1, farg2, farg3) swig_result = fresult end function function FIDAAdjSetNoSensi(ida_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = ida_mem fresult = swigc_FIDAAdjSetNoSensi(farg1) swig_result = fresult end function function FIDASetUserDataB(ida_mem, which, user_datab) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_PTR) :: user_datab integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = user_datab fresult = swigc_FIDASetUserDataB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetMaxOrdB(ida_mem, which, maxordb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which 
integer(C_INT), intent(in) :: maxordb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 farg1 = ida_mem farg2 = which farg3 = maxordb fresult = swigc_FIDASetMaxOrdB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetMaxNumStepsB(ida_mem, which, mxstepsb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which integer(C_LONG), intent(in) :: mxstepsb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_LONG) :: farg3 farg1 = ida_mem farg2 = which farg3 = mxstepsb fresult = swigc_FIDASetMaxNumStepsB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetInitStepB(ida_mem, which, hinb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: hinb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = which farg3 = hinb fresult = swigc_FIDASetInitStepB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetMaxStepB(ida_mem, which, hmaxb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: hmaxb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = which farg3 = hmaxb fresult = swigc_FIDASetMaxStepB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetSuppressAlgB(ida_mem, which, suppressalgb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which integer(C_INT), intent(in) :: suppressalgb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 farg1 = 
ida_mem farg2 = which farg3 = suppressalgb fresult = swigc_FIDASetSuppressAlgB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetIdB(ida_mem, which, idb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(N_Vector), target, intent(inout) :: idb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = c_loc(idb) fresult = swigc_FIDASetIdB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetConstraintsB(ida_mem, which, constraintsb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(N_Vector), target, intent(inout) :: constraintsb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = c_loc(constraintsb) fresult = swigc_FIDASetConstraintsB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetQuadErrConB(ida_mem, which, errconqb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which integer(C_INT), intent(in) :: errconqb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 farg1 = ida_mem farg2 = which farg3 = errconqb fresult = swigc_FIDASetQuadErrConB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetNonlinearSolverB(ida_mem, which, nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = c_loc(nls) fresult = swigc_FIDASetNonlinearSolverB(farg1, 
farg2, farg3) swig_result = fresult end function function FIDAGetB(ida_mem, which, tret, yy, yp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(N_Vector), target, intent(inout) :: yy type(N_Vector), target, intent(inout) :: yp integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = which farg3 = c_loc(tret(1)) farg4 = c_loc(yy) farg5 = c_loc(yp) fresult = swigc_FIDAGetB(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDAGetQuadB(ida_mem, which, tret, qb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(N_Vector), target, intent(inout) :: qb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = c_loc(tret(1)) farg4 = c_loc(qb) fresult = swigc_FIDAGetQuadB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetAdjIDABmem(ida_mem, which) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = which fresult = swigc_FIDAGetAdjIDABmem(farg1, farg2) swig_result = fresult end function function FIDAGetConsistentICB(ida_mem, which, yyb0, ypb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(N_Vector), target, intent(inout) :: yyb0 type(N_Vector), target, intent(inout) :: ypb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: 
farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = c_loc(yyb0) farg4 = c_loc(ypb0) fresult = swigc_FIDAGetConsistentICB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDAGetAdjY(ida_mem, t, yy, yp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: t type(N_Vector), target, intent(inout) :: yy type(N_Vector), target, intent(inout) :: yp integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = t farg3 = c_loc(yy) farg4 = c_loc(yp) fresult = swigc_FIDAGetAdjY(farg1, farg2, farg3, farg4) swig_result = fresult end function subroutine swigf_IDAadjCheckPointRec_my_addr_set(self, my_addr) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(in) :: self type(C_PTR) :: my_addr type(SwigClassWrapper) :: farg1 type(C_PTR) :: farg2 farg1 = self%swigdata farg2 = my_addr call swigc_IDAadjCheckPointRec_my_addr_set(farg1, farg2) end subroutine function swigf_IDAadjCheckPointRec_my_addr_get(self) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result class(IDAadjCheckPointRec), intent(in) :: self type(C_PTR) :: fresult type(SwigClassWrapper) :: farg1 farg1 = self%swigdata fresult = swigc_IDAadjCheckPointRec_my_addr_get(farg1) swig_result = fresult end function subroutine swigf_IDAadjCheckPointRec_next_addr_set(self, next_addr) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(in) :: self type(C_PTR) :: next_addr type(SwigClassWrapper) :: farg1 type(C_PTR) :: farg2 farg1 = self%swigdata farg2 = next_addr call swigc_IDAadjCheckPointRec_next_addr_set(farg1, farg2) end subroutine function swigf_IDAadjCheckPointRec_next_addr_get(self) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result class(IDAadjCheckPointRec), intent(in) :: self type(C_PTR) :: fresult type(SwigClassWrapper) :: 
farg1 farg1 = self%swigdata fresult = swigc_IDAadjCheckPointRec_next_addr_get(farg1) swig_result = fresult end function subroutine swigf_IDAadjCheckPointRec_t0_set(self, t0) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(in) :: self real(C_DOUBLE), intent(in) :: t0 type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: farg2 farg1 = self%swigdata farg2 = t0 call swigc_IDAadjCheckPointRec_t0_set(farg1, farg2) end subroutine function swigf_IDAadjCheckPointRec_t0_get(self) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result class(IDAadjCheckPointRec), intent(in) :: self real(C_DOUBLE) :: fresult type(SwigClassWrapper) :: farg1 farg1 = self%swigdata fresult = swigc_IDAadjCheckPointRec_t0_get(farg1) swig_result = fresult end function subroutine swigf_IDAadjCheckPointRec_t1_set(self, t1) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(in) :: self real(C_DOUBLE), intent(in) :: t1 type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: farg2 farg1 = self%swigdata farg2 = t1 call swigc_IDAadjCheckPointRec_t1_set(farg1, farg2) end subroutine function swigf_IDAadjCheckPointRec_t1_get(self) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result class(IDAadjCheckPointRec), intent(in) :: self real(C_DOUBLE) :: fresult type(SwigClassWrapper) :: farg1 farg1 = self%swigdata fresult = swigc_IDAadjCheckPointRec_t1_get(farg1) swig_result = fresult end function subroutine swigf_IDAadjCheckPointRec_nstep_set(self, nstep) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(in) :: self integer(C_LONG), intent(in) :: nstep type(SwigClassWrapper) :: farg1 integer(C_LONG) :: farg2 farg1 = self%swigdata farg2 = nstep call swigc_IDAadjCheckPointRec_nstep_set(farg1, farg2) end subroutine function swigf_IDAadjCheckPointRec_nstep_get(self) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_LONG) :: swig_result class(IDAadjCheckPointRec), intent(in) :: self integer(C_LONG) :: 
fresult type(SwigClassWrapper) :: farg1 farg1 = self%swigdata fresult = swigc_IDAadjCheckPointRec_nstep_get(farg1) swig_result = fresult end function subroutine swigf_IDAadjCheckPointRec_order_set(self, order) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(in) :: self integer(C_INT), intent(in) :: order type(SwigClassWrapper) :: farg1 integer(C_INT) :: farg2 farg1 = self%swigdata farg2 = order call swigc_IDAadjCheckPointRec_order_set(farg1, farg2) end subroutine function swigf_IDAadjCheckPointRec_order_get(self) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result class(IDAadjCheckPointRec), intent(in) :: self integer(C_INT) :: fresult type(SwigClassWrapper) :: farg1 farg1 = self%swigdata fresult = swigc_IDAadjCheckPointRec_order_get(farg1) swig_result = fresult end function subroutine swigf_IDAadjCheckPointRec_step_set(self, step) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(in) :: self real(C_DOUBLE), intent(in) :: step type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: farg2 farg1 = self%swigdata farg2 = step call swigc_IDAadjCheckPointRec_step_set(farg1, farg2) end subroutine function swigf_IDAadjCheckPointRec_step_get(self) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result class(IDAadjCheckPointRec), intent(in) :: self real(C_DOUBLE) :: fresult type(SwigClassWrapper) :: farg1 farg1 = self%swigdata fresult = swigc_IDAadjCheckPointRec_step_get(farg1) swig_result = fresult end function function swigf_create_IDAadjCheckPointRec() & result(self) use, intrinsic :: ISO_C_BINDING type(IDAadjCheckPointRec) :: self type(SwigClassWrapper) :: fresult fresult = swigc_new_IDAadjCheckPointRec() self%swigdata = fresult end function subroutine swigf_release_IDAadjCheckPointRec(self) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(inout) :: self type(SwigClassWrapper) :: farg1 farg1 = self%swigdata if (btest(farg1%cmemflags, swig_cmem_own_bit)) then call 
swigc_delete_IDAadjCheckPointRec(farg1) endif farg1%cptr = C_NULL_PTR farg1%cmemflags = 0 self%swigdata = farg1 end subroutine subroutine swigf_IDAadjCheckPointRec_op_assign__(self, other) use, intrinsic :: ISO_C_BINDING class(IDAadjCheckPointRec), intent(inout) :: self type(IDAadjCheckPointRec), intent(in) :: other type(SwigClassWrapper) :: farg1 type(SwigClassWrapper) :: farg2 farg1 = self%swigdata farg2 = other%swigdata call swigc_IDAadjCheckPointRec_op_assign__(farg1, farg2) self%swigdata = farg1 end subroutine function FIDAGetAdjCheckPointsInfo(ida_mem, ckpnt) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem class(IDAadjCheckPointRec), intent(in) :: ckpnt integer(C_INT) :: fresult type(C_PTR) :: farg1 type(SwigClassWrapper) :: farg2 farg1 = ida_mem farg2 = ckpnt%swigdata fresult = swigc_FIDAGetAdjCheckPointsInfo(farg1, farg2) swig_result = fresult end function function FIDASetJacTimesResFnB(ida_mem, which, jtimesresfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jtimesresfn integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = jtimesresfn fresult = swigc_FIDASetJacTimesResFnB(farg1, farg2, farg3) swig_result = fresult end function function FIDAGetAdjDataPointHermite(ida_mem, which, t, yy, yd) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), dimension(*), target, intent(inout) :: t type(N_Vector), target, intent(inout) :: yy type(N_Vector), target, intent(inout) :: yd integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = which farg3 = c_loc(t(1)) farg4 = c_loc(yy) farg5 = c_loc(yd) 
fresult = swigc_FIDAGetAdjDataPointHermite(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDAGetAdjDataPointPolynomial(ida_mem, which, t, order, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), dimension(*), target, intent(inout) :: t integer(C_INT), dimension(*), target, intent(inout) :: order type(N_Vector), target, intent(inout) :: y integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = ida_mem farg2 = which farg3 = c_loc(t(1)) farg4 = c_loc(order(1)) farg5 = c_loc(y) fresult = swigc_FIDAGetAdjDataPointPolynomial(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDAGetAdjCurrentCheckPoint(ida_mem, addr) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_PTR), target, intent(inout) :: addr integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(addr) fresult = swigc_FIDAGetAdjCurrentCheckPoint(farg1, farg2) swig_result = fresult end function function FIDABBDPrecInit(ida_mem, nlocal, mudq, mldq, mukeep, mlkeep, dq_rel_yy, gres, gcomm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT64_T), intent(in) :: nlocal integer(C_INT64_T), intent(in) :: mudq integer(C_INT64_T), intent(in) :: mldq integer(C_INT64_T), intent(in) :: mukeep integer(C_INT64_T), intent(in) :: mlkeep real(C_DOUBLE), intent(in) :: dq_rel_yy type(C_FUNPTR), intent(in), value :: gres type(C_FUNPTR), intent(in), value :: gcomm integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 integer(C_INT64_T) :: farg4 integer(C_INT64_T) :: farg5 integer(C_INT64_T) :: farg6 real(C_DOUBLE) :: farg7 type(C_FUNPTR) :: farg8 
type(C_FUNPTR) :: farg9 farg1 = ida_mem farg2 = nlocal farg3 = mudq farg4 = mldq farg5 = mukeep farg6 = mlkeep farg7 = dq_rel_yy farg8 = gres farg9 = gcomm fresult = swigc_FIDABBDPrecInit(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) swig_result = fresult end function function FIDABBDPrecReInit(ida_mem, mudq, mldq, dq_rel_yy) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT64_T), intent(in) :: mudq integer(C_INT64_T), intent(in) :: mldq real(C_DOUBLE), intent(in) :: dq_rel_yy integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 real(C_DOUBLE) :: farg4 farg1 = ida_mem farg2 = mudq farg3 = mldq farg4 = dq_rel_yy fresult = swigc_FIDABBDPrecReInit(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDABBDPrecGetWorkSpace(ida_mem, lenrwbbdp, leniwbbdp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: lenrwbbdp integer(C_LONG), dimension(*), target, intent(inout) :: leniwbbdp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(lenrwbbdp(1)) farg3 = c_loc(leniwbbdp(1)) fresult = swigc_FIDABBDPrecGetWorkSpace(farg1, farg2, farg3) swig_result = fresult end function function FIDABBDPrecGetNumGfnEvals(ida_mem, ngevalsbbdp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: ngevalsbbdp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(ngevalsbbdp(1)) fresult = swigc_FIDABBDPrecGetNumGfnEvals(farg1, farg2) swig_result = fresult end function function FIDABBDPrecInitB(ida_mem, which, nlocalb, mudqb, mldqb, mukeepb, mlkeepb, dq_rel_yyb, gresb, gcommb) & result(swig_result) use, intrinsic 
:: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which integer(C_INT64_T), intent(in) :: nlocalb integer(C_INT64_T), intent(in) :: mudqb integer(C_INT64_T), intent(in) :: mldqb integer(C_INT64_T), intent(in) :: mukeepb integer(C_INT64_T), intent(in) :: mlkeepb real(C_DOUBLE), intent(in) :: dq_rel_yyb type(C_FUNPTR), intent(in), value :: gresb type(C_FUNPTR), intent(in), value :: gcommb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT64_T) :: farg3 integer(C_INT64_T) :: farg4 integer(C_INT64_T) :: farg5 integer(C_INT64_T) :: farg6 integer(C_INT64_T) :: farg7 real(C_DOUBLE) :: farg8 type(C_FUNPTR) :: farg9 type(C_FUNPTR) :: farg10 farg1 = ida_mem farg2 = which farg3 = nlocalb farg4 = mudqb farg5 = mldqb farg6 = mukeepb farg7 = mlkeepb farg8 = dq_rel_yyb farg9 = gresb farg10 = gcommb fresult = swigc_FIDABBDPrecInitB(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10) swig_result = fresult end function function FIDABBDPrecReInitB(ida_mem, which, mudqb, mldqb, dq_rel_yyb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which integer(C_INT64_T), intent(in) :: mudqb integer(C_INT64_T), intent(in) :: mldqb real(C_DOUBLE), intent(in) :: dq_rel_yyb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT64_T) :: farg3 integer(C_INT64_T) :: farg4 real(C_DOUBLE) :: farg5 farg1 = ida_mem farg2 = which farg3 = mudqb farg4 = mldqb farg5 = dq_rel_yyb fresult = swigc_FIDABBDPrecReInitB(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FIDASetLinearSolver(ida_mem, ls, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(SUNLinearSolver), target, intent(inout) :: ls type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 
type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(ls) farg3 = c_loc(a) fresult = swigc_FIDASetLinearSolver(farg1, farg2, farg3) swig_result = fresult end function function FIDASetJacFn(ida_mem, jac) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: jac integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = ida_mem farg2 = jac fresult = swigc_FIDASetJacFn(farg1, farg2) swig_result = fresult end function function FIDASetPreconditioner(ida_mem, pset, psolve) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: pset type(C_FUNPTR), intent(in), value :: psolve integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = ida_mem farg2 = pset farg3 = psolve fresult = swigc_FIDASetPreconditioner(farg1, farg2, farg3) swig_result = fresult end function function FIDASetJacTimes(ida_mem, jtsetup, jtimes) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem type(C_FUNPTR), intent(in), value :: jtsetup type(C_FUNPTR), intent(in), value :: jtimes integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = ida_mem farg2 = jtsetup farg3 = jtimes fresult = swigc_FIDASetJacTimes(farg1, farg2, farg3) swig_result = fresult end function function FIDASetEpsLin(ida_mem, eplifac) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: eplifac integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = eplifac fresult = swigc_FIDASetEpsLin(farg1, farg2) swig_result = fresult end function function FIDASetLSNormFactor(ida_mem, nrmfac) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: 
ida_mem real(C_DOUBLE), intent(in) :: nrmfac integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = nrmfac fresult = swigc_FIDASetLSNormFactor(farg1, farg2) swig_result = fresult end function function FIDASetLinearSolutionScaling(ida_mem, onoff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: onoff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = ida_mem farg2 = onoff fresult = swigc_FIDASetLinearSolutionScaling(farg1, farg2) swig_result = fresult end function function FIDASetIncrementFactor(ida_mem, dqincfac) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem real(C_DOUBLE), intent(in) :: dqincfac integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = ida_mem farg2 = dqincfac fresult = swigc_FIDASetIncrementFactor(farg1, farg2) swig_result = fresult end function function FIDAGetLinWorkSpace(ida_mem, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = ida_mem farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FIDAGetLinWorkSpace(farg1, farg2, farg3) swig_result = fresult end function function FIDAGetNumJacEvals(ida_mem, njevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: njevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(njevals(1)) fresult = swigc_FIDAGetNumJacEvals(farg1, farg2) swig_result = fresult end function function 
FIDAGetNumPrecEvals(ida_mem, npevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: npevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(npevals(1)) fresult = swigc_FIDAGetNumPrecEvals(farg1, farg2) swig_result = fresult end function function FIDAGetNumPrecSolves(ida_mem, npsolves) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: npsolves integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(npsolves(1)) fresult = swigc_FIDAGetNumPrecSolves(farg1, farg2) swig_result = fresult end function function FIDAGetNumLinIters(ida_mem, nliters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nliters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nliters(1)) fresult = swigc_FIDAGetNumLinIters(farg1, farg2) swig_result = fresult end function function FIDAGetNumLinConvFails(ida_mem, nlcfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nlcfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nlcfails(1)) fresult = swigc_FIDAGetNumLinConvFails(farg1, farg2) swig_result = fresult end function function FIDAGetNumJTSetupEvals(ida_mem, njtsetups) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: njtsetups integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(njtsetups(1)) fresult 
= swigc_FIDAGetNumJTSetupEvals(farg1, farg2) swig_result = fresult end function function FIDAGetNumJtimesEvals(ida_mem, njvevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: njvevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(njvevals(1)) fresult = swigc_FIDAGetNumJtimesEvals(farg1, farg2) swig_result = fresult end function function FIDAGetNumLinResEvals(ida_mem, nrevalsls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: nrevalsls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(nrevalsls(1)) fresult = swigc_FIDAGetNumLinResEvals(farg1, farg2) swig_result = fresult end function function FIDAGetLastLinFlag(ida_mem, flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_LONG), dimension(*), target, intent(inout) :: flag integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = ida_mem farg2 = c_loc(flag(1)) fresult = swigc_FIDAGetLastLinFlag(farg1, farg2) swig_result = fresult end function function FIDAGetLinReturnFlagName(flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=:), allocatable :: swig_result integer(C_LONG), intent(in) :: flag type(SwigArrayWrapper) :: fresult integer(C_LONG) :: farg1 farg1 = flag fresult = swigc_FIDAGetLinReturnFlagName(farg1) call SWIG_chararray_to_string(fresult, swig_result) if (.false.) 
call SWIG_free(fresult%data) end function function FIDASetLinearSolverB(ida_mem, which, ls, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(SUNLinearSolver), target, intent(inout) :: ls type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = c_loc(ls) farg4 = c_loc(a) fresult = swigc_FIDASetLinearSolverB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDASetJacFnB(ida_mem, which, jacb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jacb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = jacb fresult = swigc_FIDASetJacFnB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetJacFnBS(ida_mem, which, jacbs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jacbs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = ida_mem farg2 = which farg3 = jacbs fresult = swigc_FIDASetJacFnBS(farg1, farg2, farg3) swig_result = fresult end function function FIDASetEpsLinB(ida_mem, which, eplifacb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: eplifacb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = which farg3 = eplifacb fresult = swigc_FIDASetEpsLinB(farg1, farg2, farg3) swig_result = fresult end function function 
FIDASetLSNormFactorB(ida_mem, which, nrmfacb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: nrmfacb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = which farg3 = nrmfacb fresult = swigc_FIDASetLSNormFactorB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetLinearSolutionScalingB(ida_mem, which, onoffb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which integer(C_INT), intent(in) :: onoffb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 farg1 = ida_mem farg2 = which farg3 = onoffb fresult = swigc_FIDASetLinearSolutionScalingB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetIncrementFactorB(ida_mem, which, dqincfacb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: dqincfacb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = ida_mem farg2 = which farg3 = dqincfacb fresult = swigc_FIDASetIncrementFactorB(farg1, farg2, farg3) swig_result = fresult end function function FIDASetPreconditionerB(ida_mem, which, psetb, psolveb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: psetb type(C_FUNPTR), intent(in), value :: psolveb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = psetb farg4 = psolveb fresult = swigc_FIDASetPreconditionerB(farg1, farg2, farg3, farg4) swig_result = fresult end 
function function FIDASetPreconditionerBS(ida_mem, which, psetbs, psolvebs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: psetbs type(C_FUNPTR), intent(in), value :: psolvebs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = psetbs farg4 = psolvebs fresult = swigc_FIDASetPreconditionerBS(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDASetJacTimesB(ida_mem, which, jtsetupb, jtimesb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jtsetupb type(C_FUNPTR), intent(in), value :: jtimesb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = jtsetupb farg4 = jtimesb fresult = swigc_FIDASetJacTimesB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FIDASetJacTimesBS(ida_mem, which, jtsetupbs, jtimesbs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: ida_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jtsetupbs type(C_FUNPTR), intent(in), value :: jtimesbs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = ida_mem farg2 = which farg3 = jtsetupbs farg4 = jtimesbs fresult = swigc_FIDASetJacTimesBS(farg1, farg2, farg3, farg4) swig_result = fresult end function end module StanHeaders/src/idas/README.md0000644000176200001440000000442614645137106015456 0ustar liggesusers# IDAS ### Version 5.1.1 (Feb 2022) **Radu Serban, Cosmin Petra, Alan C. Hindmarsh, Cody J. Balos, David J. Gardner, and Carol S. 
Woodward, Center for Applied Scientific Computing, LLNL** **Daniel R. Reynolds, Department of Mathematics, Southern Methodist University** IDAS is a package for the solution of differential-algebraic equation (DAE) systems ``` F(t,y,y',p) = 0, y(t0) = y0(p), y'(t0) = y0'(p) ``` with sensitivity analysis capabilities (both forward and adjoint modes). The integration methods used in IDAS are variable-order, variable-coefficient BDF (Backward Differentiation Formula) methods in fixed-leading-coefficient form. IDAS is part of the SUNDIALS Suite of Nonlinear and Differential/Algebraic equation Solvers which consists of ARKode, CVODE, CVODES, IDA, IDAS and KINSOL. It is written in ANSI standard C and can be used in a variety of computing environments including serial, shared memory, distributed memory, and accelerator-based (e.g., GPU) systems. This flexibility is obtained from a modular design that leverages the shared vector, matrix, linear solver, and nonlinear solver APIs used across SUNDIALS packages. ## Documentation See the [IDAS User Guide](/doc/idas/idas_guide.pdf) and [IDAS Examples](/doc/idas/idas_examples.pdf) document for more information about IDAS usage and the provided example programs respectively. ## Installation For installation instructions see the [INSTALL_GUIDE](/INSTALL_GUIDE.pdf) or the "Installation Procedure" chapter in the IDAS User Guide. ## Release History Information on recent changes to IDAS can be found in the "Introduction" chapter of the IDAS User Guide and a complete release history is available in the "SUNDIALS Release History" appendix of the IDAS User Guide. ## References * R. Serban, C. Petra, A. C. Hindmarsh, C. J. Balos, D. J. Gardner, D. R. Reynolds and C. S. Woodward, "User Documentation for IDAS v5.1.1," LLNL technical report UCRL-SM-234051, Feb 2022. * R. Serban and A.C. Hindmarsh, "Example Programs for IDAS v5.1.1," LLNL technical report LLNL-TR-437091, Feb 2022. * A. C. Hindmarsh, P. N. Brown, K. E. Grant, S. L. Lee, R. 
Serban, D. E. Shumaker, and C. S. Woodward, "SUNDIALS, Suite of Nonlinear and Differential/Algebraic Equation Solvers," ACM Trans. Math. Softw., 31(3), pp. 363-396, 2005. StanHeaders/src/idas/idas_ic.c0000644000176200001440000014023114645137106015731 0ustar liggesusers/* * ----------------------------------------------------------------- * $Revision$ * $Date$ * ----------------------------------------------------------------- * Programmers: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the IC calculation for IDAS. * It is independent of the linear solver in use. * ----------------------------------------------------------------- */ #include #include #include "idas_impl.h" #include /* * ================================================================= * IDA Constants * ================================================================= */ /* Private Constants */ #define ZERO RCONST(0.0) /* real 0.0 */ #define HALF RCONST(0.5) /* real 0.5 */ #define ONE RCONST(1.0) /* real 1.0 */ #define TWO RCONST(2.0) /* real 2.0 */ #define PT99 RCONST(0.99) /* real 0.99 */ #define PT1 RCONST(0.1) /* real 0.1 */ #define PT001 RCONST(0.001) /* real 0.001 */ /* IDACalcIC control constants */ #define ICRATEMAX RCONST(0.9) /* max. Newton conv. rate */ #define ALPHALS RCONST(0.0001) /* alpha in linesearch conv. 
test */ /* Return values for lower level routines used by IDACalcIC */ #define IC_FAIL_RECOV 1 #define IC_CONSTR_FAILED 2 #define IC_LINESRCH_FAILED 3 #define IC_CONV_FAIL 4 #define IC_SLOW_CONVRG 5 /*=================================================================*/ /* Shortcuts */ /*=================================================================*/ #define IDA_PROFILER IDA_mem->ida_sunctx->profiler /* * ================================================================= * Private Helper Functions Prototypes * ================================================================= */ extern int IDAInitialSetup(IDAMem IDA_mem); extern realtype IDAWrmsNorm(IDAMem IDA_mem, N_Vector x, N_Vector w, booleantype mask); extern realtype IDASensWrmsNorm(IDAMem IDA_mem, N_Vector *xS, N_Vector *wS, booleantype mask); extern realtype IDASensWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm, N_Vector *xS, N_Vector *wS, booleantype mask); extern int IDASensEwtSet(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); static int IDANlsIC(IDAMem IDA_mem); static int IDANewtonIC(IDAMem IDA_mem); static int IDALineSrch(IDAMem IDA_mem, realtype *delnorm, realtype *fnorm); static int IDAfnorm(IDAMem IDA_mem, realtype *fnorm); static int IDANewyyp(IDAMem IDA_mem, realtype lambda); static int IDANewy(IDAMem IDA_mem); static int IDASensNewtonIC(IDAMem IDA_mem); static int IDASensLineSrch(IDAMem IDA_mem, realtype *delnorm, realtype *fnorm); static int IDASensNewyyp(IDAMem IDA_mem, realtype lambda); static int IDASensfnorm(IDAMem IDA_mem, realtype *fnorm); static int IDASensNlsIC(IDAMem IDA_mem); static int IDAICFailFlag(IDAMem IDA_mem, int retval); /* * ================================================================= * EXPORTED FUNCTIONS IMPLEMENTATION * ================================================================= */ /* * ----------------------------------------------------------------- * IDACalcIC * ----------------------------------------------------------------- * IDACalcIC computes 
consistent initial conditions, given the * user's initial guess for unknown components of yy0 and/or yp0. * * The return value is IDA_SUCCESS = 0 if no error occurred. * * The error return values (fully described in ida.h) are: * IDA_MEM_NULL ida_mem is NULL * IDA_NO_MALLOC ida_mem was not allocated * IDA_ILL_INPUT bad value for icopt, tout1, or id * IDA_LINIT_FAIL the linear solver linit routine failed * IDA_BAD_EWT zero value of some component of ewt * IDA_RES_FAIL res had a non-recoverable error * IDA_FIRST_RES_FAIL res failed recoverably on the first call * IDA_LSETUP_FAIL lsetup had a non-recoverable error * IDA_LSOLVE_FAIL lsolve had a non-recoverable error * IDA_NO_RECOVERY res, lsetup, or lsolve had a recoverable * error, but IDACalcIC could not recover * IDA_CONSTR_FAIL the inequality constraints could not be met * IDA_LINESEARCH_FAIL if the linesearch failed (either on steptol test * or on the maxbacks test) * IDA_CONV_FAIL the Newton iterations failed to converge * ----------------------------------------------------------------- */ int IDACalcIC(void *ida_mem, int icopt, realtype tout1) { int ewtsetOK; int ier, nwt, nh, mxnh, icret, retval=0; int is; realtype tdist, troundoff, minid, hic, ypnorm; IDAMem IDA_mem; booleantype sensi_stg, sensi_sim; /* Check if IDA memory exists */ if(ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDACalcIC", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Check if problem was malloc'ed */ if(IDA_mem->ida_MallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_MALLOC, "IDAS", "IDACalcIC", MSG_NO_MALLOC); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_MALLOC); } /* Check inputs to IDA for correctness and consistency */ ier = IDAInitialSetup(IDA_mem); if(ier != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDA_mem->ida_SetupDone = SUNTRUE; /* Check legality of input arguments, and set IDA memory 
copies. */ if(icopt != IDA_YA_YDP_INIT && icopt != IDA_Y_INIT) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDACalcIC", MSG_IC_BAD_ICOPT); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDA_mem->ida_icopt = icopt; if(icopt == IDA_YA_YDP_INIT && (IDA_mem->ida_id == NULL)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDACalcIC", MSG_IC_MISSING_ID); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } tdist = SUNRabs(tout1 - IDA_mem->ida_tn); troundoff = TWO * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(tout1)); if(tdist < troundoff) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDACalcIC", MSG_IC_TOO_CLOSE); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Are we computing sensitivities? */ sensi_stg = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_STAGGERED)); sensi_sim = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)); /* Allocate space and initialize temporary vectors */ IDA_mem->ida_yy0 = N_VClone(IDA_mem->ida_ee); IDA_mem->ida_yp0 = N_VClone(IDA_mem->ida_ee); IDA_mem->ida_t0 = IDA_mem->ida_tn; N_VScale(ONE, IDA_mem->ida_phi[0], IDA_mem->ida_yy0); N_VScale(ONE, IDA_mem->ida_phi[1], IDA_mem->ida_yp0); if (IDA_mem->ida_sensi) { /* Allocate temporary space required for sensitivity IC: yyS0 and ypS0. */ IDA_mem->ida_yyS0 = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_ee); IDA_mem->ida_ypS0 = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_ee); /* Initialize sensitivity vector. */ for (is=0; isida_Ns; is++) { N_VScale(ONE, IDA_mem->ida_phiS[0][is], IDA_mem->ida_yyS0[is]); N_VScale(ONE, IDA_mem->ida_phiS[1][is], IDA_mem->ida_ypS0[is]); } /* Initialize work space vectors needed for sensitivities. */ IDA_mem->ida_savresS = IDA_mem->ida_phiS[2]; IDA_mem->ida_delnewS = IDA_mem->ida_phiS[3]; IDA_mem->ida_yyS0new = IDA_mem->ida_phiS[4]; IDA_mem->ida_ypS0new = IDA_mem->ida_eeS; } /* For use in the IDA_YA_YP_INIT case, set sysindex and tscale. 
*/ IDA_mem->ida_sysindex = 1; IDA_mem->ida_tscale = tdist; if(icopt == IDA_YA_YDP_INIT) { minid = N_VMin(IDA_mem->ida_id); if(minid < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDACalcIC", MSG_IC_BAD_ID); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if(minid > HALF) IDA_mem->ida_sysindex = 0; } /* Set the test constant in the Newton convergence test */ IDA_mem->ida_epsNewt = IDA_mem->ida_epiccon; /* Initializations: cjratio = 1 (for use in direct linear solvers); set nbacktr = 0; */ IDA_mem->ida_cjratio = ONE; IDA_mem->ida_nbacktr = 0; /* Set hic, hh, cj, and mxnh. */ hic = PT001*tdist; ypnorm = IDAWrmsNorm(IDA_mem, IDA_mem->ida_yp0, IDA_mem->ida_ewt, IDA_mem->ida_suppressalg); if (sensi_sim) ypnorm = IDASensWrmsNormUpdate(IDA_mem, ypnorm, IDA_mem->ida_ypS0, IDA_mem->ida_ewtS, SUNFALSE); if(ypnorm > HALF/hic) hic = HALF/ypnorm; if(tout1 < IDA_mem->ida_tn) hic = -hic; IDA_mem->ida_hh = hic; if(icopt == IDA_YA_YDP_INIT) { IDA_mem->ida_cj = ONE/hic; mxnh = IDA_mem->ida_maxnh; } else { IDA_mem->ida_cj = ZERO; mxnh = 1; } /* Loop over nwt = number of evaluations of ewt vector. */ for(nwt = 1; nwt <= 2; nwt++) { /* Loop over nh = number of h values. */ for(nh = 1; nh <= mxnh; nh++) { /* Call the IC nonlinear solver function. */ retval = IDANlsIC(IDA_mem); /* Cut h and loop on recoverable IDA_YA_YDP_INIT failure; else break. */ if(retval == IDA_SUCCESS) break; IDA_mem->ida_ncfn++; if(retval < 0) break; if(nh == mxnh) break; /* If looping to try again, reset yy0 and yp0 if not converging. */ if(retval != IC_SLOW_CONVRG) { N_VScale(ONE, IDA_mem->ida_phi[0], IDA_mem->ida_yy0); N_VScale(ONE, IDA_mem->ida_phi[1], IDA_mem->ida_yp0); if (sensi_sim) { /* Reset yyS0 and ypS0. */ /* Copy phiS[0] and phiS[1] into yyS0 and ypS0. 
*/ for (is=0; isida_Ns; is++) { N_VScale(ONE, IDA_mem->ida_phiS[0][is], IDA_mem->ida_yyS0[is]); N_VScale(ONE, IDA_mem->ida_phiS[1][is], IDA_mem->ida_ypS0[is]); } } } hic *= PT1; IDA_mem->ida_cj = ONE/hic; IDA_mem->ida_hh = hic; } /* End of nh loop */ /* Break on failure */ if(retval != IDA_SUCCESS) break; /* Reset ewt, save yy0, yp0 in phi, and loop. */ ewtsetOK = IDA_mem->ida_efun(IDA_mem->ida_yy0, IDA_mem->ida_ewt, IDA_mem->ida_edata); if(ewtsetOK != 0) { retval = IDA_BAD_EWT; break; } N_VScale(ONE, IDA_mem->ida_yy0, IDA_mem->ida_phi[0]); N_VScale(ONE, IDA_mem->ida_yp0, IDA_mem->ida_phi[1]); if (sensi_sim) { /* Reevaluate ewtS. */ ewtsetOK = IDASensEwtSet(IDA_mem, IDA_mem->ida_yyS0, IDA_mem->ida_ewtS); if(ewtsetOK != 0) { retval = IDA_BAD_EWT; break; } /* Save yyS0 and ypS0. */ for (is=0; isida_Ns; is++) { N_VScale(ONE, IDA_mem->ida_yyS0[is], IDA_mem->ida_phiS[0][is]); N_VScale(ONE, IDA_mem->ida_ypS0[is], IDA_mem->ida_phiS[1][is]); } } } /* End of nwt loop */ /* Load the optional outputs. */ if(icopt == IDA_YA_YDP_INIT) IDA_mem->ida_hused = hic; /* On any failure, free memory, print error message and return */ if(retval != IDA_SUCCESS) { N_VDestroy(IDA_mem->ida_yy0); N_VDestroy(IDA_mem->ida_yp0); if(IDA_mem->ida_sensi) { N_VDestroyVectorArray(IDA_mem->ida_yyS0, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS0, IDA_mem->ida_Ns); } icret = IDAICFailFlag(IDA_mem, retval); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(icret); } /* Unless using the STAGGERED approach for sensitivities, return now */ if (!sensi_stg) { N_VDestroy(IDA_mem->ida_yy0); N_VDestroy(IDA_mem->ida_yp0); if(IDA_mem->ida_sensi) { N_VDestroyVectorArray(IDA_mem->ida_yyS0, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS0, IDA_mem->ida_Ns); } SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* Find consistent I.C. for sensitivities using a staggered approach */ /* Evaluate res at converged y, needed for future evaluations of sens. 
RHS If res() fails recoverably, treat it as a convergence failure and attempt the step again */ retval = IDA_mem->ida_res(IDA_mem->ida_t0, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta, IDA_mem->ida_user_data); IDA_mem->ida_nre++; if(retval < 0) { /* res function failed unrecoverably. */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_RES_FAIL); } if(retval > 0) { /* res function failed recoverably but no recovery possible. */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_FIRST_RES_FAIL); } /* Loop over nwt = number of evaluations of ewt vector. */ for(nwt = 1; nwt <= 2; nwt++) { /* Loop over nh = number of h values. */ for(nh = 1; nh <= mxnh; nh++) { retval = IDASensNlsIC(IDA_mem); if(retval == IDA_SUCCESS) break; /* Increment the number of the sensitivity related corrector convergence failures. */ IDA_mem->ida_ncfnS++; if(retval < 0) break; if(nh == mxnh) break; /* If looping to try again, reset yyS0 and ypS0 if not converging. */ if(retval != IC_SLOW_CONVRG) { for (is=0; isida_Ns; is++) { N_VScale(ONE, IDA_mem->ida_phiS[0][is], IDA_mem->ida_yyS0[is]); N_VScale(ONE, IDA_mem->ida_phiS[1][is], IDA_mem->ida_ypS0[is]); } } hic *= PT1; IDA_mem->ida_cj = ONE/hic; IDA_mem->ida_hh = hic; } /* End of nh loop */ /* Break on failure */ if(retval != IDA_SUCCESS) break; /* Since it was successful, reevaluate ewtS with the new values of yyS0, save yyS0 and ypS0 in phiS[0] and phiS[1] and loop one more time to check and maybe correct the new sensitivities IC with respect to the new weights. */ /* Reevaluate ewtS. */ ewtsetOK = IDASensEwtSet(IDA_mem, IDA_mem->ida_yyS0, IDA_mem->ida_ewtS); if(ewtsetOK != 0) { retval = IDA_BAD_EWT; break; } /* Save yyS0 and ypS0. */ for (is=0; isida_Ns; is++) { N_VScale(ONE, IDA_mem->ida_yyS0[is], IDA_mem->ida_phiS[0][is]); N_VScale(ONE, IDA_mem->ida_ypS0[is], IDA_mem->ida_phiS[1][is]); } } /* End of nwt loop */ /* Load the optional outputs. 
*/ if(icopt == IDA_YA_YDP_INIT) IDA_mem->ida_hused = hic; /* Free temporary space */ N_VDestroy(IDA_mem->ida_yy0); N_VDestroy(IDA_mem->ida_yp0); /* Here sensi is SUNTRUE, so deallocate sensitivity temporary vectors. */ N_VDestroyVectorArray(IDA_mem->ida_yyS0, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS0, IDA_mem->ida_Ns); /* On any failure, print message and return proper flag. */ if(retval != IDA_SUCCESS) { icret = IDAICFailFlag(IDA_mem, retval); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(icret); } /* Otherwise return success flag. */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * ================================================================= * PRIVATE FUNCTIONS IMPLEMENTATION * ================================================================= */ /* * ----------------------------------------------------------------- * IDANlsIC * ----------------------------------------------------------------- * IDANlsIC solves a nonlinear system for consistent initial * conditions. It calls IDANewtonIC to do most of the work. * * The return value is IDA_SUCCESS = 0 if no error occurred. 
* The error return values (positive) considered recoverable are: * IC_FAIL_RECOV if res, lsetup, or lsolve failed recoverably * IC_CONSTR_FAILED if the constraints could not be met * IC_LINESRCH_FAILED if the linesearch failed (either on steptol test * or on maxbacks test) * IC_CONV_FAIL if the Newton iterations failed to converge * IC_SLOW_CONVRG if the iterations are converging slowly * (failed the convergence test, but showed * norm reduction or convergence rate < 1) * The error return values (negative) considered non-recoverable are: * IDA_RES_FAIL if res had a non-recoverable error * IDA_FIRST_RES_FAIL if res failed recoverably on the first call * IDA_LSETUP_FAIL if lsetup had a non-recoverable error * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error * ----------------------------------------------------------------- */ static int IDANlsIC(IDAMem IDA_mem) { int retval, nj, is; N_Vector tv1, tv2, tv3; booleantype sensi_sim; /* Are we computing sensitivities with the IDA_SIMULTANEOUS approach? */ sensi_sim = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)); tv1 = IDA_mem->ida_ee; tv2 = IDA_mem->ida_tempv2; tv3 = IDA_mem->ida_phi[2]; /* Evaluate RHS. */ retval = IDA_mem->ida_res(IDA_mem->ida_t0, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta, IDA_mem->ida_user_data); IDA_mem->ida_nre++; if(retval < 0) return(IDA_RES_FAIL); if(retval > 0) return(IDA_FIRST_RES_FAIL); /* Save the residual. */ N_VScale(ONE, IDA_mem->ida_delta, IDA_mem->ida_savres); if(sensi_sim) { /*Evaluate sensitivity RHS and save it in savresS. 
*/ retval = IDA_mem->ida_resS(IDA_mem->ida_Ns, IDA_mem->ida_t0, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta, IDA_mem->ida_yyS0, IDA_mem->ida_ypS0, IDA_mem->ida_deltaS, IDA_mem->ida_user_dataS, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); IDA_mem->ida_nrSe++; if(retval < 0) return(IDA_RES_FAIL); if(retval > 0) return(IDA_FIRST_RES_FAIL); for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_deltaS[is], IDA_mem->ida_savresS[is]); } /* Loop over nj = number of linear solve Jacobian setups. */ for(nj = 1; nj <= IDA_mem->ida_maxnj; nj++) { /* If there is a setup routine, call it. */ if(IDA_mem->ida_lsetup) { IDA_mem->ida_nsetups++; retval = IDA_mem->ida_lsetup(IDA_mem, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta, tv1, tv2, tv3); if(retval < 0) return(IDA_LSETUP_FAIL); if(retval > 0) return(IC_FAIL_RECOV); } /* Call the Newton iteration routine, and return if successful. */ retval = IDANewtonIC(IDA_mem); if(retval == IDA_SUCCESS) return(IDA_SUCCESS); /* If converging slowly and lsetup is nontrivial, retry. */ if(retval == IC_SLOW_CONVRG && IDA_mem->ida_lsetup) { N_VScale(ONE, IDA_mem->ida_savres, IDA_mem->ida_delta); if(sensi_sim) for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_savresS[is], IDA_mem->ida_deltaS[is]); continue; } else { return(retval); } } /* End of nj loop */ /* No convergence after maxnj tries; return with retval=IC_SLOW_CONVRG */ return(retval); } /* * ----------------------------------------------------------------- * IDANewtonIC * ----------------------------------------------------------------- * IDANewtonIC performs the Newton iteration to solve for consistent * initial conditions. It calls IDALineSrch within each iteration. * On return, savres contains the current residual vector. * * The return value is IDA_SUCCESS = 0 if no error occurred. 
* The error return values (positive) considered recoverable are: * IC_FAIL_RECOV if res or lsolve failed recoverably * IC_CONSTR_FAILED if the constraints could not be met * IC_LINESRCH_FAILED if the linesearch failed (either on steptol test * or on maxbacks test) * IC_CONV_FAIL if the Newton iterations failed to converge * IC_SLOW_CONVRG if the iterations appear to be converging slowly. * They failed the convergence test, but showed * an overall norm reduction (by a factor of < 0.1) * or a convergence rate <= ICRATEMAX). * The error return values (negative) considered non-recoverable are: * IDA_RES_FAIL if res had a non-recoverable error * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error * ----------------------------------------------------------------- */ static int IDANewtonIC(IDAMem IDA_mem) { int retval, mnewt, is; realtype delnorm, fnorm, fnorm0, oldfnrm, rate; booleantype sensi_sim; /* Are we computing sensitivities with the IDA_SIMULTANEOUS approach? */ sensi_sim = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)); /* Set pointer for vector delnew */ IDA_mem->ida_delnew = IDA_mem->ida_phi[2]; /* Call the linear solve function to get the Newton step, delta. */ retval = IDA_mem->ida_lsolve(IDA_mem, IDA_mem->ida_delta, IDA_mem->ida_ewt, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_savres); if(retval < 0) return(IDA_LSOLVE_FAIL); if(retval > 0) return(IC_FAIL_RECOV); /* Compute the norm of the step. */ fnorm = IDAWrmsNorm(IDA_mem, IDA_mem->ida_delta, IDA_mem->ida_ewt, SUNFALSE); /* Call the lsolve function to get correction vectors deltaS. */ if (sensi_sim) { for(is=0;isida_Ns;is++) { retval = IDA_mem->ida_lsolve(IDA_mem, IDA_mem->ida_deltaS[is], IDA_mem->ida_ewtS[is], IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_savres); if(retval < 0) return(IDA_LSOLVE_FAIL); if(retval > 0) return(IC_FAIL_RECOV); } /* Update the norm of delta. 
*/ fnorm = IDASensWrmsNormUpdate(IDA_mem, fnorm, IDA_mem->ida_deltaS, IDA_mem->ida_ewtS, SUNFALSE); } /* Test for convergence. Return now if the norm is small. */ if(IDA_mem->ida_sysindex == 0) fnorm *= IDA_mem->ida_tscale * SUNRabs(IDA_mem->ida_cj); if(fnorm <= IDA_mem->ida_epsNewt) return(IDA_SUCCESS); fnorm0 = fnorm; /* Initialize rate to avoid compiler warning message */ rate = ZERO; /* Newton iteration loop */ for(mnewt = 0; mnewt < IDA_mem->ida_maxnit; mnewt++) { IDA_mem->ida_nni++; delnorm = fnorm; oldfnrm = fnorm; /* Call the Linesearch function and return if it failed. */ retval = IDALineSrch(IDA_mem, &delnorm, &fnorm); if(retval != IDA_SUCCESS) return(retval); /* Set the observed convergence rate and test for convergence. */ rate = fnorm/oldfnrm; if(fnorm <= IDA_mem->ida_epsNewt) return(IDA_SUCCESS); /* If not converged, copy new step vector, and loop. */ N_VScale(ONE, IDA_mem->ida_delnew, IDA_mem->ida_delta); if(sensi_sim) { /* Update the iteration's step for sensitivities. */ for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_delnewS[is], IDA_mem->ida_deltaS[is]); } } /* End of Newton iteration loop */ /* Return either IC_SLOW_CONVRG or recoverable fail flag. */ if(rate <= ICRATEMAX || fnorm < PT1*fnorm0) return(IC_SLOW_CONVRG); return(IC_CONV_FAIL); } /* * ----------------------------------------------------------------- * IDALineSrch * ----------------------------------------------------------------- * IDALineSrch performs the Linesearch algorithm with the * calculation of consistent initial conditions. * * On entry, yy0 and yp0 are the current values of y and y', the * Newton step is delta, the current residual vector F is savres, * delnorm is WRMS-norm(delta), and fnorm is the norm of the vector * J-inverse F. * * On a successful return, yy0, yp0, and savres have been updated, * delnew contains the current value of J-inverse F, and fnorm is * WRMS-norm(delnew). * * The return value is IDA_SUCCESS = 0 if no error occurred. 
* The error return values (positive) considered recoverable are: * IC_FAIL_RECOV if res or lsolve failed recoverably * IC_CONSTR_FAILED if the constraints could not be met * IC_LINESRCH_FAILED if the linesearch failed (either on steptol test * or on maxbacks test) * The error return values (negative) considered non-recoverable are: * IDA_RES_FAIL if res had a non-recoverable error * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error * ----------------------------------------------------------------- */ static int IDALineSrch(IDAMem IDA_mem, realtype *delnorm, realtype *fnorm) { booleantype conOK; int retval, is, nbacks; realtype f1norm, fnormp, f1normp, ratio, lambda, minlam, slpi; N_Vector mc; booleantype sensi_sim; /* Initialize work space pointers, f1norm, ratio. (Use of mc in constraint check does not conflict with ypnew.) */ mc = IDA_mem->ida_ee; IDA_mem->ida_dtemp = IDA_mem->ida_phi[3]; IDA_mem->ida_ynew = IDA_mem->ida_tempv2; IDA_mem->ida_ypnew = IDA_mem->ida_ee; f1norm = (*fnorm)*(*fnorm)*HALF; ratio = ONE; /* If there are constraints, check and reduce step if necessary. */ if(IDA_mem->ida_constraintsSet) { /* Update y and check constraints. */ IDANewy(IDA_mem); conOK = N_VConstrMask(IDA_mem->ida_constraints, IDA_mem->ida_ynew, mc); if(!conOK) { /* Not satisfied. Compute scaled step to satisfy constraints. */ N_VProd(mc, IDA_mem->ida_delta, IDA_mem->ida_dtemp); ratio = PT99*N_VMinQuotient(IDA_mem->ida_yy0, IDA_mem->ida_dtemp); (*delnorm) *= ratio; if((*delnorm) <= IDA_mem->ida_steptol) return(IC_CONSTR_FAILED); N_VScale(ratio, IDA_mem->ida_delta, IDA_mem->ida_delta); } } /* End of constraints check */ slpi = -TWO*f1norm*ratio; minlam = IDA_mem->ida_steptol / (*delnorm); lambda = ONE; nbacks = 0; /* Are we computing sensitivities with the IDA_SIMULTANEOUS approach? */ sensi_sim = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)); /* In IDA_Y_INIT case, set ypnew = yp0 (fixed) for linesearch. 
*/ if(IDA_mem->ida_icopt == IDA_Y_INIT) { N_VScale(ONE, IDA_mem->ida_yp0, IDA_mem->ida_ypnew); /* do the same for sensitivities. */ if(sensi_sim) { for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_ypS0[is], IDA_mem->ida_ypS0new[is]); } } /* Loop on linesearch variable lambda. */ for(;;) { if (nbacks == IDA_mem->ida_maxbacks) return(IC_LINESRCH_FAILED); /* Get new (y,y') = (ynew,ypnew) and norm of new function value. */ IDANewyyp(IDA_mem, lambda); retval = IDAfnorm(IDA_mem, &fnormp); if(retval != IDA_SUCCESS) return(retval); /* If lsoff option is on, break out. */ if(IDA_mem->ida_lsoff) break; /* Do alpha-condition test. */ f1normp = fnormp*fnormp*HALF; if(f1normp <= f1norm + ALPHALS*slpi*lambda) break; if(lambda < minlam) return(IC_LINESRCH_FAILED); lambda /= TWO; IDA_mem->ida_nbacktr++; nbacks++; } /* End of breakout linesearch loop */ /* Update yy0, yp0. */ N_VScale(ONE, IDA_mem->ida_ynew, IDA_mem->ida_yy0); if(sensi_sim) { /* Update yyS0 and ypS0. */ for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_yyS0new[is], IDA_mem->ida_yyS0[is]); } if(IDA_mem->ida_icopt == IDA_YA_YDP_INIT) { N_VScale(ONE, IDA_mem->ida_ypnew, IDA_mem->ida_yp0); if(sensi_sim) for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_ypS0new[is], IDA_mem->ida_ypS0[is]); } /* Update fnorm, then return. */ *fnorm = fnormp; return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * IDAfnorm * ----------------------------------------------------------------- * IDAfnorm computes the norm of the current function value, by * evaluating the DAE residual function, calling the linear * system solver, and computing a WRMS-norm. * * On return, savres contains the current residual vector F, and * delnew contains J-inverse F. * * The return value is IDA_SUCCESS = 0 if no error occurred, or * IC_FAIL_RECOV if res or lsolve failed recoverably, or * IDA_RES_FAIL if res had a non-recoverable error, or * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error. 
* ----------------------------------------------------------------- */ static int IDAfnorm(IDAMem IDA_mem, realtype *fnorm) { int retval, is; /* Get residual vector F, return if failed, and save F in savres. */ retval = IDA_mem->ida_res(IDA_mem->ida_t0, IDA_mem->ida_ynew, IDA_mem->ida_ypnew, IDA_mem->ida_delnew, IDA_mem->ida_user_data); IDA_mem->ida_nre++; if(retval < 0) return(IDA_RES_FAIL); if(retval > 0) return(IC_FAIL_RECOV); N_VScale(ONE, IDA_mem->ida_delnew, IDA_mem->ida_savres); /* Call the linear solve function to get J-inverse F; return if failed. */ retval = IDA_mem->ida_lsolve(IDA_mem, IDA_mem->ida_delnew, IDA_mem->ida_ewt, IDA_mem->ida_ynew, IDA_mem->ida_ypnew, IDA_mem->ida_savres); if(retval < 0) return(IDA_LSOLVE_FAIL); if(retval > 0) return(IC_FAIL_RECOV); /* Compute the WRMS-norm. */ *fnorm = IDAWrmsNorm(IDA_mem, IDA_mem->ida_delnew, IDA_mem->ida_ewt, SUNFALSE); /* Are we computing SENSITIVITIES with the IDA_SIMULTANEOUS approach? */ if(IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)) { /* Evaluate the residual for sensitivities. */ retval = IDA_mem->ida_resS(IDA_mem->ida_Ns, IDA_mem->ida_t0, IDA_mem->ida_ynew, IDA_mem->ida_ypnew, IDA_mem->ida_savres, IDA_mem->ida_yyS0new, IDA_mem->ida_ypS0new, IDA_mem->ida_delnewS, IDA_mem->ida_user_dataS, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); IDA_mem->ida_nrSe++; if(retval < 0) return(IDA_RES_FAIL); if(retval > 0) return(IC_FAIL_RECOV); /* Save delnewS in savresS. */ for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_delnewS[is], IDA_mem->ida_savresS[is]); /* Call the linear solve function to get J-inverse deltaS. */ for(is=0; isida_Ns; is++) { retval = IDA_mem->ida_lsolve(IDA_mem, IDA_mem->ida_delnewS[is], IDA_mem->ida_ewtS[is], IDA_mem->ida_ynew, IDA_mem->ida_ypnew, IDA_mem->ida_savres); if(retval < 0) return(IDA_LSOLVE_FAIL); if(retval > 0) return(IC_FAIL_RECOV); } /* Include sensitivities in norm. 
*/ *fnorm = IDASensWrmsNormUpdate(IDA_mem, *fnorm, IDA_mem->ida_delnewS, IDA_mem->ida_ewtS, SUNFALSE); } /* Rescale norm if index = 0. */ if(IDA_mem->ida_sysindex == 0) (*fnorm) *= IDA_mem->ida_tscale * SUNRabs(IDA_mem->ida_cj); return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * IDANewyyp * ----------------------------------------------------------------- * IDANewyyp updates the vectors ynew and ypnew from yy0 and yp0, * using the current step vector lambda*delta, in a manner * depending on icopt and the input id vector. * * The return value is always IDA_SUCCESS = 0. * ----------------------------------------------------------------- */ static int IDANewyyp(IDAMem IDA_mem, realtype lambda) { int retval; retval = IDA_SUCCESS; /* IDA_YA_YDP_INIT case: ynew = yy0 - lambda*delta where id_i = 0 ypnew = yp0 - cj*lambda*delta where id_i = 1. */ if(IDA_mem->ida_icopt == IDA_YA_YDP_INIT) { N_VProd(IDA_mem->ida_id, IDA_mem->ida_delta, IDA_mem->ida_dtemp); N_VLinearSum(ONE, IDA_mem->ida_yp0, -IDA_mem->ida_cj*lambda, IDA_mem->ida_dtemp, IDA_mem->ida_ypnew); N_VLinearSum(ONE, IDA_mem->ida_delta, -ONE, IDA_mem->ida_dtemp, IDA_mem->ida_dtemp); N_VLinearSum(ONE, IDA_mem->ida_yy0, -lambda, IDA_mem->ida_dtemp, IDA_mem->ida_ynew); }else if(IDA_mem->ida_icopt == IDA_Y_INIT) { /* IDA_Y_INIT case: ynew = yy0 - lambda*delta. (ypnew = yp0 preset.) */ N_VLinearSum(ONE, IDA_mem->ida_yy0, -lambda, IDA_mem->ida_delta, IDA_mem->ida_ynew); } if(IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)) retval = IDASensNewyyp(IDA_mem, lambda); return(retval); } /* * ----------------------------------------------------------------- * IDANewy * ----------------------------------------------------------------- * IDANewy updates the vector ynew from yy0, * using the current step vector delta, in a manner * depending on icopt and the input id vector. * * The return value is always IDA_SUCCESS = 0. 
* ----------------------------------------------------------------- */ static int IDANewy(IDAMem IDA_mem) { /* IDA_YA_YDP_INIT case: ynew = yy0 - delta where id_i = 0. */ if(IDA_mem->ida_icopt == IDA_YA_YDP_INIT) { N_VProd(IDA_mem->ida_id, IDA_mem->ida_delta, IDA_mem->ida_dtemp); N_VLinearSum(ONE, IDA_mem->ida_delta, -ONE, IDA_mem->ida_dtemp, IDA_mem->ida_dtemp); N_VLinearSum(ONE, IDA_mem->ida_yy0, -ONE, IDA_mem->ida_dtemp, IDA_mem->ida_ynew); return(IDA_SUCCESS); } /* IDA_Y_INIT case: ynew = yy0 - delta. */ N_VLinearSum(ONE, IDA_mem->ida_yy0, -ONE, IDA_mem->ida_delta, IDA_mem->ida_ynew); return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Sensitivity I.C. functions * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * IDASensNlsIC * ----------------------------------------------------------------- * IDASensNlsIC solves nonlinear systems for sensitivities consistent * initial conditions. It mainly relies on IDASensNewtonIC. * * The return value is IDA_SUCCESS = 0 if no error occurred. 
* The error return values (positive) considered recoverable are: * IC_FAIL_RECOV if res, lsetup, or lsolve failed recoverably * IC_CONSTR_FAILED if the constraints could not be met * IC_LINESRCH_FAILED if the linesearch failed (either on steptol test * or on maxbacks test) * IC_CONV_FAIL if the Newton iterations failed to converge * IC_SLOW_CONVRG if the iterations are converging slowly * (failed the convergence test, but showed * norm reduction or convergence rate < 1) * The error return values (negative) considered non-recoverable are: * IDA_RES_FAIL if res had a non-recoverable error * IDA_FIRST_RES_FAIL if res failed recoverably on the first call * IDA_LSETUP_FAIL if lsetup had a non-recoverable error * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error * ----------------------------------------------------------------- */ static int IDASensNlsIC(IDAMem IDA_mem) { int retval; int is, nj; retval = IDA_mem->ida_resS(IDA_mem->ida_Ns, IDA_mem->ida_t0, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta, IDA_mem->ida_yyS0, IDA_mem->ida_ypS0, IDA_mem->ida_deltaS, IDA_mem->ida_user_dataS, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); IDA_mem->ida_nrSe++; if(retval < 0) return(IDA_RES_FAIL); if(retval > 0) return(IDA_FIRST_RES_FAIL); /* Save deltaS */ for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_deltaS[is], IDA_mem->ida_savresS[is]); /* Loop over nj = number of linear solve Jacobian setups. */ for(nj = 1; nj <= 2; nj++) { /* Call the Newton iteration routine */ retval = IDASensNewtonIC(IDA_mem); if(retval == IDA_SUCCESS) return(IDA_SUCCESS); /* If converging slowly and lsetup is nontrivial and this is the first pass, update Jacobian and retry. */ if(retval == IC_SLOW_CONVRG && IDA_mem->ida_lsetup && nj==1) { /* Restore deltaS. 
*/ for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_savresS[is], IDA_mem->ida_deltaS[is]); IDA_mem->ida_nsetupsS++; retval = IDA_mem->ida_lsetup(IDA_mem, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); if(retval < 0) return(IDA_LSETUP_FAIL); if(retval > 0) return(IC_FAIL_RECOV); continue; } else { return(retval); } } return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * IDASensNewtonIC * ----------------------------------------------------------------- * IDANewtonIC performs the Newton iteration to solve for * sensitivities consistent initial conditions. It calls * IDASensLineSrch within each iteration. * On return, savresS contains the current residual vectors. * * The return value is IDA_SUCCESS = 0 if no error occurred. * The error return values (positive) considered recoverable are: * IC_FAIL_RECOV if res or lsolve failed recoverably * IC_CONSTR_FAILED if the constraints could not be met * IC_LINESRCH_FAILED if the linesearch failed (either on steptol test * or on maxbacks test) * IC_CONV_FAIL if the Newton iterations failed to converge * IC_SLOW_CONVRG if the iterations appear to be converging slowly. * They failed the convergence test, but showed * an overall norm reduction (by a factor of < 0.1) * or a convergence rate <= ICRATEMAX). * The error return values (negative) considered non-recoverable are: * IDA_RES_FAIL if res had a non-recoverable error * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error * ----------------------------------------------------------------- */ static int IDASensNewtonIC(IDAMem IDA_mem) { int retval, is, mnewt; realtype delnorm, fnorm, fnorm0, oldfnrm, rate; for(is=0;isida_Ns;is++) { /* Call the linear solve function to get the Newton step, delta. 
*/ retval = IDA_mem->ida_lsolve(IDA_mem, IDA_mem->ida_deltaS[is], IDA_mem->ida_ewtS[is], IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta); if(retval < 0) return(IDA_LSOLVE_FAIL); if(retval > 0) return(IC_FAIL_RECOV); } /* Compute the norm of the step and return if it is small enough */ fnorm = IDASensWrmsNorm(IDA_mem, IDA_mem->ida_deltaS, IDA_mem->ida_ewtS, SUNFALSE); if(IDA_mem->ida_sysindex == 0) fnorm *= IDA_mem->ida_tscale * SUNRabs(IDA_mem->ida_cj); if(fnorm <= IDA_mem->ida_epsNewt) return(IDA_SUCCESS); fnorm0 = fnorm; rate = ZERO; /* Newton iteration loop */ for(mnewt = 0; mnewt < IDA_mem->ida_maxnit; mnewt++) { IDA_mem->ida_nniS++; delnorm = fnorm; oldfnrm = fnorm; /* Call the Linesearch function and return if it failed. */ retval = IDASensLineSrch(IDA_mem, &delnorm, &fnorm); if(retval != IDA_SUCCESS) return(retval); /* Set the observed convergence rate and test for convergence. */ rate = fnorm/oldfnrm; if(fnorm <= IDA_mem->ida_epsNewt) return(IDA_SUCCESS); /* If not converged, copy new step vectors, and loop. */ for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_delnewS[is], IDA_mem->ida_deltaS[is]); } /* End of Newton iteration loop */ /* Return either IC_SLOW_CONVRG or recoverable fail flag. */ if(rate <= ICRATEMAX || fnorm < PT1*fnorm0) return(IC_SLOW_CONVRG); return(IC_CONV_FAIL); } /* * ----------------------------------------------------------------- * IDASensLineSrch * ----------------------------------------------------------------- * IDASensLineSrch performs the Linesearch algorithm with the * calculation of consistent initial conditions for sensitivities * systems. 
* * On entry, yyS0 and ypS0 contain the current values, the Newton * steps are contained in deltaS, the current residual vectors FS are * savresS, delnorm is sens-WRMS-norm(deltaS), and fnorm is * max { WRMS-norm( J-inverse FS[is] ) : is=1,2,...,Ns } * * On a successful return, yy0, yp0, and savres have been updated, * delnew contains the current values of J-inverse FS, and fnorm is * max { WRMS-norm(delnewS[is]) : is = 1,2,...Ns } * * The return value is IDA_SUCCESS = 0 if no error occurred. * The error return values (positive) considered recoverable are: * IC_FAIL_RECOV if res or lsolve failed recoverably * IC_CONSTR_FAILED if the constraints could not be met * IC_LINESRCH_FAILED if the linesearch failed (either on steptol test * or on maxbacks test) * The error return values (negative) considered non-recoverable are: * IDA_RES_FAIL if res had a non-recoverable error * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error * ----------------------------------------------------------------- */ static int IDASensLineSrch(IDAMem IDA_mem, realtype *delnorm, realtype *fnorm) { int is, retval, nbacks; realtype f1norm, fnormp, f1normp, slpi, minlam; realtype lambda, ratio; /* Set work space pointer. */ IDA_mem->ida_dtemp = IDA_mem->ida_phi[3]; f1norm = (*fnorm)*(*fnorm)*HALF; /* Initialize local variables. */ ratio = ONE; slpi = -TWO*f1norm*ratio; minlam = IDA_mem->ida_steptol / (*delnorm); lambda = ONE; nbacks = 0; for(;;) { if (nbacks == IDA_mem->ida_maxbacks) return(IC_LINESRCH_FAILED); /* Get new iteration in (ySnew, ypSnew). */ IDASensNewyyp(IDA_mem, lambda); /* Get the norm of new function value. */ retval = IDASensfnorm(IDA_mem, &fnormp); if (retval!=IDA_SUCCESS) return retval; /* If lsoff option is on, break out. */ if(IDA_mem->ida_lsoff) break; /* Do alpha-condition test. 
*/ f1normp = fnormp*fnormp*HALF; if(f1normp <= f1norm + ALPHALS*slpi*lambda) break; if(lambda < minlam) return(IC_LINESRCH_FAILED); lambda /= TWO; IDA_mem->ida_nbacktr++; nbacks++; } /* Update yyS0, ypS0 and fnorm and return. */ for(is=0; isida_Ns; is++) { N_VScale(ONE, IDA_mem->ida_yyS0new[is], IDA_mem->ida_yyS0[is]); } if (IDA_mem->ida_icopt == IDA_YA_YDP_INIT) for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_ypS0new[is], IDA_mem->ida_ypS0[is]); *fnorm = fnormp; return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * IDASensfnorm * ----------------------------------------------------------------- * IDASensfnorm computes the norm of the current function value, by * evaluating the sensitivity residual function, calling the linear * system solver, and computing a WRMS-norm. * * On return, savresS contains the current residual vectors FS, and * delnewS contains J-inverse FS. * * The return value is IDA_SUCCESS = 0 if no error occurred, or * IC_FAIL_RECOV if res or lsolve failed recoverably, or * IDA_RES_FAIL if res had a non-recoverable error, or * IDA_LSOLVE_FAIL if lsolve had a non-recoverable error. 
* ----------------------------------------------------------------- */ static int IDASensfnorm(IDAMem IDA_mem, realtype *fnorm) { int is, retval; /* Get sensitivity residual */ retval = IDA_mem->ida_resS(IDA_mem->ida_Ns, IDA_mem->ida_t0, IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta, IDA_mem->ida_yyS0new, IDA_mem->ida_ypS0new, IDA_mem->ida_delnewS, IDA_mem->ida_user_dataS, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); IDA_mem->ida_nrSe++; if(retval < 0) return(IDA_RES_FAIL); if(retval > 0) return(IC_FAIL_RECOV); for(is=0; isida_Ns; is++) N_VScale(ONE, IDA_mem->ida_delnewS[is], IDA_mem->ida_savresS[is]); /* Call linear solve function */ for(is=0; isida_Ns; is++) { retval = IDA_mem->ida_lsolve(IDA_mem, IDA_mem->ida_delnewS[is], IDA_mem->ida_ewtS[is], IDA_mem->ida_yy0, IDA_mem->ida_yp0, IDA_mem->ida_delta); if(retval < 0) return(IDA_LSOLVE_FAIL); if(retval > 0) return(IC_FAIL_RECOV); } /* Compute the WRMS-norm; rescale if index = 0. */ *fnorm = IDASensWrmsNorm(IDA_mem, IDA_mem->ida_delnewS, IDA_mem->ida_ewtS, SUNFALSE); if(IDA_mem->ida_sysindex == 0) (*fnorm) *= IDA_mem->ida_tscale * SUNRabs(IDA_mem->ida_cj); return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * IDASensNewyyp * ----------------------------------------------------------------- * IDASensNewyyp computes the Newton updates for each of the * sensitivities systems using the current step vector lambda*delta, * in a manner depending on icopt and the input id vector. * * The return value is always IDA_SUCCESS = 0. * ----------------------------------------------------------------- */ static int IDASensNewyyp(IDAMem IDA_mem, realtype lambda) { int is; if(IDA_mem->ida_icopt == IDA_YA_YDP_INIT) { /* IDA_YA_YDP_INIT case: - ySnew = yS0 - lambda*deltaS where id_i = 0 - ypSnew = ypS0 - cj*lambda*delta where id_i = 1. */ for(is=0; isida_Ns; is++) { /* It is ok to use dtemp as temporary vector here. 
*/ N_VProd(IDA_mem->ida_id, IDA_mem->ida_deltaS[is], IDA_mem->ida_dtemp); N_VLinearSum(ONE, IDA_mem->ida_ypS0[is], -IDA_mem->ida_cj*lambda, IDA_mem->ida_dtemp, IDA_mem->ida_ypS0new[is]); N_VLinearSum(ONE, IDA_mem->ida_deltaS[is], -ONE, IDA_mem->ida_dtemp, IDA_mem->ida_dtemp); N_VLinearSum(ONE, IDA_mem->ida_yyS0[is], -lambda, IDA_mem->ida_dtemp, IDA_mem->ida_yyS0new[is]); } /* end loop is */ }else { /* IDA_Y_INIT case: - ySnew = yS0 - lambda*deltaS. (ypnew = yp0 preset.) */ for(is=0; isida_Ns; is++) N_VLinearSum(ONE, IDA_mem->ida_yyS0[is], -lambda, IDA_mem->ida_deltaS[is], IDA_mem->ida_yyS0new[is]); } /* end loop is */ return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * IDAICFailFlag * ----------------------------------------------------------------- * IDAICFailFlag prints a message and sets the IDACalcIC return * value appropriate to the flag retval returned by IDANlsIC. * ----------------------------------------------------------------- */ static int IDAICFailFlag(IDAMem IDA_mem, int retval) { /* Depending on retval, print error message and return error flag. 
*/ switch(retval) { case IDA_RES_FAIL: IDAProcessError(IDA_mem, IDA_RES_FAIL, "IDAS", "IDACalcIC", MSG_IC_RES_NONREC); return(IDA_RES_FAIL); case IDA_FIRST_RES_FAIL: IDAProcessError(IDA_mem, IDA_FIRST_RES_FAIL, "IDAS", "IDACalcIC", MSG_IC_RES_FAIL); return(IDA_FIRST_RES_FAIL); case IDA_LSETUP_FAIL: IDAProcessError(IDA_mem, IDA_LSETUP_FAIL, "IDAS", "IDACalcIC", MSG_IC_SETUP_FAIL); return(IDA_LSETUP_FAIL); case IDA_LSOLVE_FAIL: IDAProcessError(IDA_mem, IDA_LSOLVE_FAIL, "IDAS", "IDACalcIC", MSG_IC_SOLVE_FAIL); return(IDA_LSOLVE_FAIL); case IC_FAIL_RECOV: IDAProcessError(IDA_mem, IDA_NO_RECOVERY, "IDAS", "IDACalcIC", MSG_IC_NO_RECOVERY); return(IDA_NO_RECOVERY); case IC_CONSTR_FAILED: IDAProcessError(IDA_mem, IDA_CONSTR_FAIL, "IDAS", "IDACalcIC", MSG_IC_FAIL_CONSTR); return(IDA_CONSTR_FAIL); case IC_LINESRCH_FAILED: IDAProcessError(IDA_mem, IDA_LINESEARCH_FAIL, "IDAS", "IDACalcIC", MSG_IC_FAILED_LINS); return(IDA_LINESEARCH_FAIL); case IC_CONV_FAIL: IDAProcessError(IDA_mem, IDA_CONV_FAIL, "IDAS", "IDACalcIC", MSG_IC_CONV_FAILED); return(IDA_CONV_FAIL); case IC_SLOW_CONVRG: IDAProcessError(IDA_mem, IDA_CONV_FAIL, "IDAS", "IDACalcIC", MSG_IC_CONV_FAILED); return(IDA_CONV_FAIL); case IDA_BAD_EWT: IDAProcessError(IDA_mem, IDA_BAD_EWT, "IDAS", "IDACalcIC", MSG_IC_BAD_EWT); return(IDA_BAD_EWT); } return -99; } StanHeaders/src/idas/idas.c0000644000176200001440000075162314645137106015273 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the main IDAS solver. 
* ----------------------------------------------------------------- * * EXPORTED FUNCTIONS * ------------------ * Creation, allocation and re-initialization functions * IDACreate * IDAInit * IDAReInit * IDAQuadInit * IDAQuadReInit * IDAQuadSStolerances * IDAQuadSVtolerances * IDASensInit * IDASensReInit * IDASensToggleOff * IDASensSStolerances * IDASensSVtolerances * IDASensEEtolerances * IDAQuadSensInit * IDAQuadSensReInit * IDARootInit * Main solver function * IDASolve * Interpolated output and extraction functions * IDAGetDky * IDAGetQuad * IDAGetQuadDky * IDAGetSens * IDAGetSens1 * IDAGetSensDky * IDAGetSensDky1 * Deallocation functions * IDAFree * IDAQuadFree * IDASensFree * IDAQuadSensFree * * PRIVATE FUNCTIONS * ----------------- * IDACheckNvector * Memory allocation/deallocation * IDAAllocVectors * IDAFreeVectors * IDAQuadAllocVectors * IDAQuadFreeVectors * IDASensAllocVectors * IDASensFreeVectors * IDAQuadSensAllocVectors * IDAQuadSensFreeVectors * Initial setup * IDAInitialSetup * IDAEwtSet * IDAEwtSetSS * IDAEwtSetSV * IDAQuadEwtSet * IDAQuadEwtSetSS * IDAQuadEwtSetSV * IDASensEwtSet * IDASensEwtSetEE * IDASensEwtSetSS * IDASensEwtSetSV * IDAQuadSensEwtSet * IDAQuadSensEwtSetEE * IDAQuadSensEwtSetSS * IDAQuadSensEwtSetSV * Stopping tests * IDAStopTest1 * IDAStopTest2 * Error handler * IDAHandleFailure * Main IDAStep function * IDAStep * IDASetCoeffs * Nonlinear solver functions * IDANls * IDAPredict * IDAQuadNls * IDAQuadSensNls * IDAQuadPredict * IDAQuadSensPredict * IDASensNls * IDASensPredict * Error test * IDATestError * IDAQuadTestError * IDASensTestError * IDAQuadSensTestError * IDARestore * Handler for convergence and/or error test failures * IDAHandleNFlag * IDAReset * Function called after a successful step * IDACompleteStep * Get solution * IDAGetSolution * Norm functions * IDAWrmsNorm * IDASensWrmsNorm * IDAQuadSensWrmsNorm * IDAQuadWrmsNormUpdate * IDASensWrmsNormUpdate * IDAQuadSensWrmsNormUpdate * Functions for rootfinding * IDARcheck1 * 
IDARcheck2 * IDARcheck3 * IDARootfind * IDA Error message handling functions * IDAProcessError * IDAErrHandler * Internal DQ approximations for sensitivity RHS * IDASensResDQ * IDASensRes1DQ * IDAQuadSensResDQ * IDAQuadSensRes1DQ * ----------------------------------------------------------------- */ /* * ================================================================= * IMPORTED HEADER FILES * ================================================================= */ #include #include #include #include #include "idas_impl.h" #include #include #include /* * ================================================================= * IDAS PRIVATE CONSTANTS * ================================================================= */ #define ZERO RCONST(0.0) /* real 0.0 */ #define HALF RCONST(0.5) /* real 0.5 */ #define QUARTER RCONST(0.25) /* real 0.25 */ #define TWOTHIRDS RCONST(0.667) /* real 2/3 */ #define ONE RCONST(1.0) /* real 1.0 */ #define ONEPT5 RCONST(1.5) /* real 1.5 */ #define TWO RCONST(2.0) /* real 2.0 */ #define FOUR RCONST(4.0) /* real 4.0 */ #define FIVE RCONST(5.0) /* real 5.0 */ #define TEN RCONST(10.0) /* real 10.0 */ #define TWELVE RCONST(12.0) /* real 12.0 */ #define TWENTY RCONST(20.0) /* real 20.0 */ #define HUNDRED RCONST(100.0) /* real 100.0 */ #define PT9 RCONST(0.9) /* real 0.9 */ #define PT99 RCONST(0.99) /* real 0.99 */ #define PT1 RCONST(0.1) /* real 0.1 */ #define PT01 RCONST(0.01) /* real 0.01 */ #define PT001 RCONST(0.001) /* real 0.001 */ #define PT0001 RCONST(0.0001) /* real 0.0001 */ /* * ================================================================= * IDAS ROUTINE-SPECIFIC CONSTANTS * ================================================================= */ /* * Control constants for lower-level functions used by IDASolve * ------------------------------------------------------------ */ /* IDAStep control constants */ #define PREDICT_AGAIN 20 /* Return values for lower level routines used by IDASolve */ #define CONTINUE_STEPS +99 /* IDACompleteStep 
constants */ #define UNSET -1 #define LOWER +1 #define RAISE +2 #define MAINTAIN +3 /* IDATestError constants */ #define ERROR_TEST_FAIL +7 /* * Control constants for lower-level rootfinding functions * ------------------------------------------------------- */ #define RTFOUND +1 #define CLOSERT +3 /* * Control constants for sensitivity DQ * ------------------------------------ */ #define CENTERED1 +1 #define CENTERED2 +2 #define FORWARD1 +3 #define FORWARD2 +4 /* * Algorithmic constants * --------------------- */ #define MXNCF 10 /* max number of convergence failures allowed */ #define MXNEF 10 /* max number of error test failures allowed */ #define MAXNH 5 /* max. number of h tries in IC calc. */ #define MAXNJ 4 /* max. number of J tries in IC calc. */ #define MAXNI 10 /* max. Newton iterations in IC calc. */ #define EPCON RCONST(0.33) /* Newton convergence test constant */ #define MAXBACKS 100 /* max backtracks per Newton step in IDACalcIC */ #define XRATE RCONST(0.25) /* constant for updating Jacobian/preconditioner */ /*=================================================================*/ /* Shortcuts */ /*=================================================================*/ #define IDA_PROFILER IDA_mem->ida_sunctx->profiler /* * ================================================================= * PRIVATE FUNCTION PROTOTYPES * ================================================================= */ static booleantype IDACheckNvector(N_Vector tmpl); /* Memory allocation/deallocation */ static booleantype IDAAllocVectors(IDAMem IDA_mem, N_Vector tmpl); static void IDAFreeVectors(IDAMem IDA_mem); static booleantype IDAQuadAllocVectors(IDAMem IDA_mem, N_Vector tmpl); static void IDAQuadFreeVectors(IDAMem IDA_mem); static booleantype IDASensAllocVectors(IDAMem IDA_mem, N_Vector tmpl); static void IDASensFreeVectors(IDAMem IDA_mem); static booleantype IDAQuadSensAllocVectors(IDAMem ida_mem, N_Vector tmpl); static void IDAQuadSensFreeVectors(IDAMem ida_mem); /* Initial setup 
*/ int IDAInitialSetup(IDAMem IDA_mem); static int IDAEwtSetSS(IDAMem IDA_mem, N_Vector ycur, N_Vector weight); static int IDAEwtSetSV(IDAMem IDA_mem, N_Vector ycur, N_Vector weight); static int IDAQuadEwtSet(IDAMem IDA_mem, N_Vector qcur, N_Vector weightQ); static int IDAQuadEwtSetSS(IDAMem IDA_mem, N_Vector qcur, N_Vector weightQ); static int IDAQuadEwtSetSV(IDAMem IDA_mem, N_Vector qcur, N_Vector weightQ); /* Used in IC for sensitivities. */ int IDASensEwtSet(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); static int IDASensEwtSetEE(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); static int IDASensEwtSetSS(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); static int IDASensEwtSetSV(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); int IDAQuadSensEwtSet(IDAMem IDA_mem, N_Vector *yQScur, N_Vector *weightQS); static int IDAQuadSensEwtSetEE(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); static int IDAQuadSensEwtSetSS(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); static int IDAQuadSensEwtSetSV(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS); /* Main IDAStep function */ static int IDAStep(IDAMem IDA_mem); /* Function called at beginning of step */ static void IDASetCoeffs(IDAMem IDA_mem, realtype *ck); /* Nonlinear solver functions */ static void IDAPredict(IDAMem IDA_mem); static void IDAQuadPredict(IDAMem IDA_mem); static void IDASensPredict(IDAMem IDA_mem, N_Vector *yySens, N_Vector *ypSens); static void IDAQuadSensPredict(IDAMem IDA_mem, N_Vector *yQS, N_Vector *ypQS); static int IDANls(IDAMem IDA_mem); static int IDASensNls(IDAMem IDA_mem); static int IDAQuadNls(IDAMem IDA_mem); static int IDAQuadSensNls(IDAMem IDA_mem); /* Error test */ static int IDATestError(IDAMem IDA_mem, realtype ck, realtype *err_k, realtype *err_km1, realtype *err_km2); static int IDAQuadTestError(IDAMem IDA_mem, realtype ck, realtype *err_k, realtype *err_km1, realtype *err_km2); static int IDASensTestError(IDAMem IDA_mem, realtype ck, realtype *err_k, 
realtype *err_km1, realtype *err_km2); static int IDAQuadSensTestError(IDAMem IDA_mem, realtype ck, realtype *err_k, realtype *err_km1, realtype *err_km2); /* Handling of convergence and/or error test failures */ static void IDARestore(IDAMem IDA_mem, realtype saved_t); static int IDAHandleNFlag(IDAMem IDA_mem, int nflag, realtype err_k, realtype err_km1, long int *ncfnPtr, int *ncfPtr, long int *netfPtr, int *nefPtr); static void IDAReset(IDAMem IDA_mem); /* Function called after a successful step */ static void IDACompleteStep(IDAMem IDA_mem, realtype err_k, realtype err_km1); /* Function called to evaluate the solutions y(t) and y'(t) at t. Also used in IDAA */ int IDAGetSolution(void *ida_mem, realtype t, N_Vector yret, N_Vector ypret); /* Stopping tests and failure handling */ static int IDAStopTest1(IDAMem IDA_mem, realtype tout,realtype *tret, N_Vector yret, N_Vector ypret, int itask); static int IDAStopTest2(IDAMem IDA_mem, realtype tout, realtype *tret, N_Vector yret, N_Vector ypret, int itask); static int IDAHandleFailure(IDAMem IDA_mem, int sflag); /* Norm functions */ static realtype IDAQuadWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm, N_Vector xQ, N_Vector wQ); static realtype IDAQuadSensWrmsNorm(IDAMem IDA_mem, N_Vector *xQS, N_Vector *wQS); static realtype IDAQuadSensWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm, N_Vector *xQS, N_Vector *wQS); /* Functions for rootfinding */ static int IDARcheck1(IDAMem IDA_mem); static int IDARcheck2(IDAMem IDA_mem); static int IDARcheck3(IDAMem IDA_mem); static int IDARootfind(IDAMem IDA_mem); /* Sensitivity residual DQ function */ static int IDASensRes1DQ(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector resval, int iS, N_Vector yyS, N_Vector ypS, N_Vector resvalS, void *user_dataS, N_Vector ytemp, N_Vector yptemp, N_Vector restemp); static int IDAQuadSensRhsInternalDQ(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector rrQ, N_Vector *resvalQS, void *ida_mem, N_Vector 
yytmp, N_Vector yptmp, N_Vector tmpQS); static int IDAQuadSensRhs1InternalDQ(IDAMem IDA_mem, int is, realtype t, N_Vector yy, N_Vector y, N_Vector yyS, N_Vector ypS, N_Vector resvalQ, N_Vector resvalQS, N_Vector yytmp, N_Vector yptmp, N_Vector tmpQS); /* * ================================================================= * EXPORTED FUNCTIONS IMPLEMENTATION * ================================================================= */ /* * ----------------------------------------------------------------- * Creation, allocation and re-initialization functions * ----------------------------------------------------------------- */ /* * IDACreate * * IDACreate creates an internal memory block for a problem to * be solved by IDA. * If successful, IDACreate returns a pointer to the problem memory. * This pointer should be passed to IDAInit. * If an initialization error occurs, IDACreate prints an error * message to standard err and returns NULL. */ void *IDACreate(SUNContext sunctx) { IDAMem IDA_mem; /* Test inputs */ if (sunctx == NULL) { IDAProcessError(NULL, 0, "IDAS", "IDACreate", MSG_NULL_SUNCTX); return(NULL); } IDA_mem = NULL; IDA_mem = (IDAMem) malloc(sizeof(struct IDAMemRec)); if (IDA_mem == NULL) { IDAProcessError(NULL, 0, "IDAS", "IDACreate", MSG_MEM_FAIL); return (NULL); } /* Zero out ida_mem */ memset(IDA_mem, 0, sizeof(struct IDAMemRec)); IDA_mem->ida_sunctx = sunctx; /* Set unit roundoff in IDA_mem */ IDA_mem->ida_uround = UNIT_ROUNDOFF; /* Set default values for integrator optional inputs */ IDA_mem->ida_res = NULL; IDA_mem->ida_user_data = NULL; IDA_mem->ida_itol = IDA_NN; IDA_mem->ida_atolmin0 = SUNTRUE; IDA_mem->ida_user_efun = SUNFALSE; IDA_mem->ida_efun = NULL; IDA_mem->ida_edata = NULL; IDA_mem->ida_ehfun = IDAErrHandler; IDA_mem->ida_eh_data = IDA_mem; IDA_mem->ida_errfp = stderr; IDA_mem->ida_maxord = MAXORD_DEFAULT; IDA_mem->ida_mxstep = MXSTEP_DEFAULT; IDA_mem->ida_hmax_inv = HMAX_INV_DEFAULT; IDA_mem->ida_hin = ZERO; IDA_mem->ida_epcon = EPCON; 
IDA_mem->ida_maxnef = MXNEF; IDA_mem->ida_maxncf = MXNCF; IDA_mem->ida_suppressalg = SUNFALSE; IDA_mem->ida_id = NULL; IDA_mem->ida_constraints = NULL; IDA_mem->ida_constraintsSet = SUNFALSE; IDA_mem->ida_tstopset = SUNFALSE; /* set the saved value maxord_alloc */ IDA_mem->ida_maxord_alloc = MAXORD_DEFAULT; /* Set default values for IC optional inputs */ IDA_mem->ida_epiccon = PT01 * EPCON; IDA_mem->ida_maxnh = MAXNH; IDA_mem->ida_maxnj = MAXNJ; IDA_mem->ida_maxnit = MAXNI; IDA_mem->ida_maxbacks = MAXBACKS; IDA_mem->ida_lsoff = SUNFALSE; IDA_mem->ida_steptol = SUNRpowerR(IDA_mem->ida_uround, TWOTHIRDS); /* Set default values for quad. optional inputs */ IDA_mem->ida_quadr = SUNFALSE; IDA_mem->ida_rhsQ = NULL; IDA_mem->ida_errconQ = SUNFALSE; IDA_mem->ida_itolQ = IDA_NN; IDA_mem->ida_atolQmin0 = SUNTRUE; /* Set default values for sensi. optional inputs */ IDA_mem->ida_sensi = SUNFALSE; IDA_mem->ida_user_dataS = (void *)IDA_mem; IDA_mem->ida_resS = IDASensResDQ; IDA_mem->ida_resSDQ = SUNTRUE; IDA_mem->ida_DQtype = IDA_CENTERED; IDA_mem->ida_DQrhomax = ZERO; IDA_mem->ida_p = NULL; IDA_mem->ida_pbar = NULL; IDA_mem->ida_plist = NULL; IDA_mem->ida_errconS = SUNFALSE; IDA_mem->ida_itolS = IDA_EE; IDA_mem->ida_atolSmin0 = NULL; IDA_mem->ida_ism = -1; /* initialize to invalid option */ /* Defaults for sensi. quadr. optional inputs. */ IDA_mem->ida_quadr_sensi = SUNFALSE; IDA_mem->ida_user_dataQS = (void *)IDA_mem; IDA_mem->ida_rhsQS = IDAQuadSensRhsInternalDQ; IDA_mem->ida_rhsQSDQ = SUNTRUE; IDA_mem->ida_errconQS = SUNFALSE; IDA_mem->ida_itolQS = IDA_EE; IDA_mem->ida_atolQSmin0 = NULL; /* Set defaults for ASA. 
*/ IDA_mem->ida_adj = SUNFALSE; IDA_mem->ida_adj_mem = NULL; /* Initialize lrw and liw */ IDA_mem->ida_lrw = 25 + 5*MXORDP1; IDA_mem->ida_liw = 38; /* No mallocs have been done yet */ IDA_mem->ida_VatolMallocDone = SUNFALSE; IDA_mem->ida_constraintsMallocDone = SUNFALSE; IDA_mem->ida_idMallocDone = SUNFALSE; IDA_mem->ida_MallocDone = SUNFALSE; IDA_mem->ida_VatolQMallocDone = SUNFALSE; IDA_mem->ida_quadMallocDone = SUNFALSE; IDA_mem->ida_VatolSMallocDone = SUNFALSE; IDA_mem->ida_SatolSMallocDone = SUNFALSE; IDA_mem->ida_sensMallocDone = SUNFALSE; IDA_mem->ida_VatolQSMallocDone = SUNFALSE; IDA_mem->ida_SatolQSMallocDone = SUNFALSE; IDA_mem->ida_quadSensMallocDone = SUNFALSE; IDA_mem->ida_adjMallocDone = SUNFALSE; /* Initialize nonlinear solver variables */ IDA_mem->NLS = NULL; IDA_mem->ownNLS = SUNFALSE; IDA_mem->NLSsim = NULL; IDA_mem->ownNLSsim = SUNFALSE; IDA_mem->ypredictSim = NULL; IDA_mem->ycorSim = NULL; IDA_mem->ewtSim = NULL; IDA_mem->simMallocDone = SUNFALSE; IDA_mem->NLSstg = NULL; IDA_mem->ownNLSstg = SUNFALSE; IDA_mem->ypredictStg = NULL; IDA_mem->ycorStg = NULL; IDA_mem->ewtStg = NULL; IDA_mem->stgMallocDone = SUNFALSE; /* Return pointer to IDA memory block */ return((void *)IDA_mem); } /*-----------------------------------------------------------------*/ /* * IDAInit * * IDAInit allocates and initializes memory for a problem. All * problem specification inputs are checked for errors. If any * error occurs during initialization, it is reported to the * error handler function. 
*/

int IDAInit(void *ida_mem, IDAResFn res,
            realtype t0, N_Vector yy0, N_Vector yp0)
{
  int retval;
  IDAMem IDA_mem;
  booleantype nvectorOK, allocOK;
  sunindextype lrw1, liw1;
  SUNNonlinearSolver NLS;

  /* Check ida_mem */

  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAInit", MSG_NO_MEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Check for legal input parameters */

  if (yy0 == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInit", MSG_Y0_NULL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  if (yp0 == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInit", MSG_YP0_NULL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  if (res == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInit", MSG_RES_NULL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Test if all required vector operations are implemented */

  nvectorOK = IDACheckNvector(yy0);
  if (!nvectorOK) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInit", MSG_BAD_NVECTOR);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /* Set space requirements for one N_Vector */

  if (yy0->ops->nvspace != NULL) {
    N_VSpace(yy0, &lrw1, &liw1);
  } else {
    lrw1 = 0;
    liw1 = 0;
  }
  IDA_mem->ida_lrw1 = lrw1;
  IDA_mem->ida_liw1 = liw1;

  /* Allocate the vectors (using yy0 as a template) */

  allocOK = IDAAllocVectors(IDA_mem, yy0);
  if (!allocOK) {
    IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDAInit", MSG_MEM_FAIL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_MEM_FAIL);
  }

  /* Allocate temporary work arrays for fused vector ops */

  IDA_mem->ida_cvals = NULL;
  IDA_mem->ida_cvals = (realtype *) malloc(MXORDP1*sizeof(realtype));

  IDA_mem->ida_Xvecs = NULL;
  IDA_mem->ida_Xvecs = (N_Vector *) malloc(MXORDP1*sizeof(N_Vector));

  IDA_mem->ida_Zvecs = NULL;
  IDA_mem->ida_Zvecs = (N_Vector *) malloc(MXORDP1*sizeof(N_Vector));

  if ((IDA_mem->ida_cvals == NULL) ||
      (IDA_mem->ida_Xvecs == NULL) ||
      (IDA_mem->ida_Zvecs == NULL)) {
    IDAFreeVectors(IDA_mem);
    IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDAInit", MSG_MEM_FAIL);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_MEM_FAIL);
  }

  /* Input checks complete at this point and history array allocated */

  /* FIX(review): the original code copied res/t0 into the IDA memory
     block and initialized phi[0]/phi[1] both here and again after the
     nonlinear solver was attached; the redundant first copy has been
     removed.  The single copy below runs after all error checking, so
     the observable state on every exit path is unchanged. */

  /* create a Newton nonlinear solver object by default */
  NLS = SUNNonlinSol_Newton(yy0, IDA_mem->ida_sunctx);

  /* check that nonlinear solver is non-NULL */
  if (NLS == NULL) {
    IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDAInit", MSG_MEM_FAIL);
    IDAFreeVectors(IDA_mem);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_MEM_FAIL);
  }

  /* attach the nonlinear solver to the IDA memory */
  retval = IDASetNonlinearSolver(IDA_mem, NLS);

  /* check that the nonlinear solver was successfully attached */
  if (retval != IDA_SUCCESS) {
    IDAProcessError(IDA_mem, retval, "IDAS", "IDAInit",
                    "Setting the nonlinear solver failed");
    IDAFreeVectors(IDA_mem);
    SUNNonlinSolFree(NLS);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_MEM_FAIL);
  }

  /* set ownership flag */
  IDA_mem->ownNLS = SUNTRUE;

  /* All error checking is complete at this point */

  /* Copy the input parameters into IDA memory block */
  IDA_mem->ida_res = res;
  IDA_mem->ida_tn  = t0;

  /* Set the linear solver addresses to NULL */
  IDA_mem->ida_linit  = NULL;
  IDA_mem->ida_lsetup = NULL;
  IDA_mem->ida_lsolve = NULL;
  IDA_mem->ida_lperf  = NULL;
  IDA_mem->ida_lfree  = NULL;
  IDA_mem->ida_lmem   = NULL;

  /* Set forceSetup to SUNFALSE */
  IDA_mem->ida_forceSetup = SUNFALSE;

  /* Initialize the phi array */
  N_VScale(ONE, yy0, IDA_mem->ida_phi[0]);
  N_VScale(ONE, yp0, IDA_mem->ida_phi[1]);

  /* Initialize all the counters and other optional output values */
  IDA_mem->ida_nst     = 0;
  IDA_mem->ida_nre     = 0;
  IDA_mem->ida_ncfn    = 0;
  IDA_mem->ida_netf    = 0;
  IDA_mem->ida_nni     = 0;
  IDA_mem->ida_nsetups = 0;

  IDA_mem->ida_kused = 0;
  IDA_mem->ida_hused = ZERO;
  IDA_mem->ida_tolsf = ONE;

  IDA_mem->ida_nge   = 0;
  IDA_mem->ida_irfnd = 0;

  /* Initialize counters specific to IC calculation. */
  IDA_mem->ida_nbacktr = 0;

  /* Initialize root-finding variables */
  IDA_mem->ida_glo     = NULL;
  IDA_mem->ida_ghi     = NULL;
  IDA_mem->ida_grout   = NULL;
  IDA_mem->ida_iroots  = NULL;
  IDA_mem->ida_rootdir = NULL;
  IDA_mem->ida_gfun    = NULL;
  IDA_mem->ida_nrtfn   = 0;
  IDA_mem->ida_gactive = NULL;
  IDA_mem->ida_mxgnull = 1;

  /* Initial setup not done yet */
  IDA_mem->ida_SetupDone = SUNFALSE;

  /* Problem memory has been successfully allocated */
  IDA_mem->ida_MallocDone = SUNTRUE;

  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(IDA_SUCCESS);
}

/*-----------------------------------------------------------------*/

/*
 * IDAReInit
 *
 * IDAReInit re-initializes IDA's memory for a problem, assuming
 * it has already been allocated in a prior IDAInit call.
 * All problem specification inputs are checked for errors.
 * The problem size Neq is assumed to be unchanged since the call
 * to IDAInit, and the maximum order maxord must not be larger.
 * If any error occurs during reinitialization, it is reported to
 * the error handler function.
 * The return value is IDA_SUCCESS = 0 if no errors occurred, or
 * a negative value otherwise.
*/ int IDAReInit(void *ida_mem, realtype t0, N_Vector yy0, N_Vector yp0) { IDAMem IDA_mem; /* Check for legal input parameters */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAReInit", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Check if problem was malloc'ed */ if (IDA_mem->ida_MallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_MALLOC, "IDAS", "IDAReInit", MSG_NO_MALLOC); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_MALLOC); } /* Check for legal input parameters */ if (yy0 == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAReInit", MSG_Y0_NULL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if (yp0 == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAReInit", MSG_YP0_NULL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Copy the input parameters into IDA memory block */ IDA_mem->ida_tn = t0; /* Set forceSetup to SUNFALSE */ IDA_mem->ida_forceSetup = SUNFALSE; /* Initialize the phi array */ N_VScale(ONE, yy0, IDA_mem->ida_phi[0]); N_VScale(ONE, yp0, IDA_mem->ida_phi[1]); /* Initialize all the counters and other optional output values */ IDA_mem->ida_nst = 0; IDA_mem->ida_nre = 0; IDA_mem->ida_ncfn = 0; IDA_mem->ida_netf = 0; IDA_mem->ida_nni = 0; IDA_mem->ida_nsetups = 0; IDA_mem->ida_kused = 0; IDA_mem->ida_hused = ZERO; IDA_mem->ida_tolsf = ONE; IDA_mem->ida_nge = 0; IDA_mem->ida_irfnd = 0; /* Initial setup not done yet */ IDA_mem->ida_SetupDone = SUNFALSE; /* Problem has been successfully re-initialized */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ /* * IDASStolerances * IDASVtolerances * IDAWFtolerances * * These functions specify the integration tolerances. One of them * MUST be called before the first call to IDA. * * IDASStolerances specifies scalar relative and absolute tolerances. 
* IDASVtolerances specifies scalar relative tolerance and a vector * absolute tolerance (a potentially different absolute tolerance * for each vector component). * IDAWFtolerances specifies a user-provides function (of type IDAEwtFn) * which will be called to set the error weight vector. */ int IDASStolerances(void *ida_mem, realtype reltol, realtype abstol) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASStolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_MallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_MALLOC, "IDAS", "IDASStolerances", MSG_NO_MALLOC); return(IDA_NO_MALLOC); } /* Check inputs */ if (reltol < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASStolerances", MSG_BAD_RTOL); return(IDA_ILL_INPUT); } if (abstol < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASStolerances", MSG_BAD_ATOL); return(IDA_ILL_INPUT); } /* Copy tolerances into memory */ IDA_mem->ida_rtol = reltol; IDA_mem->ida_Satol = abstol; IDA_mem->ida_atolmin0 = (abstol == ZERO); IDA_mem->ida_itol = IDA_SS; IDA_mem->ida_user_efun = SUNFALSE; IDA_mem->ida_efun = IDAEwtSet; IDA_mem->ida_edata = NULL; /* will be set to ida_mem in InitialSetup */ return(IDA_SUCCESS); } int IDASVtolerances(void *ida_mem, realtype reltol, N_Vector abstol) { IDAMem IDA_mem; realtype atolmin; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASVtolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_MallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_MALLOC, "IDAS", "IDASVtolerances", MSG_NO_MALLOC); return(IDA_NO_MALLOC); } /* Check inputs */ if (reltol < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASVtolerances", MSG_BAD_RTOL); return(IDA_ILL_INPUT); } atolmin = N_VMin(abstol); if (atolmin < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASVtolerances", MSG_BAD_ATOL); return(IDA_ILL_INPUT); } /* Copy 
tolerances into memory */ if ( !(IDA_mem->ida_VatolMallocDone) ) { IDA_mem->ida_Vatol = N_VClone(IDA_mem->ida_ewt); IDA_mem->ida_lrw += IDA_mem->ida_lrw1; IDA_mem->ida_liw += IDA_mem->ida_liw1; IDA_mem->ida_VatolMallocDone = SUNTRUE; } IDA_mem->ida_rtol = reltol; N_VScale(ONE, abstol, IDA_mem->ida_Vatol); IDA_mem->ida_atolmin0 = (atolmin == ZERO); IDA_mem->ida_itol = IDA_SV; IDA_mem->ida_user_efun = SUNFALSE; IDA_mem->ida_efun = IDAEwtSet; IDA_mem->ida_edata = NULL; /* will be set to ida_mem in InitialSetup */ return(IDA_SUCCESS); } int IDAWFtolerances(void *ida_mem, IDAEwtFn efun) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAWFtolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_MallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_MALLOC, "IDAS", "IDAWFtolerances", MSG_NO_MALLOC); return(IDA_NO_MALLOC); } IDA_mem->ida_itol = IDA_WF; IDA_mem->ida_user_efun = SUNTRUE; IDA_mem->ida_efun = efun; IDA_mem->ida_edata = NULL; /* will be set to user_data in InitialSetup */ return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ /* * IDAQuadMalloc * * IDAQuadMalloc allocates and initializes quadrature related * memory for a problem. All problem specification inputs are * checked for errors. If any error occurs during initialization, * it is reported to the file whose file pointer is errfp. * The return value is IDA_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. 
*/ int IDAQuadInit(void *ida_mem, IDAQuadRhsFn rhsQ, N_Vector yQ0) { IDAMem IDA_mem; booleantype allocOK; sunindextype lrw1Q, liw1Q; int retval; /* Check ida_mem */ if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadInit", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Set space requirements for one N_Vector */ N_VSpace(yQ0, &lrw1Q, &liw1Q); IDA_mem->ida_lrw1Q = lrw1Q; IDA_mem->ida_liw1Q = liw1Q; /* Allocate the vectors (using yQ0 as a template) */ allocOK = IDAQuadAllocVectors(IDA_mem, yQ0); if (!allocOK) { IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDAQuadInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } /* Initialize phiQ in the history array */ N_VScale(ONE, yQ0, IDA_mem->ida_phiQ[0]); retval = N_VConstVectorArray(IDA_mem->ida_maxord, ZERO, IDA_mem->ida_phiQ+1); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } /* Copy the input parameters into IDAS state */ IDA_mem->ida_rhsQ = rhsQ; /* Initialize counters */ IDA_mem->ida_nrQe = 0; IDA_mem->ida_netfQ = 0; /* Quadrature integration turned ON */ IDA_mem->ida_quadr = SUNTRUE; IDA_mem->ida_quadMallocDone = SUNTRUE; /* Quadrature initialization was successfull */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ /* * IDAQuadReInit * * IDAQuadReInit re-initializes IDAS's quadrature related memory * for a problem, assuming it has already been allocated in prior * calls to IDAInit and IDAQuadMalloc. * All problem specification inputs are checked for errors. * If any error occurs during initialization, it is reported to the * file whose file pointer is errfp. * The return value is IDA_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. 
*/ int IDAQuadReInit(void *ida_mem, N_Vector yQ0) { IDAMem IDA_mem; int retval; /* Check ida_mem */ if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadReInit", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Ckeck if quadrature was initialized */ if (IDA_mem->ida_quadMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAQuadReInit", MSG_NO_QUAD); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_QUAD); } /* Initialize phiQ in the history array */ N_VScale(ONE, yQ0, IDA_mem->ida_phiQ[0]); retval = N_VConstVectorArray(IDA_mem->ida_maxord, ZERO, IDA_mem->ida_phiQ+1); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } /* Initialize counters */ IDA_mem->ida_nrQe = 0; IDA_mem->ida_netfQ = 0; /* Quadrature integration turned ON */ IDA_mem->ida_quadr = SUNTRUE; /* Quadrature re-initialization was successfull */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAQuadSStolerances * IDAQuadSVtolerances * * * These functions specify the integration tolerances for quadrature * variables. One of them MUST be called before the first call to * IDA IF error control on the quadrature variables is enabled * (see IDASetQuadErrCon). * * IDASStolerances specifies scalar relative and absolute tolerances. * IDASVtolerances specifies scalar relative tolerance and a vector * absolute tolerance (a potentially different absolute tolerance * for each vector component). 
*/ int IDAQuadSStolerances(void *ida_mem, realtype reltolQ, realtype abstolQ) { IDAMem IDA_mem; /*Check ida mem*/ if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadSStolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Ckeck if quadrature was initialized */ if (IDA_mem->ida_quadMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAQuadSStolerances", MSG_NO_QUAD); return(IDA_NO_QUAD); } /* Test user-supplied tolerances */ if (reltolQ < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSStolerances", MSG_BAD_RTOLQ); return(IDA_ILL_INPUT); } if (abstolQ < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSStolerances", MSG_BAD_ATOLQ); return(IDA_ILL_INPUT); } /* Copy tolerances into memory */ IDA_mem->ida_itolQ = IDA_SS; IDA_mem->ida_rtolQ = reltolQ; IDA_mem->ida_SatolQ = abstolQ; IDA_mem->ida_atolQmin0 = (abstolQ == ZERO); return (IDA_SUCCESS); } int IDAQuadSVtolerances(void *ida_mem, realtype reltolQ, N_Vector abstolQ) { IDAMem IDA_mem; realtype atolmin; /*Check ida mem*/ if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadSVtolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Ckeck if quadrature was initialized */ if (IDA_mem->ida_quadMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAQuadSVtolerances", MSG_NO_QUAD); return(IDA_NO_QUAD); } /* Test user-supplied tolerances */ if (reltolQ < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSVtolerances", MSG_BAD_RTOLQ); return(IDA_ILL_INPUT); } if (abstolQ == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSVtolerances", MSG_NULL_ATOLQ); return(IDA_ILL_INPUT); } atolmin = N_VMin(abstolQ); if (atolmin < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSVtolerances", MSG_BAD_ATOLQ); return(IDA_ILL_INPUT); } /* Copy tolerances into memory */ IDA_mem->ida_itolQ = IDA_SV; IDA_mem->ida_rtolQ = reltolQ; /* 
clone the absolute tolerances vector (if necessary) */ if (SUNFALSE == IDA_mem->ida_VatolQMallocDone) { IDA_mem->ida_VatolQ = N_VClone(abstolQ); IDA_mem->ida_lrw += IDA_mem->ida_lrw1Q; IDA_mem->ida_liw += IDA_mem->ida_liw1Q; IDA_mem->ida_VatolQMallocDone = SUNTRUE; } N_VScale(ONE, abstolQ, IDA_mem->ida_VatolQ); IDA_mem->ida_atolQmin0 = (atolmin == ZERO); return(IDA_SUCCESS); } /* * IDASenMalloc * * IDASensInit allocates and initializes sensitivity related * memory for a problem. All problem specification inputs are * checked for errors. If any error occurs during initialization, * it is reported to the file whose file pointer is errfp. * The return value is IDA_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. */ int IDASensInit(void *ida_mem, int Ns, int ism, IDASensResFn fS, N_Vector *yS0, N_Vector *ypS0) { IDAMem IDA_mem; booleantype allocOK; int is, retval; SUNNonlinearSolver NLS; /* Check ida_mem */ if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASensInit", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Check if Ns is legal */ if (Ns<=0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensInit", MSG_BAD_NS); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDA_mem->ida_Ns = Ns; /* Check if ism is legal */ if ((ism!=IDA_SIMULTANEOUS) && (ism!=IDA_STAGGERED)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensInit", MSG_BAD_ISM); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDA_mem->ida_ism = ism; /* Check if yS0 and ypS0 are non-null */ if (yS0 == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensInit", MSG_NULL_YYS0); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if (ypS0 == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensInit", MSG_NULL_YPS0); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Store sensitivity RHS-related data */ if 
(fS != NULL) { IDA_mem->ida_resS = fS; IDA_mem->ida_user_dataS = IDA_mem->ida_user_data; IDA_mem->ida_resSDQ = SUNFALSE; } else { IDA_mem->ida_resS = IDASensResDQ; IDA_mem->ida_user_dataS = ida_mem; IDA_mem->ida_resSDQ = SUNTRUE; } /* Allocate the vectors (using yS0[0] as a template) */ allocOK = IDASensAllocVectors(IDA_mem, yS0[0]); if (!allocOK) { IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASensInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } /* Allocate temporary work arrays for fused vector ops */ if (Ns*MXORDP1 > MXORDP1) { free(IDA_mem->ida_cvals); IDA_mem->ida_cvals = NULL; free(IDA_mem->ida_Xvecs); IDA_mem->ida_Xvecs = NULL; free(IDA_mem->ida_Zvecs); IDA_mem->ida_Zvecs = NULL; IDA_mem->ida_cvals = (realtype *) malloc((Ns*MXORDP1)*sizeof(realtype)); IDA_mem->ida_Xvecs = (N_Vector *) malloc((Ns*MXORDP1)*sizeof(N_Vector)); IDA_mem->ida_Zvecs = (N_Vector *) malloc((Ns*MXORDP1)*sizeof(N_Vector)); if ((IDA_mem->ida_cvals == NULL) || (IDA_mem->ida_Xvecs == NULL) || (IDA_mem->ida_Zvecs == NULL)) { IDASensFreeVectors(IDA_mem); IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASensInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } } /*---------------------------------------------- All error checking is complete at this point -----------------------------------------------*/ /* Initialize the phiS array */ for (is=0; isida_cvals[is] = ONE; retval = N_VScaleVectorArray(Ns, IDA_mem->ida_cvals, yS0, IDA_mem->ida_phiS[0]); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } retval = N_VScaleVectorArray(Ns, IDA_mem->ida_cvals, ypS0, IDA_mem->ida_phiS[1]); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } /* Initialize all sensitivity related counters */ IDA_mem->ida_nrSe = 0; IDA_mem->ida_nreS = 0; IDA_mem->ida_ncfnS = 0; IDA_mem->ida_netfS = 0; IDA_mem->ida_nniS = 0; IDA_mem->ida_nsetupsS = 0; 
/* Set default values for plist and pbar */ for (is=0; isida_plist[is] = is; IDA_mem->ida_pbar[is] = ONE; } /* Sensitivities will be computed */ IDA_mem->ida_sensi = SUNTRUE; IDA_mem->ida_sensMallocDone = SUNTRUE; /* create a Newton nonlinear solver object by default */ if (ism == IDA_SIMULTANEOUS) NLS = SUNNonlinSol_NewtonSens(Ns+1, IDA_mem->ida_delta, IDA_mem->ida_sunctx); else NLS = SUNNonlinSol_NewtonSens(Ns, IDA_mem->ida_delta, IDA_mem->ida_sunctx); /* check that the nonlinear solver is non-NULL */ if (NLS == NULL) { IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASensInit", MSG_MEM_FAIL); IDASensFreeVectors(IDA_mem); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } /* attach the nonlinear solver to the IDA memory */ if (ism == IDA_SIMULTANEOUS) retval = IDASetNonlinearSolverSensSim(IDA_mem, NLS); else retval = IDASetNonlinearSolverSensStg(IDA_mem, NLS); /* check that the nonlinear solver was successfully attached */ if (retval != IDA_SUCCESS) { IDAProcessError(IDA_mem, retval, "IDAS", "IDASensInit", "Setting the nonlinear solver failed"); IDASensFreeVectors(IDA_mem); SUNNonlinSolFree(NLS); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } /* set ownership flag */ if (ism == IDA_SIMULTANEOUS) IDA_mem->ownNLSsim = SUNTRUE; else IDA_mem->ownNLSstg = SUNTRUE; /* Sensitivity initialization was successfull */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /*-----------------------------------------------------------------*/ /* * IDASensReInit * * IDASensReInit re-initializes IDAS's sensitivity related memory * for a problem, assuming it has already been allocated in prior * calls to IDAInit and IDASensInit. * All problem specification inputs are checked for errors. * The number of sensitivities Ns is assumed to be unchanged since * the previous call to IDASensInit. * If any error occurs during initialization, it is reported to the * file whose file pointer is errfp. 
* The return value is IDA_SUCCESS = 0 if no errors occurred, or
 * a negative value otherwise.
 */

int IDASensReInit(void *ida_mem, int ism, N_Vector *yS0, N_Vector *ypS0)
{
  IDAMem IDA_mem;
  int is, retval;
  SUNNonlinearSolver NLS;

  /* Check ida_mem */
  if (ida_mem==NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASensReInit", MSG_NO_MEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER);

  /* Was sensitivity initialized? */
  if (IDA_mem->ida_sensMallocDone == SUNFALSE) {
    IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDASensReInit", MSG_NO_SENSI);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_NO_SENS);
  }

  /* Check if ism is legal */
  if ((ism!=IDA_SIMULTANEOUS) && (ism!=IDA_STAGGERED)) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensReInit", MSG_BAD_ISM);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }
  IDA_mem->ida_ism = ism;

  /* Check if yS0 and ypS0 are non-null */
  if (yS0 == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensReInit", MSG_NULL_YYS0);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }
  if (ypS0 == NULL) {
    IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensReInit", MSG_NULL_YPS0);
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_ILL_INPUT);
  }

  /*-----------------------------------------------
    All error checking is complete at this point
    -----------------------------------------------*/

  /* Initialize the phiS array */
  /* FIX(review): the two loop headers below were corrupted during text
     extraction ("isida_Ns") and have been restored to the standard
     "is < IDA_mem->ida_Ns" form. */
  for (is=0; is<IDA_mem->ida_Ns; is++)
    IDA_mem->ida_cvals[is] = ONE;

  retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals,
                               yS0, IDA_mem->ida_phiS[0]);
  if (retval != IDA_SUCCESS) {
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_VECTOROP_ERR);
  }

  retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals,
                               ypS0, IDA_mem->ida_phiS[1]);
  if (retval != IDA_SUCCESS) {
    SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
    return(IDA_VECTOROP_ERR);
  }

  /* Initialize all sensitivity related counters */
  IDA_mem->ida_nrSe     = 0;
  IDA_mem->ida_nreS     = 0;
  IDA_mem->ida_ncfnS    = 0;
  IDA_mem->ida_netfS    = 0;
  IDA_mem->ida_nniS     = 0;
  IDA_mem->ida_nsetupsS = 0;

  /* Set default values for plist and pbar */
  for (is=0; is<IDA_mem->ida_Ns; is++) {
    IDA_mem->ida_plist[is] = is;
    IDA_mem->ida_pbar[is] = ONE;
  }

  /* Sensitivities will be computed */
  IDA_mem->ida_sensi = SUNTRUE;

  /* Check if the NLS exists, create the default NLS if needed */
  if ((ism == IDA_SIMULTANEOUS && IDA_mem->NLSsim == NULL) ||
      (ism == IDA_STAGGERED && IDA_mem->NLSstg == NULL)) {

    /* create a Newton nonlinear solver object by default */
    if (ism == IDA_SIMULTANEOUS)
      NLS = SUNNonlinSol_NewtonSens(IDA_mem->ida_Ns+1, IDA_mem->ida_delta,
                                    IDA_mem->ida_sunctx);
    else
      NLS = SUNNonlinSol_NewtonSens(IDA_mem->ida_Ns, IDA_mem->ida_delta,
                                    IDA_mem->ida_sunctx);

    /* check that the nonlinear solver is non-NULL */
    if (NLS == NULL) {
      IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDASensReInit", MSG_MEM_FAIL);
      SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
      return(IDA_MEM_FAIL);
    }

    /* attach the nonlinear solver to the IDA memory */
    if (ism == IDA_SIMULTANEOUS)
      retval = IDASetNonlinearSolverSensSim(IDA_mem, NLS);
    else
      retval = IDASetNonlinearSolverSensStg(IDA_mem, NLS);

    /* check that the nonlinear solver was successfully attached */
    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDA_mem, retval, "IDAS", "IDASensReInit",
                      "Setting the nonlinear solver failed");
      SUNNonlinSolFree(NLS);
      SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
      return(IDA_MEM_FAIL);
    }

    /* set ownership flag */
    if (ism == IDA_SIMULTANEOUS)
      IDA_mem->ownNLSsim = SUNTRUE;
    else
      IDA_mem->ownNLSstg = SUNTRUE;

    /* initialize the NLS object, this assumes that the linear solver has
       already been initialized in IDAInit */
    if (ism == IDA_SIMULTANEOUS)
      retval = idaNlsInitSensSim(IDA_mem);
    else
      retval = idaNlsInitSensStg(IDA_mem);

    if (retval != IDA_SUCCESS) {
      IDAProcessError(IDA_mem, IDA_NLS_INIT_FAIL, "IDAS", "IDASensReInit",
                      MSG_NLS_INIT_FAIL);
      SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
      return(IDA_NLS_INIT_FAIL);
    }
  }

  /* Sensitivity re-initialization was successful */
  SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER);
  return(IDA_SUCCESS);
}

/*-----------------------------------------------------------------*/

/*
 * IDASensSStolerances
 * IDASensSVtolerances
 * IDASensEEtolerances
 *
 * These functions specify the integration tolerances for sensitivity
 * variables. One of them MUST be called before the first call to IDASolve.
 *
 * IDASensSStolerances specifies scalar relative and absolute tolerances.
 * IDASensSVtolerances specifies scalar relative tolerance and a vector
 * absolute tolerance for each sensitivity vector (a potentially different
 * absolute tolerance for each vector component).
 * IDASensEEtolerances specifies that tolerances for sensitivity variables
 * should be estimated from those provided for the state variables.
 */

int IDASensSStolerances(void *ida_mem, realtype reltolS, realtype *abstolS)
{
  IDAMem IDA_mem;
  int is;

  /* Check ida_mem pointer */
  if (ida_mem == NULL) {
    IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASensSStolerances", MSG_NO_MEM);
    return(IDA_MEM_NULL);
  }
  IDA_mem = (IDAMem) ida_mem;

  /* Was sensitivity initialized?
*/ if (IDA_mem->ida_sensMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDASensSStolerances", MSG_NO_SENSI); return(IDA_NO_SENS); } /* Test user-supplied tolerances */ if (reltolS < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensSStolerances", MSG_BAD_RTOLS); return(IDA_ILL_INPUT); } if (abstolS == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensSStolerances", MSG_NULL_ATOLS); return(IDA_ILL_INPUT); } for (is=0; isida_Ns; is++) if (abstolS[is] < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensSStolerances", MSG_BAD_ATOLS); return(IDA_ILL_INPUT); } /* Copy tolerances into memory */ IDA_mem->ida_itolS = IDA_SS; IDA_mem->ida_rtolS = reltolS; if ( !(IDA_mem->ida_SatolSMallocDone) ) { IDA_mem->ida_SatolS = NULL; IDA_mem->ida_SatolS = (realtype *)malloc(IDA_mem->ida_Ns*sizeof(realtype)); IDA_mem->ida_atolSmin0 = (booleantype *)malloc(IDA_mem->ida_Ns*sizeof(booleantype)); IDA_mem->ida_lrw += IDA_mem->ida_Ns; IDA_mem->ida_SatolSMallocDone = SUNTRUE; } for (is=0; isida_Ns; is++) { IDA_mem->ida_SatolS[is] = abstolS[is]; IDA_mem->ida_atolSmin0[is] = (abstolS[is] == ZERO); } return(IDA_SUCCESS); } int IDASensSVtolerances(void *ida_mem, realtype reltolS, N_Vector *abstolS) { IDAMem IDA_mem; int is, retval; realtype *atolmin; /* Check ida_mem pointer */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASensSVtolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Was sensitivity initialized? 
*/ if (IDA_mem->ida_sensMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDASensSVtolerances", MSG_NO_SENSI); return(IDA_NO_SENS); } /* Test user-supplied tolerances */ if (reltolS < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensSVtolerances", MSG_BAD_RTOLS); return(IDA_ILL_INPUT); } if (abstolS == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensSVtolerances", MSG_NULL_ATOLS); return(IDA_ILL_INPUT); } atolmin = (realtype *)malloc(IDA_mem->ida_Ns*sizeof(realtype)); for (is=0; isida_Ns; is++) { atolmin[is] = N_VMin(abstolS[is]); if (atolmin[is] < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASensSStolerances", MSG_BAD_ATOLS); free(atolmin); return(IDA_ILL_INPUT); } } IDA_mem->ida_itolS = IDA_SV; IDA_mem->ida_rtolS = reltolS ; if ( SUNFALSE == IDA_mem->ida_VatolSMallocDone ) { IDA_mem->ida_VatolS = N_VCloneVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_tempv1); IDA_mem->ida_atolSmin0 = (booleantype *)malloc(IDA_mem->ida_Ns*sizeof(booleantype)); IDA_mem->ida_lrw += IDA_mem->ida_Ns*IDA_mem->ida_lrw1; IDA_mem->ida_liw += IDA_mem->ida_Ns*IDA_mem->ida_liw1; IDA_mem->ida_VatolSMallocDone = SUNTRUE; } for (is=0; isida_Ns; is++) { IDA_mem->ida_cvals[is] = ONE; IDA_mem->ida_atolSmin0[is] = (atolmin[is] == ZERO); } free(atolmin); retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, abstolS, IDA_mem->ida_VatolS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); return(IDA_SUCCESS); } int IDASensEEtolerances(void *ida_mem) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASensEEtolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Was sensitivity initialized? 
*/ if (IDA_mem->ida_sensMallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDASensEEtolerances", MSG_NO_SENSI); return(IDA_NO_SENS); } IDA_mem->ida_itolS = IDA_EE; return(IDA_SUCCESS); } int IDAQuadSensInit(void *ida_mem, IDAQuadSensRhsFn rhsQS, N_Vector *yQS0) { IDAMem IDA_mem; booleantype allocOK; int is, retval; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadSensInit", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Check if sensitivity analysis is active */ if (!IDA_mem->ida_sensi) { IDAProcessError(NULL, IDA_NO_SENS, "IDAS", "IDAQuadSensInit", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } /* Verifiy yQS0 parameter. */ if (yQS0==NULL) { IDAProcessError(NULL, IDA_ILL_INPUT, "IDAS", "IDAQuadSensInit", MSG_NULL_YQS0); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Allocate vector needed for quadratures' sensitivities. */ allocOK = IDAQuadSensAllocVectors(IDA_mem, yQS0[0]); if (!allocOK) { IDAProcessError(NULL, IDA_MEM_FAIL, "IDAS", "IDAQuadSensInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } /* Error checking complete. */ if (rhsQS == NULL) { IDA_mem->ida_rhsQSDQ = SUNTRUE; IDA_mem->ida_rhsQS = IDAQuadSensRhsInternalDQ; IDA_mem->ida_user_dataQS = ida_mem; } else { IDA_mem->ida_rhsQSDQ = SUNFALSE; IDA_mem->ida_rhsQS = rhsQS; IDA_mem->ida_user_dataQS = IDA_mem->ida_user_data; } /* Initialize phiQS[0] in the history array */ for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = ONE; retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, yQS0, IDA_mem->ida_phiQS[0]); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } /* Initialize all sensitivities related counters. */ IDA_mem->ida_nrQSe = 0; IDA_mem->ida_nrQeS = 0; IDA_mem->ida_netfQS = 0; /* Everything allright, set the flags and return with success. 
*/ IDA_mem->ida_quadr_sensi = SUNTRUE; IDA_mem->ida_quadSensMallocDone = SUNTRUE; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } int IDAQuadSensReInit(void *ida_mem, N_Vector *yQS0) { IDAMem IDA_mem; int is, retval; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadSensReInit", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Check if sensitivity analysis is active */ if (!IDA_mem->ida_sensi) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAQuadSensReInit", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } /* Was sensitivity for quadrature already initialized? */ if (!IDA_mem->ida_quadSensMallocDone) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAQuadSensReInit", MSG_NO_QUADSENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_QUADSENS); } /* Verifiy yQS0 parameter. */ if (yQS0==NULL) { IDAProcessError(NULL, IDA_ILL_INPUT, "IDAS", "IDAQuadSensReInit", MSG_NULL_YQS0); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } /* Error checking complete at this point. */ /* Initialize phiQS[0] in the history array */ for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = ONE; retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, yQS0, IDA_mem->ida_phiQS[0]); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } /* Initialize all sensitivities related counters. */ IDA_mem->ida_nrQSe = 0; IDA_mem->ida_nrQeS = 0; IDA_mem->ida_netfQS = 0; /* Everything allright, set the flags and return with success. */ IDA_mem->ida_quadr_sensi = SUNTRUE; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAQuadSensSStolerances * IDAQuadSensSVtolerances * IDAQuadSensEEtolerances * * These functions specify the integration tolerances for quadrature * sensitivity variables. 
One of them MUST be called before the first * call to IDAS IF these variables are included in the error test. * * IDAQuadSensSStolerances specifies scalar relative and absolute tolerances. * IDAQuadSensSVtolerances specifies scalar relative tolerance and a vector * absolute tolerance for each quadrature sensitivity vector (a potentially * different absolute tolerance for each vector component). * IDAQuadSensEEtolerances specifies that tolerances for sensitivity variables * should be estimated from those provided for the quadrature variables. * In this case, tolerances for the quadrature variables must be * specified through a call to one of IDAQuad**tolerances. */ int IDAQuadSensSStolerances(void *ida_mem, realtype reltolQS, realtype *abstolQS) { IDAMem IDA_mem; int is; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadSensSStolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Check if sensitivity analysis is active */ if (!IDA_mem->ida_sensi) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAQuadSensSStolerances", MSG_NO_SENSI); return(IDA_NO_SENS); } /* Was sensitivity for quadrature already initialized? */ if (!IDA_mem->ida_quadSensMallocDone) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAQuadSensSStolerances", MSG_NO_QUADSENSI); return(IDA_NO_QUADSENS); } /* Test user-supplied tolerances */ if (reltolQS < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSensSStolerances", MSG_BAD_RELTOLQS); return(IDA_ILL_INPUT); } if (abstolQS == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSensSStolerances", MSG_NULL_ABSTOLQS); return(IDA_ILL_INPUT); } for (is=0; isida_Ns; is++) if (abstolQS[is] < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSensSStolerances", MSG_BAD_ABSTOLQS); return(IDA_ILL_INPUT); } /* Save data. 
*/ IDA_mem->ida_itolQS = IDA_SS; IDA_mem->ida_rtolQS = reltolQS; if ( !(IDA_mem->ida_SatolQSMallocDone) ) { IDA_mem->ida_SatolQS = (realtype *)malloc(IDA_mem->ida_Ns*sizeof(realtype)); IDA_mem->ida_atolQSmin0 = (booleantype *)malloc(IDA_mem->ida_Ns*sizeof(booleantype)); IDA_mem->ida_lrw += IDA_mem->ida_Ns; IDA_mem->ida_SatolQSMallocDone = SUNTRUE; } for (is=0; isida_Ns; is++) { IDA_mem->ida_SatolQS[is] = abstolQS[is]; IDA_mem->ida_atolQSmin0[is] = (abstolQS[is] == ZERO); } return(IDA_SUCCESS); } int IDAQuadSensSVtolerances(void *ida_mem, realtype reltolQS, N_Vector *abstolQS) { IDAMem IDA_mem; int is, retval; realtype *atolmin; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadSensSVtolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Check if sensitivity analysis is active */ if (!IDA_mem->ida_sensi) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAQuadSensSVtolerances", MSG_NO_SENSI); return(IDA_NO_SENS); } /* Was sensitivity for quadrature already initialized? */ if (!IDA_mem->ida_quadSensMallocDone) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAQuadSensSVtolerances", MSG_NO_QUADSENSI); return(IDA_NO_QUADSENS); } /* Test user-supplied tolerances */ if (reltolQS < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSensSVtolerances", MSG_BAD_RELTOLQS); return(IDA_ILL_INPUT); } if (abstolQS == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSensSVtolerances", MSG_NULL_ABSTOLQS); return(IDA_ILL_INPUT); } atolmin = (realtype *)malloc(IDA_mem->ida_Ns*sizeof(realtype)); for (is=0; isida_Ns; is++) { atolmin[is] = N_VMin(abstolQS[is]); if (atolmin[is] < ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAQuadSensSVtolerances", MSG_BAD_ABSTOLQS); free(atolmin); return(IDA_ILL_INPUT); } } /* Save data. 
*/ IDA_mem->ida_itolQS = IDA_SV; IDA_mem->ida_rtolQS = reltolQS; if ( !(IDA_mem->ida_VatolQSMallocDone) ) { IDA_mem->ida_VatolQS = N_VCloneVectorArray(IDA_mem->ida_Ns, abstolQS[0]); IDA_mem->ida_atolQSmin0 = (booleantype *)malloc(IDA_mem->ida_Ns*sizeof(booleantype)); IDA_mem->ida_lrw += IDA_mem->ida_Ns*IDA_mem->ida_lrw1Q; IDA_mem->ida_liw += IDA_mem->ida_Ns*IDA_mem->ida_liw1Q; IDA_mem->ida_VatolQSMallocDone = SUNTRUE; } for (is=0; isida_Ns; is++) { IDA_mem->ida_cvals[is] = ONE; IDA_mem->ida_atolQSmin0[is] = (atolmin[is] == ZERO); } free(atolmin); retval = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, abstolQS, IDA_mem->ida_VatolQS); if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR); return(IDA_SUCCESS); } int IDAQuadSensEEtolerances(void *ida_mem) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAQuadSensEEtolerances", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Check if sensitivity analysis is active */ if (!IDA_mem->ida_sensi) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAQuadSensEEtolerances", MSG_NO_SENSI); return(IDA_NO_SENS); } /* Was sensitivity for quadrature already initialized? */ if (!IDA_mem->ida_quadSensMallocDone) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAQuadSensEEtolerances", MSG_NO_QUADSENSI); return(IDA_NO_QUADSENS); } IDA_mem->ida_itolQS = IDA_EE; return(IDA_SUCCESS); } /* * IDASensToggleOff * * IDASensToggleOff deactivates sensitivity calculations. * It does NOT deallocate sensitivity-related memory. 
*/ int IDASensToggleOff(void *ida_mem) { IDAMem IDA_mem; /* Check ida_mem */ if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASensToggleOff", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Disable sensitivities */ IDA_mem->ida_sensi = SUNFALSE; IDA_mem->ida_quadr_sensi = SUNFALSE; return(IDA_SUCCESS); } /* * IDARootInit * * IDARootInit initializes a rootfinding problem to be solved * during the integration of the DAE system. It loads the root * function pointer and the number of root functions, and allocates * workspace memory. The return value is IDA_SUCCESS = 0 if no * errors occurred, or a negative value otherwise. */ int IDARootInit(void *ida_mem, int nrtfn, IDARootFn g) { IDAMem IDA_mem; int i, nrt; /* Check ida_mem pointer */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDARootInit", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); nrt = (nrtfn < 0) ? 0 : nrtfn; /* If rerunning IDARootInit() with a different number of root functions (changing number of gfun components), then free currently held memory resources */ if ((nrt != IDA_mem->ida_nrtfn) && (IDA_mem->ida_nrtfn > 0)) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; free(IDA_mem->ida_ghi); IDA_mem->ida_ghi = NULL; free(IDA_mem->ida_grout); IDA_mem->ida_grout = NULL; free(IDA_mem->ida_iroots); IDA_mem->ida_iroots = NULL; free(IDA_mem->ida_rootdir); IDA_mem->ida_rootdir = NULL; free(IDA_mem->ida_gactive); IDA_mem->ida_gactive = NULL; IDA_mem->ida_lrw -= 3 * (IDA_mem->ida_nrtfn); IDA_mem->ida_liw -= 3 * (IDA_mem->ida_nrtfn); } /* If IDARootInit() was called with nrtfn == 0, then set ida_nrtfn to zero and ida_gfun to NULL before returning */ if (nrt == 0) { IDA_mem->ida_nrtfn = nrt; IDA_mem->ida_gfun = NULL; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* If rerunning IDARootInit() with the same number of root functions (not changing number of gfun components), 
then check if the root function argument has changed */ /* If g != NULL then return as currently reserved memory resources will suffice */ if (nrt == IDA_mem->ida_nrtfn) { if (g != IDA_mem->ida_gfun) { if (g == NULL) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; free(IDA_mem->ida_ghi); IDA_mem->ida_ghi = NULL; free(IDA_mem->ida_grout); IDA_mem->ida_grout = NULL; free(IDA_mem->ida_iroots); IDA_mem->ida_iroots = NULL; free(IDA_mem->ida_rootdir); IDA_mem->ida_rootdir = NULL; free(IDA_mem->ida_gactive); IDA_mem->ida_gactive = NULL; IDA_mem->ida_lrw -= 3*nrt; IDA_mem->ida_liw -= 3*nrt; IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDARootInit", MSG_ROOT_FUNC_NULL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } else { IDA_mem->ida_gfun = g; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } } else { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } } /* Set variable values in IDA memory block */ IDA_mem->ida_nrtfn = nrt; if (g == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDARootInit", MSG_ROOT_FUNC_NULL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } else { IDA_mem->ida_gfun = g; } /* Allocate necessary memory and return */ IDA_mem->ida_glo = NULL; IDA_mem->ida_glo = (realtype *) malloc(nrt*sizeof(realtype)); if (IDA_mem->ida_glo == NULL) { IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDARootInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } IDA_mem->ida_ghi = NULL; IDA_mem->ida_ghi = (realtype *) malloc(nrt*sizeof(realtype)); if (IDA_mem->ida_ghi == NULL) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDARootInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } IDA_mem->ida_grout = NULL; IDA_mem->ida_grout = (realtype *) malloc(nrt*sizeof(realtype)); if (IDA_mem->ida_grout == NULL) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; 
free(IDA_mem->ida_ghi); IDA_mem->ida_ghi = NULL; IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDARootInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } IDA_mem->ida_iroots = NULL; IDA_mem->ida_iroots = (int *) malloc(nrt*sizeof(int)); if (IDA_mem->ida_iroots == NULL) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; free(IDA_mem->ida_ghi); IDA_mem->ida_ghi = NULL; free(IDA_mem->ida_grout); IDA_mem->ida_grout = NULL; IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDARootInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } IDA_mem->ida_rootdir = NULL; IDA_mem->ida_rootdir = (int *) malloc(nrt*sizeof(int)); if (IDA_mem->ida_rootdir == NULL) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; free(IDA_mem->ida_ghi); IDA_mem->ida_ghi = NULL; free(IDA_mem->ida_grout); IDA_mem->ida_grout = NULL; free(IDA_mem->ida_iroots); IDA_mem->ida_iroots = NULL; IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDAS", "IDARootInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } IDA_mem->ida_gactive = NULL; IDA_mem->ida_gactive = (booleantype *) malloc(nrt*sizeof(booleantype)); if (IDA_mem->ida_gactive == NULL) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; free(IDA_mem->ida_ghi); IDA_mem->ida_ghi = NULL; free(IDA_mem->ida_grout); IDA_mem->ida_grout = NULL; free(IDA_mem->ida_iroots); IDA_mem->ida_iroots = NULL; free(IDA_mem->ida_rootdir); IDA_mem->ida_rootdir = NULL; IDAProcessError(IDA_mem, IDA_MEM_FAIL, "IDA", "IDARootInit", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_MEM_FAIL); } /* Set default values for rootdir (both directions) */ for(i=0; iida_rootdir[i] = 0; /* Set default values for gactive (all active) */ for(i=0; iida_gactive[i] = SUNTRUE; IDA_mem->ida_lrw += 3*nrt; IDA_mem->ida_liw += 3*nrt; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Main solver function 
* ----------------------------------------------------------------- */ /* * IDASolve * * This routine is the main driver of the IDA package. * * It integrates over an independent variable interval defined by the user, * by calling IDAStep to take internal independent variable steps. * * The first time that IDASolve is called for a successfully initialized * problem, it computes a tentative initial step size. * * IDASolve supports two modes, specified by itask: * In the IDA_NORMAL mode, the solver steps until it passes tout and then * interpolates to obtain y(tout) and yp(tout). * In the IDA_ONE_STEP mode, it takes one internal step and returns. * * IDASolve returns integer values corresponding to success and failure as below: * * successful returns: * * IDA_SUCCESS * IDA_TSTOP_RETURN * * failed returns: * * IDA_ILL_INPUT * IDA_TOO_MUCH_WORK * IDA_MEM_NULL * IDA_TOO_MUCH_ACC * IDA_CONV_FAIL * IDA_LSETUP_FAIL * IDA_LSOLVE_FAIL * IDA_CONSTR_FAIL * IDA_ERR_FAIL * IDA_REP_RES_ERR * IDA_RES_FAIL */ int IDASolve(void *ida_mem, realtype tout, realtype *tret, N_Vector yret, N_Vector ypret, int itask) { long int nstloc; int sflag, istate, ier, irfndp, ir, is; realtype tdist, troundoff, ypnorm, rh, nrm; IDAMem IDA_mem; booleantype inactive_roots; /* Check for legal inputs in all cases. 
*/ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASolve", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Check if problem was malloc'ed */ if (IDA_mem->ida_MallocDone == SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_MALLOC, "IDAS", "IDASolve", MSG_NO_MALLOC); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_MALLOC); } /* Check for legal arguments */ if (yret == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_YRET_NULL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDA_mem->ida_yy = yret; if (ypret == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_YPRET_NULL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDA_mem->ida_yp = ypret; if (tret == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_TRET_NULL); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if ((itask != IDA_NORMAL) && (itask != IDA_ONE_STEP)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_BAD_ITASK); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if (itask == IDA_NORMAL) IDA_mem->ida_toutc = tout; IDA_mem->ida_taskc = itask; /* Sensitivity-specific tests (if using internal DQ functions) */ if (IDA_mem->ida_sensi && IDA_mem->ida_resSDQ) { /* Make sure we have the right 'user data' */ IDA_mem->ida_user_dataS = ida_mem; /* Test if we have the problem parameters */ if(IDA_mem->ida_p == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_NULL_P); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } } if (IDA_mem->ida_quadr_sensi && IDA_mem->ida_rhsQSDQ) { IDA_mem->ida_user_dataQS = ida_mem; /* Test if we have the problem parameters */ if(IDA_mem->ida_p == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_NULL_P); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } } if 
(IDA_mem->ida_nst == 0) { /* This is the first call */ /* Check inputs to IDA for correctness and consistency */ if (IDA_mem->ida_SetupDone == SUNFALSE) { ier = IDAInitialSetup(IDA_mem); if (ier != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(ier); } IDA_mem->ida_SetupDone = SUNTRUE; } /* On first call, check for tout - tn too small, set initial hh, check for approach to tstop, and scale phi[1], phiQ[1], and phiS[1] by hh. Also check for zeros of root function g at and near t0. */ tdist = SUNRabs(tout - IDA_mem->ida_tn); if (tdist == ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_TOO_CLOSE); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } troundoff = TWO * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(tout)); if (tdist < troundoff) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_TOO_CLOSE); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } IDA_mem->ida_hh = IDA_mem->ida_hin; if ( (IDA_mem->ida_hh != ZERO) && ((tout-IDA_mem->ida_tn)*IDA_mem->ida_hh < ZERO) ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_BAD_HINIT); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if (IDA_mem->ida_hh == ZERO) { IDA_mem->ida_hh = PT001*tdist; ypnorm = IDAWrmsNorm(IDA_mem, IDA_mem->ida_phi[1], IDA_mem->ida_ewt, IDA_mem->ida_suppressalg); if (IDA_mem->ida_errconQ) ypnorm = IDAQuadWrmsNormUpdate(IDA_mem, ypnorm, IDA_mem->ida_phiQ[1], IDA_mem->ida_ewtQ); if (IDA_mem->ida_errconS) ypnorm = IDASensWrmsNormUpdate(IDA_mem, ypnorm, IDA_mem->ida_phiS[1], IDA_mem->ida_ewtS, IDA_mem->ida_suppressalg); if (IDA_mem->ida_errconQS) ypnorm = IDAQuadSensWrmsNormUpdate(IDA_mem, ypnorm, IDA_mem->ida_phiQS[1], IDA_mem->ida_ewtQS); if (ypnorm > HALF / IDA_mem->ida_hh) IDA_mem->ida_hh = HALF/ypnorm; if (tout < IDA_mem->ida_tn) IDA_mem->ida_hh = -IDA_mem->ida_hh; } rh = SUNRabs(IDA_mem->ida_hh) * IDA_mem->ida_hmax_inv; if (rh > ONE) IDA_mem->ida_hh /= rh; if 
(IDA_mem->ida_tstopset) { if ( (IDA_mem->ida_tstop - IDA_mem->ida_tn)*IDA_mem->ida_hh <= ZERO) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_BAD_TSTOP, IDA_mem->ida_tstop, IDA_mem->ida_tn); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } if ( (IDA_mem->ida_tn + IDA_mem->ida_hh - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO) IDA_mem->ida_hh = (IDA_mem->ida_tstop - IDA_mem->ida_tn)*(ONE - FOUR * IDA_mem->ida_uround); } IDA_mem->ida_h0u = IDA_mem->ida_hh; IDA_mem->ida_kk = 0; IDA_mem->ida_kused = 0; /* set in case of an error return before a step */ /* Check for exact zeros of the root functions at or near t0. */ if (IDA_mem->ida_nrtfn > 0) { ier = IDARcheck1(IDA_mem); if (ier == IDA_RTFUNC_FAIL) { IDAProcessError(IDA_mem, IDA_RTFUNC_FAIL, "IDAS", "IDARcheck1", MSG_RTFUNC_FAILED, IDA_mem->ida_tn); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_RTFUNC_FAIL); } } /* set phi[1] = hh*y' */ N_VScale(IDA_mem->ida_hh, IDA_mem->ida_phi[1], IDA_mem->ida_phi[1]); /* set phiQ[1] = hh*yQ' */ if (IDA_mem->ida_quadr) N_VScale(IDA_mem->ida_hh, IDA_mem->ida_phiQ[1], IDA_mem->ida_phiQ[1]); if (IDA_mem->ida_sensi || IDA_mem->ida_quadr_sensi) for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = IDA_mem->ida_hh; if (IDA_mem->ida_sensi) { /* set phiS[1][i] = hh*yS_i' */ ier = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_phiS[1], IDA_mem->ida_phiS[1]); if (ier != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } } if (IDA_mem->ida_quadr_sensi) { ier = N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_phiQS[1], IDA_mem->ida_phiQS[1]); if (ier != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } } /* Set the convergence test constants epsNewt and toldel */ IDA_mem->ida_epsNewt = IDA_mem->ida_epcon; IDA_mem->ida_toldel = PT0001 * IDA_mem->ida_epsNewt; } /* end of first-call block. 
*/ /* Call lperf function and set nstloc for later performance testing. */ if (IDA_mem->ida_lperf != NULL) IDA_mem->ida_lperf(IDA_mem, 0); nstloc = 0; /* If not the first call, perform all stopping tests. */ if (IDA_mem->ida_nst > 0) { /* First, check for a root in the last step taken, other than the last root found, if any. If itask = IDA_ONE_STEP and y(tn) was not returned because of an intervening root, return y(tn) now. */ if (IDA_mem->ida_nrtfn > 0) { irfndp = IDA_mem->ida_irfnd; ier = IDARcheck2(IDA_mem); if (ier == CLOSERT) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDARcheck2", MSG_CLOSE_ROOTS, IDA_mem->ida_tlo); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ILL_INPUT); } else if (ier == IDA_RTFUNC_FAIL) { IDAProcessError(IDA_mem, IDA_RTFUNC_FAIL, "IDAS", "IDARcheck2", MSG_RTFUNC_FAILED, IDA_mem->ida_tlo); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_RTFUNC_FAIL); } else if (ier == RTFOUND) { IDA_mem->ida_tretlast = *tret = IDA_mem->ida_tlo; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ROOT_RETURN); } /* If tn is distinct from tretlast (within roundoff), check remaining interval for roots */ troundoff = HUNDRED * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)); if ( SUNRabs(IDA_mem->ida_tn - IDA_mem->ida_tretlast) > troundoff ) { ier = IDARcheck3(IDA_mem); if (ier == IDA_SUCCESS) { /* no root found */ IDA_mem->ida_irfnd = 0; if ((irfndp == 1) && (itask == IDA_ONE_STEP)) { IDA_mem->ida_tretlast = *tret = IDA_mem->ida_tn; ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } } else if (ier == RTFOUND) { /* a new root was found */ IDA_mem->ida_irfnd = 1; IDA_mem->ida_tretlast = *tret = IDA_mem->ida_tlo; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_ROOT_RETURN); } else if (ier == IDA_RTFUNC_FAIL) { /* g failed */ IDAProcessError(IDA_mem, IDA_RTFUNC_FAIL, "IDAS", "IDARcheck3", MSG_RTFUNC_FAILED, IDA_mem->ida_tlo); 
SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_RTFUNC_FAIL); } } } /* end of root stop check */ /* Now test for all other stop conditions. */ istate = IDAStopTest1(IDA_mem, tout, tret, yret, ypret, itask); if (istate != CONTINUE_STEPS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(istate); } } /* Looping point for internal steps. */ for(;;) { /* Check for too many steps taken. */ if ( (IDA_mem->ida_mxstep>0) && (nstloc >= IDA_mem->ida_mxstep) ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_MAX_STEPS, IDA_mem->ida_tn); istate = IDA_TOO_MUCH_WORK; *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn; break; /* Here yy=yret and yp=ypret already have the current solution. */ } /* Call lperf to generate warnings of poor performance. */ if (IDA_mem->ida_lperf != NULL) IDA_mem->ida_lperf(IDA_mem, 1); /* Reset and check ewt, ewtQ, ewtS and ewtQS (if not first call). */ if (IDA_mem->ida_nst > 0) { ier = IDA_mem->ida_efun(IDA_mem->ida_phi[0], IDA_mem->ida_ewt, IDA_mem->ida_edata); if (ier != 0) { if (IDA_mem->ida_itol == IDA_WF) IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_EWT_NOW_FAIL, IDA_mem->ida_tn); else IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_EWT_NOW_BAD, IDA_mem->ida_tn); istate = IDA_ILL_INPUT; ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret); *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn; break; } if (IDA_mem->ida_quadr && IDA_mem->ida_errconQ) { ier = IDAQuadEwtSet(IDA_mem, IDA_mem->ida_phiQ[0], IDA_mem->ida_ewtQ); if (ier != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_EWTQ_NOW_BAD, IDA_mem->ida_tn); istate = IDA_ILL_INPUT; ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret); *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn; break; } } if (IDA_mem->ida_sensi) { ier = IDASensEwtSet(IDA_mem, IDA_mem->ida_phiS[0], IDA_mem->ida_ewtS); if (ier != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_EWTS_NOW_BAD, IDA_mem->ida_tn); istate = 
IDA_ILL_INPUT; ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret); *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn; break; } } if (IDA_mem->ida_quadr_sensi && IDA_mem->ida_errconQS) { ier = IDAQuadSensEwtSet(IDA_mem, IDA_mem->ida_phiQS[0], IDA_mem->ida_ewtQS); if (ier != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_EWTQS_NOW_BAD, IDA_mem->ida_tn); istate = IDA_ILL_INPUT; ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret); IDA_mem->ida_tretlast = *tret = IDA_mem->ida_tn; break; } } } /* Check for too much accuracy requested. */ nrm = IDAWrmsNorm(IDA_mem, IDA_mem->ida_phi[0], IDA_mem->ida_ewt, IDA_mem->ida_suppressalg); if (IDA_mem->ida_errconQ) nrm = IDAQuadWrmsNormUpdate(IDA_mem, nrm, IDA_mem->ida_phiQ[0], IDA_mem->ida_ewtQ); if (IDA_mem->ida_errconS) nrm = IDASensWrmsNormUpdate(IDA_mem, nrm, IDA_mem->ida_phiS[0], IDA_mem->ida_ewtS, IDA_mem->ida_suppressalg); if (IDA_mem->ida_errconQS) nrm = IDAQuadSensWrmsNormUpdate(IDA_mem, nrm, IDA_mem->ida_phiQS[0], IDA_mem->ida_ewtQS); IDA_mem->ida_tolsf = IDA_mem->ida_uround * nrm; if (IDA_mem->ida_tolsf > ONE) { IDA_mem->ida_tolsf *= TEN; IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDASolve", MSG_TOO_MUCH_ACC, IDA_mem->ida_tn); istate = IDA_TOO_MUCH_ACC; *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn; if (IDA_mem->ida_nst > 0) ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret); break; } /* Call IDAStep to take a step. */ sflag = IDAStep(IDA_mem); /* Process all failed-step cases, and exit loop. 
*/ if (sflag != IDA_SUCCESS) { istate = IDAHandleFailure(IDA_mem, sflag); *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn; ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret); break; } nstloc++; /* If tstop is set and was reached, reset IDA_mem->ida_tn = tstop */ if (IDA_mem->ida_tstopset) { troundoff = HUNDRED * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)); if (SUNRabs(IDA_mem->ida_tn - IDA_mem->ida_tstop) <= troundoff) IDA_mem->ida_tn = IDA_mem->ida_tstop; } /* After successful step, check for stop conditions; continue or break. */ /* First check for root in the last step taken. */ if (IDA_mem->ida_nrtfn > 0) { ier = IDARcheck3(IDA_mem); if (ier == RTFOUND) { /* A new root was found */ IDA_mem->ida_irfnd = 1; istate = IDA_ROOT_RETURN; IDA_mem->ida_tretlast = *tret = IDA_mem->ida_tlo; break; } else if (ier == IDA_RTFUNC_FAIL) { /* g failed */ IDAProcessError(IDA_mem, IDA_RTFUNC_FAIL, "IDAS", "IDARcheck3", MSG_RTFUNC_FAILED, IDA_mem->ida_tlo); istate = IDA_RTFUNC_FAIL; break; } /* If we are at the end of the first step and we still have * some event functions that are inactive, issue a warning * as this may indicate a user error in the implementation * of the root function. */ if (IDA_mem->ida_nst==1) { inactive_roots = SUNFALSE; for (ir=0; irida_nrtfn; ir++) { if (!IDA_mem->ida_gactive[ir]) { inactive_roots = SUNTRUE; break; } } if ((IDA_mem->ida_mxgnull > 0) && inactive_roots) { IDAProcessError(IDA_mem, IDA_WARNING, "IDAS", "IDASolve", MSG_INACTIVE_ROOTS); } } } /* Now check all other stop conditions. 
*/ istate = IDAStopTest2(IDA_mem, tout, tret, yret, ypret, itask); if (istate != CONTINUE_STEPS) break; } /* End of step loop */ SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(istate); } /* * ----------------------------------------------------------------- * Interpolated output and extraction functions * ----------------------------------------------------------------- */ /* * IDAGetDky * * This routine evaluates the k-th derivative of y(t) as the value of * the k-th derivative of the interpolating polynomial at the independent * variable t, and stores the results in the vector dky. It uses the current * independent variable value, tn, and the method order last used, kused. * * The return values are: * IDA_SUCCESS if t is legal * IDA_BAD_T if t is not within the interval of the last step taken * IDA_BAD_DKY if the dky vector is NULL * IDA_BAD_K if the requested k is not in the range [0,order used] * IDA_VECTOROP_ERR if the fused vector operation fails * */ int IDAGetDky(void *ida_mem, realtype t, int k, N_Vector dky) { IDAMem IDA_mem; realtype tfuzz, tp, delt, psij_1; int i, j, retval; realtype cjk [MXORDP1]; realtype cjk_1[MXORDP1]; /* Check ida_mem */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetDky", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); if (dky == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetDky", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } if ((k < 0) || (k > IDA_mem->ida_kused)) { IDAProcessError(IDA_mem, IDA_BAD_K, "IDAS", "IDAGetDky", MSG_BAD_K); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_K); } /* Check t for legality. Here tn - hused is t_{n-1}. 
*/ tfuzz = HUNDRED * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)); if (IDA_mem->ida_hh < ZERO) tfuzz = - tfuzz; tp = IDA_mem->ida_tn - IDA_mem->ida_hused - tfuzz; if ((t - tp)*IDA_mem->ida_hh < ZERO) { IDAProcessError(IDA_mem, IDA_BAD_T, "IDAS", "IDAGetDky", MSG_BAD_T, t, IDA_mem->ida_tn-IDA_mem->ida_hused, IDA_mem->ida_tn); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_T); } /* Initialize the c_j^(k) and c_k^(k-1) */ for(i=0; iida_tn; for(i=0; i<=k; i++) { /* The below reccurence is used to compute the k-th derivative of the solution: c_j^(k) = ( k * c_{j-1}^(k-1) + c_{j-1}^{k} (Delta+psi_{j-1}) ) / psi_j Translated in indexes notation: cjk[j] = ( k*cjk_1[j-1] + cjk[j-1]*(delt+psi[j-2]) ) / psi[j-1] For k=0, j=1: c_1 = c_0^(-1) + (delt+psi[-1]) / psi[0] In order to be able to deal with k=0 in the same way as for k>0, the following conventions were adopted: - c_0(t) = 1 , c_0^(-1)(t)=0 - psij_1 stands for psi[-1]=0 when j=1 for psi[j-2] when j>1 */ if(i==0) { cjk[i] = 1; psij_1 = 0; }else { /* i i-1 1 c_i^(i) can be always updated since c_i^(i) = ----- -------- ... 
----- psi_j psi_{j-1} psi_1 */ cjk[i] = cjk[i-1]*i / IDA_mem->ida_psi[i-1]; psij_1 = IDA_mem->ida_psi[i-1]; } /* update c_j^(i) */ /*j does not need to go till kused */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) { cjk[j] = ( i* cjk_1[j-1] + cjk[j-1] * (delt + psij_1) ) / IDA_mem->ida_psi[j-1]; psij_1 = IDA_mem->ida_psi[j-1]; } /* save existing c_j^(i)'s */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) cjk_1[j] = cjk[j]; } /* Compute sum (c_j(t) * phi(t)) */ /* Sum j=k to j<=IDA_mem->ida_kused */ retval = N_VLinearCombination(IDA_mem->ida_kused-k+1, cjk+k, IDA_mem->ida_phi+k, dky); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAGetQuad * * The following function can be called to obtain the quadrature * variables after a successful integration step. * * This is just a wrapper that calls IDAGetQuadDky with k=0. */ int IDAGetQuad(void *ida_mem, realtype *ptret, N_Vector yQout) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuad", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem)ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); *ptret = IDA_mem->ida_tretlast; retval = IDAGetQuadDky(ida_mem, IDA_mem->ida_tretlast, 0, yQout); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(retval); } /* * IDAGetQuadDky * * Returns the quadrature variables (or their * derivatives up to the current method order) at any time within * the last integration step (dense output). 
*/ int IDAGetQuadDky(void *ida_mem, realtype t, int k, N_Vector dkyQ) { IDAMem IDA_mem; realtype tfuzz, tp, delt, psij_1; int i, j, retval; realtype cjk [MXORDP1]; realtype cjk_1[MXORDP1]; /* Check ida_mem */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadDky", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /* Ckeck if quadrature was initialized */ if (IDA_mem->ida_quadr != SUNTRUE) { IDAProcessError(IDA_mem, IDA_NO_QUAD, "IDAS", "IDAGetQuadDky", MSG_NO_QUAD); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_QUAD); } if (dkyQ == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetQuadDky", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } if ((k < 0) || (k > IDA_mem->ida_kk)) { IDAProcessError(IDA_mem, IDA_BAD_K, "IDAS", "IDAGetQuadDky", MSG_BAD_K); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_K); } /* Check t for legality. Here tn - hused is t_{n-1}. 
*/ tfuzz = HUNDRED * IDA_mem->ida_uround * (IDA_mem->ida_tn + IDA_mem->ida_hh); tp = IDA_mem->ida_tn - IDA_mem->ida_hused - tfuzz; if ( (t - tp)*IDA_mem->ida_hh < ZERO) { IDAProcessError(IDA_mem, IDA_BAD_T, "IDAS", "IDAGetQuadDky", MSG_BAD_T, t, IDA_mem->ida_tn-IDA_mem->ida_hused, IDA_mem->ida_tn); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_T); } /* Initialize the c_j^(k) and c_k^(k-1) */ for(i=0; iida_tn; for(i=0; i<=k; i++) { if(i==0) { cjk[i] = 1; psij_1 = 0; }else { cjk[i] = cjk[i-1]*i / IDA_mem->ida_psi[i-1]; psij_1 = IDA_mem->ida_psi[i-1]; } /* update c_j^(i) */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) { cjk[j] = ( i* cjk_1[j-1] + cjk[j-1] * (delt + psij_1) ) / IDA_mem->ida_psi[j-1]; psij_1 = IDA_mem->ida_psi[j-1]; } /* save existing c_j^(i)'s */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) cjk_1[j] = cjk[j]; } /* Compute sum (c_j(t) * phi(t)) */ retval = N_VLinearCombination(IDA_mem->ida_kused-k+1, cjk+k, IDA_mem->ida_phiQ+k, dkyQ); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAGetSens * * This routine extracts sensitivity solution into yySout at the * time at which IDASolve returned the solution. * This is just a wrapper that calls IDAGetSensDky1 with k=0 and * is=0, 1, ... ,NS-1. */ int IDAGetSens(void *ida_mem, realtype *ptret, N_Vector *yySout) { IDAMem IDA_mem; int is, ierr=0; /* Check ida_mem */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSens", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /*Check the parameters */ if (yySout == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetSens", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } /* are sensitivities enabled? 
*/ if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSens", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } *ptret = IDA_mem->ida_tretlast; for(is=0; isida_Ns; is++) if( IDA_SUCCESS != (ierr = IDAGetSensDky1(ida_mem, *ptret, 0, is, yySout[is])) ) break; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(ierr); } /* * IDAGetSensDky * * Computes the k-th derivative of all sensitivities of the y function at * time t. It repeatedly calls IDAGetSensDky1. The argument dkyS must be * a pointer to N_Vector and must be allocated by the user to hold at * least Ns vectors. */ int IDAGetSensDky(void *ida_mem, realtype t, int k, N_Vector *dkySout) { int is, ier=0; IDAMem IDA_mem; /* Check all inputs for legality */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensDky", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensDky", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } if (dkySout == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetSensDky", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } if ((k < 0) || (k > IDA_mem->ida_kk)) { IDAProcessError(IDA_mem, IDA_BAD_K, "IDAS", "IDAGetSensDky", MSG_BAD_K); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_K); } for (is=0; isida_Ns; is++) { ier = IDAGetSensDky1(ida_mem, t, k, is, dkySout[is]); if (ier!=IDA_SUCCESS) break; } SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(ier); } /* * IDAGetSens1 * * This routine extracts the is-th sensitivity solution into ySout * at the time at which IDASolve returned the solution. * This is just a wrapper that calls IDASensDky1 with k=0. 
*/ int IDAGetSens1(void *ida_mem, realtype *ptret, int is, N_Vector yySret) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSens1", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); *ptret = IDA_mem->ida_tretlast; retval = IDAGetSensDky1(ida_mem, *ptret, 0, is, yySret); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(retval); } /* * IDAGetSensDky1 * * IDASensDky1 computes the kth derivative of the yS[is] function * at time t, where tn-hu <= t <= tn, tn denotes the current * internal time reached, and hu is the last internal step size * successfully used by the solver. The user may request * is=0, 1, ..., Ns-1 and k=0, 1, ..., kk, where kk is the current * order. The derivative vector is returned in dky. This vector * must be allocated by the caller. It is only legal to call this * function after a successful return from IDASolve with sensitivity * computation enabled. */ int IDAGetSensDky1(void *ida_mem, realtype t, int k, int is, N_Vector dkyS) { IDAMem IDA_mem; realtype tfuzz, tp, delt, psij_1; int i, j, retval; realtype cjk [MXORDP1]; realtype cjk_1[MXORDP1]; /* Check all inputs for legality */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSensDky1", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetSensDky1", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } if (dkyS == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetSensDky1", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } /* Is the requested sensitivity index valid? 
*/ if(is<0 || is >= IDA_mem->ida_Ns) { IDAProcessError(IDA_mem, IDA_BAD_IS, "IDAS", "IDAGetSensDky1", MSG_BAD_IS); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_IS); } /* Is the requested order valid? */ if ((k < 0) || (k > IDA_mem->ida_kused)) { IDAProcessError(IDA_mem, IDA_BAD_K, "IDAS", "IDAGetSensDky1", MSG_BAD_K); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_K); } /* Check t for legality. Here tn - hused is t_{n-1}. */ tfuzz = HUNDRED * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)); if (IDA_mem->ida_hh < ZERO) tfuzz = - tfuzz; tp = IDA_mem->ida_tn - IDA_mem->ida_hused - tfuzz; if ((t - tp)*IDA_mem->ida_hh < ZERO) { IDAProcessError(IDA_mem, IDA_BAD_T, "IDAS", "IDAGetSensDky1", MSG_BAD_T, t, IDA_mem->ida_tn-IDA_mem->ida_hused, IDA_mem->ida_tn); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_T); } /* Initialize the c_j^(k) and c_k^(k-1) */ for(i=0; iida_tn; for(i=0; i<=k; i++) { if(i==0) { cjk[i] = 1; psij_1 = 0; }else { cjk[i] = cjk[i-1]*i / IDA_mem->ida_psi[i-1]; psij_1 = IDA_mem->ida_psi[i-1]; } /* Update cjk based on the reccurence */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) { cjk[j] = ( i* cjk_1[j-1] + cjk[j-1] * (delt + psij_1) ) / IDA_mem->ida_psi[j-1]; psij_1 = IDA_mem->ida_psi[j-1]; } /* Update cjk_1 for the next step */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) cjk_1[j] = cjk[j]; } /* Compute sum (c_j(t) * phi(t)) */ for(j=k; j<=IDA_mem->ida_kused; j++) IDA_mem->ida_Xvecs[j-k] = IDA_mem->ida_phiS[j][is]; retval = N_VLinearCombination(IDA_mem->ida_kused-k+1, cjk+k, IDA_mem->ida_Xvecs, dkyS); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAGetQuadSens * * This routine extracts quadrature sensitivity solution into yyQSout at the * time at which IDASolve returned the solution. * This is just a wrapper that calls IDAGetQuadSensDky1 with k=0 and * is=0, 1, ... ,NS-1. 
*/ int IDAGetQuadSens(void *ida_mem, realtype *ptret, N_Vector *yyQSout) { IDAMem IDA_mem; int is, ierr=0; /* Check ida_mem */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSens", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); /*Check the parameters */ if (yyQSout == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetQuadSens", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } /* are sensitivities enabled? */ if (IDA_mem->ida_quadr_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetQuadSens", MSG_NO_QUADSENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } *ptret = IDA_mem->ida_tretlast; for(is=0; isida_Ns; is++) if( IDA_SUCCESS != (ierr = IDAGetQuadSensDky1(ida_mem, *ptret, 0, is, yyQSout[is])) ) break; SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(ierr); } /* * IDAGetQuadSensDky * * Computes the k-th derivative of all quadratures sensitivities of the y function at * time t. It repeatedly calls IDAGetQuadSensDky. The argument dkyS must be * a pointer to N_Vector and must be allocated by the user to hold at * least Ns vectors. 
*/ int IDAGetQuadSensDky(void *ida_mem, realtype t, int k, N_Vector *dkyQSout) { int is, ier=0; IDAMem IDA_mem; /* Check all inputs for legality */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSensDky", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetQuadSensDky", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } if (IDA_mem->ida_quadr_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAGetQuadSensDky", MSG_NO_QUADSENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_QUADSENS); } if (dkyQSout == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetQuadSensDky", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } if ((k < 0) || (k > IDA_mem->ida_kk)) { IDAProcessError(IDA_mem, IDA_BAD_K, "IDAS", "IDAGetQuadSensDky", MSG_BAD_K); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_K); } for (is=0; isida_Ns; is++) { ier = IDAGetQuadSensDky1(ida_mem, t, k, is, dkyQSout[is]); if (ier!=IDA_SUCCESS) break; } SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(ier); } /* * IDAGetQuadSens1 * * This routine extracts the is-th quadrature sensitivity solution into yQSout * at the time at which IDASolve returned the solution. * This is just a wrapper that calls IDASensDky1 with k=0. 
*/ int IDAGetQuadSens1(void *ida_mem, realtype *ptret, int is, N_Vector yyQSret) { IDAMem IDA_mem; int retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSens1", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetQuadSens1", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } if (IDA_mem->ida_quadr_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAGetQuadSens1", MSG_NO_QUADSENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_QUADSENS); } if (yyQSret == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetQuadSens1", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } *ptret = IDA_mem->ida_tretlast; retval = IDAGetQuadSensDky1(ida_mem, *ptret, 0, is, yyQSret); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(retval); } /* * IDAGetQuadSensDky1 * * IDAGetQuadSensDky1 computes the kth derivative of the yS[is] function * at time t, where tn-hu <= t <= tn, tn denotes the current * internal time reached, and hu is the last internal step size * successfully used by the solver. The user may request * is=0, 1, ..., Ns-1 and k=0, 1, ..., kk, where kk is the current * order. The derivative vector is returned in dky. This vector * must be allocated by the caller. It is only legal to call this * function after a successful return from IDASolve with sensitivity * computation enabled. 
*/ int IDAGetQuadSensDky1(void *ida_mem, realtype t, int k, int is, N_Vector dkyQS) { IDAMem IDA_mem; realtype tfuzz, tp, delt, psij_1; int i, j, retval; realtype cjk [MXORDP1]; realtype cjk_1[MXORDP1]; /* Check all inputs for legality */ if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetQuadSensDky1", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); if (IDA_mem->ida_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_SENS, "IDAS", "IDAGetQuadSensDky1", MSG_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_SENS); } if (IDA_mem->ida_quadr_sensi==SUNFALSE) { IDAProcessError(IDA_mem, IDA_NO_QUADSENS, "IDAS", "IDAGetQuadSensDky1", MSG_NO_QUADSENSI); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_NO_QUADSENS); } if (dkyQS == NULL) { IDAProcessError(IDA_mem, IDA_BAD_DKY, "IDAS", "IDAGetQuadSensDky1", MSG_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_DKY); } /* Is the requested sensitivity index valid*/ if(is<0 || is >= IDA_mem->ida_Ns) { IDAProcessError(IDA_mem, IDA_BAD_IS, "IDAS", "IDAGetQuadSensDky1", MSG_BAD_IS); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_IS); } /* Is the requested order valid? */ if ((k < 0) || (k > IDA_mem->ida_kused)) { IDAProcessError(IDA_mem, IDA_BAD_K, "IDAS", "IDAGetQuadSensDky1", MSG_BAD_K); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_K); } /* Check t for legality. Here tn - hused is t_{n-1}. 
*/ tfuzz = HUNDRED * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)); if (IDA_mem->ida_hh < ZERO) tfuzz = - tfuzz; tp = IDA_mem->ida_tn - IDA_mem->ida_hused - tfuzz; if ((t - tp)*IDA_mem->ida_hh < ZERO) { IDAProcessError(IDA_mem, IDA_BAD_T, "IDAS", "IDAGetQuadSensDky1", MSG_BAD_T, t, IDA_mem->ida_tn-IDA_mem->ida_hused, IDA_mem->ida_tn); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_BAD_T); } /* Initialize the c_j^(k) and c_k^(k-1) */ for(i=0; iida_tn; for(i=0; i<=k; i++) { if(i==0) { cjk[i] = 1; psij_1 = 0; }else { cjk[i] = cjk[i-1]*i / IDA_mem->ida_psi[i-1]; psij_1 = IDA_mem->ida_psi[i-1]; } /* Update cjk based on the reccurence */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) { cjk[j] = ( i* cjk_1[j-1] + cjk[j-1] * (delt + psij_1) ) / IDA_mem->ida_psi[j-1]; psij_1 = IDA_mem->ida_psi[j-1]; } /* Update cjk_1 for the next step */ for(j=i+1; j<=IDA_mem->ida_kused-k+i; j++) cjk_1[j] = cjk[j]; } /* Compute sum (c_j(t) * phi(t)) */ for(j=k; j<=IDA_mem->ida_kused; j++) IDA_mem->ida_Xvecs[j-k] = IDA_mem->ida_phiQS[j][is]; retval = N_VLinearCombination(IDA_mem->ida_kused-k+1, cjk+k, IDA_mem->ida_Xvecs, dkyQS); if (retval != IDA_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_VECTOROP_ERR); } SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAComputeY * * Computes y based on the current prediction and given correction. */ int IDAComputeY(void *ida_mem, N_Vector ycor, N_Vector y) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAComputeY", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); N_VLinearSum(ONE, IDA_mem->ida_yypredict, ONE, ycor, y); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAComputeYp * * Computes y' based on the current prediction and given correction. 
*/ int IDAComputeYp(void *ida_mem, N_Vector ycor, N_Vector yp) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAComputeYp", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); N_VLinearSum(ONE, IDA_mem->ida_yppredict, IDA_mem->ida_cj, ycor, yp); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAComputeYSens * * Computes yS based on the current prediction and given correction. */ int IDAComputeYSens(void *ida_mem, N_Vector *ycorS, N_Vector *yyS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAComputeYSens", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_yySpredict, ONE, ycorS, yyS); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * IDAComputeYpSens * * Computes yS' based on the current prediction and given correction. */ int IDAComputeYpSens(void *ida_mem, N_Vector *ycorS, N_Vector *ypS) { IDAMem IDA_mem; if (ida_mem==NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAComputeYpSens", MSG_NO_MEM); return(IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; SUNDIALS_MARK_FUNCTION_BEGIN(IDA_PROFILER); N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_ypSpredict, IDA_mem->ida_cj, ycorS, ypS); SUNDIALS_MARK_FUNCTION_END(IDA_PROFILER); return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Deallocation functions * ----------------------------------------------------------------- */ /* * IDAFree * * This routine frees the problem memory allocated by IDAInit * Such memory includes all the vectors allocated by IDAAllocVectors, * and the memory lmem for the linear solver (deallocated by a call * to lfree). 
*/ void IDAFree(void **ida_mem) { IDAMem IDA_mem; if (*ida_mem == NULL) return; IDA_mem = (IDAMem) (*ida_mem); IDAFreeVectors(IDA_mem); IDAQuadFree(IDA_mem); IDASensFree(IDA_mem); IDAQuadSensFree(IDA_mem); IDAAdjFree(IDA_mem); /* if IDA created the NLS object then free it */ if (IDA_mem->ownNLS) { SUNNonlinSolFree(IDA_mem->NLS); IDA_mem->ownNLS = SUNFALSE; IDA_mem->NLS = NULL; } if (IDA_mem->ida_lfree != NULL) IDA_mem->ida_lfree(IDA_mem); if (IDA_mem->ida_nrtfn > 0) { free(IDA_mem->ida_glo); IDA_mem->ida_glo = NULL; free(IDA_mem->ida_ghi); IDA_mem->ida_ghi = NULL; free(IDA_mem->ida_grout); IDA_mem->ida_grout = NULL; free(IDA_mem->ida_iroots); IDA_mem->ida_iroots = NULL; free(IDA_mem->ida_rootdir); IDA_mem->ida_rootdir = NULL; free(IDA_mem->ida_gactive); IDA_mem->ida_gactive = NULL; } free(IDA_mem->ida_cvals); IDA_mem->ida_cvals = NULL; free(IDA_mem->ida_Xvecs); IDA_mem->ida_Xvecs = NULL; free(IDA_mem->ida_Zvecs); IDA_mem->ida_Zvecs = NULL; free(*ida_mem); *ida_mem = NULL; } /* * IDAQuadFree * * IDAQuadFree frees the problem memory in ida_mem allocated * for quadrature integration. Its only argument is the pointer * ida_mem returned by IDACreate. */ void IDAQuadFree(void *ida_mem) { IDAMem IDA_mem; if (ida_mem == NULL) return; IDA_mem = (IDAMem) ida_mem; if(IDA_mem->ida_quadMallocDone) { IDAQuadFreeVectors(IDA_mem); IDA_mem->ida_quadMallocDone = SUNFALSE; IDA_mem->ida_quadr = SUNFALSE; } } /* * IDASensFree * * IDASensFree frees the problem memory in ida_mem allocated * for sensitivity analysis. Its only argument is the pointer * ida_mem returned by IDACreate. 
*/ void IDASensFree(void *ida_mem) { IDAMem IDA_mem; /* return immediately if IDA memory is NULL */ if (ida_mem == NULL) return; IDA_mem = (IDAMem) ida_mem; if(IDA_mem->ida_sensMallocDone) { IDASensFreeVectors(IDA_mem); IDA_mem->ida_sensMallocDone = SUNFALSE; IDA_mem->ida_sensi = SUNFALSE; } /* free any vector wrappers */ if (IDA_mem->simMallocDone) { N_VDestroy(IDA_mem->ypredictSim); IDA_mem->ypredictSim = NULL; N_VDestroy(IDA_mem->ycorSim); IDA_mem->ycorSim = NULL; N_VDestroy(IDA_mem->ewtSim); IDA_mem->ewtSim = NULL; IDA_mem->simMallocDone = SUNFALSE; } if (IDA_mem->stgMallocDone) { N_VDestroy(IDA_mem->ypredictStg); IDA_mem->ypredictStg = NULL; N_VDestroy(IDA_mem->ycorStg); IDA_mem->ycorStg = NULL; N_VDestroy(IDA_mem->ewtStg); IDA_mem->ewtStg = NULL; IDA_mem->stgMallocDone = SUNFALSE; } /* if IDA created the NLS object then free it */ if (IDA_mem->ownNLSsim) { SUNNonlinSolFree(IDA_mem->NLSsim); IDA_mem->ownNLSsim = SUNFALSE; IDA_mem->NLSsim = NULL; } if (IDA_mem->ownNLSstg) { SUNNonlinSolFree(IDA_mem->NLSstg); IDA_mem->ownNLSstg = SUNFALSE; IDA_mem->NLSstg = NULL; } /* free min atol array if necessary */ if (IDA_mem->ida_atolSmin0) { free(IDA_mem->ida_atolSmin0); IDA_mem->ida_atolSmin0 = NULL; } } /* * IDAQuadSensFree * * IDAQuadSensFree frees the problem memory in ida_mem allocated * for quadrature sensitivity analysis. Its only argument is the * pointer ida_mem returned by IDACreate. 
*/ void IDAQuadSensFree(void* ida_mem) { IDAMem IDA_mem; if (ida_mem==NULL) return; IDA_mem = (IDAMem) ida_mem; if (IDA_mem->ida_quadSensMallocDone) { IDAQuadSensFreeVectors(IDA_mem); IDA_mem->ida_quadSensMallocDone=SUNFALSE; IDA_mem->ida_quadr_sensi = SUNFALSE; } /* free min atol array if necessary */ if (IDA_mem->ida_atolQSmin0) { free(IDA_mem->ida_atolQSmin0); IDA_mem->ida_atolQSmin0 = NULL; } } /* * ================================================================= * PRIVATE FUNCTIONS * ================================================================= */ /* * IDACheckNvector * * This routine checks if all required vector operations are present. * If any of them is missing it returns SUNFALSE. */ static booleantype IDACheckNvector(N_Vector tmpl) { if ((tmpl->ops->nvclone == NULL) || (tmpl->ops->nvdestroy == NULL) || (tmpl->ops->nvlinearsum == NULL) || (tmpl->ops->nvconst == NULL) || (tmpl->ops->nvprod == NULL) || (tmpl->ops->nvscale == NULL) || (tmpl->ops->nvabs == NULL) || (tmpl->ops->nvinv == NULL) || (tmpl->ops->nvaddconst == NULL) || (tmpl->ops->nvwrmsnorm == NULL) || (tmpl->ops->nvmin == NULL)) return(SUNFALSE); else return(SUNTRUE); } /* * ----------------------------------------------------------------- * Memory allocation/deallocation * ----------------------------------------------------------------- */ /* * IDAAllocVectors * * This routine allocates the IDA vectors ewt, tempv1, tempv2, and * phi[0], ..., phi[maxord]. * If all memory allocations are successful, IDAAllocVectors returns * SUNTRUE. Otherwise all allocated memory is freed and IDAAllocVectors * returns SUNFALSE. * This routine also sets the optional outputs lrw and liw, which are * (respectively) the lengths of the real and integer work spaces * allocated here. 
*/ static booleantype IDAAllocVectors(IDAMem IDA_mem, N_Vector tmpl) { int i, j, maxcol; /* Allocate ewt, ee, delta, yypredict, yppredict, savres, tempv1, tempv2, tempv3 */ IDA_mem->ida_ewt = N_VClone(tmpl); if (IDA_mem->ida_ewt == NULL) return(SUNFALSE); IDA_mem->ida_ee = N_VClone(tmpl); if (IDA_mem->ida_ee == NULL) { N_VDestroy(IDA_mem->ida_ewt); return(SUNFALSE); } IDA_mem->ida_delta = N_VClone(tmpl); if (IDA_mem->ida_delta == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); return(SUNFALSE); } IDA_mem->ida_yypredict = N_VClone(tmpl); if (IDA_mem->ida_yypredict == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); N_VDestroy(IDA_mem->ida_delta); return(SUNFALSE); } IDA_mem->ida_yppredict = N_VClone(tmpl); if (IDA_mem->ida_yppredict == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); N_VDestroy(IDA_mem->ida_delta); N_VDestroy(IDA_mem->ida_yypredict); return(SUNFALSE); } IDA_mem->ida_savres = N_VClone(tmpl); if (IDA_mem->ida_savres == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); N_VDestroy(IDA_mem->ida_delta); N_VDestroy(IDA_mem->ida_yypredict); N_VDestroy(IDA_mem->ida_yppredict); return(SUNFALSE); } IDA_mem->ida_tempv1 = N_VClone(tmpl); if (IDA_mem->ida_tempv1 == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); N_VDestroy(IDA_mem->ida_delta); N_VDestroy(IDA_mem->ida_yypredict); N_VDestroy(IDA_mem->ida_yppredict); N_VDestroy(IDA_mem->ida_savres); return(SUNFALSE); } IDA_mem->ida_tempv2 = N_VClone(tmpl); if (IDA_mem->ida_tempv2 == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); N_VDestroy(IDA_mem->ida_delta); N_VDestroy(IDA_mem->ida_yypredict); N_VDestroy(IDA_mem->ida_yppredict); N_VDestroy(IDA_mem->ida_savres); N_VDestroy(IDA_mem->ida_tempv1); return(SUNFALSE); } IDA_mem->ida_tempv3 = N_VClone(tmpl); if (IDA_mem->ida_tempv3 == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); N_VDestroy(IDA_mem->ida_delta); 
N_VDestroy(IDA_mem->ida_yypredict); N_VDestroy(IDA_mem->ida_yppredict); N_VDestroy(IDA_mem->ida_savres); N_VDestroy(IDA_mem->ida_tempv1); N_VDestroy(IDA_mem->ida_tempv2); return(SUNFALSE); } /* Allocate phi[0] ... phi[maxord]. Make sure phi[2] and phi[3] are allocated (for use as temporary vectors), regardless of maxord. */ maxcol = SUNMAX(IDA_mem->ida_maxord,3); for (j=0; j <= maxcol; j++) { IDA_mem->ida_phi[j] = N_VClone(tmpl); if (IDA_mem->ida_phi[j] == NULL) { N_VDestroy(IDA_mem->ida_ewt); N_VDestroy(IDA_mem->ida_ee); N_VDestroy(IDA_mem->ida_delta); N_VDestroy(IDA_mem->ida_yypredict); N_VDestroy(IDA_mem->ida_yppredict); N_VDestroy(IDA_mem->ida_savres); N_VDestroy(IDA_mem->ida_tempv1); N_VDestroy(IDA_mem->ida_tempv2); N_VDestroy(IDA_mem->ida_tempv3); for (i=0; i < j; i++) N_VDestroy(IDA_mem->ida_phi[i]); return(SUNFALSE); } } /* Update solver workspace lengths */ IDA_mem->ida_lrw += (maxcol + 10)*IDA_mem->ida_lrw1; IDA_mem->ida_liw += (maxcol + 10)*IDA_mem->ida_liw1; /* Store the value of maxord used here */ IDA_mem->ida_maxord_alloc = IDA_mem->ida_maxord; return(SUNTRUE); } /* * IDAfreeVectors * * This routine frees the IDA vectors allocated for IDA. 
*/ static void IDAFreeVectors(IDAMem IDA_mem) { int j, maxcol; N_VDestroy(IDA_mem->ida_ewt); IDA_mem->ida_ewt = NULL; N_VDestroy(IDA_mem->ida_ee); IDA_mem->ida_ee = NULL; N_VDestroy(IDA_mem->ida_delta); IDA_mem->ida_delta = NULL; N_VDestroy(IDA_mem->ida_yypredict); IDA_mem->ida_yypredict = NULL; N_VDestroy(IDA_mem->ida_yppredict); IDA_mem->ida_yppredict = NULL; N_VDestroy(IDA_mem->ida_savres); IDA_mem->ida_savres = NULL; N_VDestroy(IDA_mem->ida_tempv1); IDA_mem->ida_tempv1 = NULL; N_VDestroy(IDA_mem->ida_tempv2); IDA_mem->ida_tempv2 = NULL; N_VDestroy(IDA_mem->ida_tempv3); IDA_mem->ida_tempv3 = NULL; maxcol = SUNMAX(IDA_mem->ida_maxord_alloc,3); for(j=0; j <= maxcol; j++) { N_VDestroy(IDA_mem->ida_phi[j]); IDA_mem->ida_phi[j] = NULL; } IDA_mem->ida_lrw -= (maxcol + 10)*IDA_mem->ida_lrw1; IDA_mem->ida_liw -= (maxcol + 10)*IDA_mem->ida_liw1; if (IDA_mem->ida_VatolMallocDone) { N_VDestroy(IDA_mem->ida_Vatol); IDA_mem->ida_Vatol = NULL; IDA_mem->ida_lrw -= IDA_mem->ida_lrw1; IDA_mem->ida_liw -= IDA_mem->ida_liw1; } if (IDA_mem->ida_constraintsMallocDone) { N_VDestroy(IDA_mem->ida_constraints); IDA_mem->ida_constraints = NULL; IDA_mem->ida_lrw -= IDA_mem->ida_lrw1; IDA_mem->ida_liw -= IDA_mem->ida_liw1; } if (IDA_mem->ida_idMallocDone) { N_VDestroy(IDA_mem->ida_id); IDA_mem->ida_id = NULL; IDA_mem->ida_lrw -= IDA_mem->ida_lrw1; IDA_mem->ida_liw -= IDA_mem->ida_liw1; } } /* * IDAQuadAllocVectors * * NOTE: Space for ewtQ is allocated even when errconQ=SUNFALSE, * although in this case, ewtQ is never used. The reason for this * decision is to allow the user to re-initialize the quadrature * computation with errconQ=SUNTRUE, after an initialization with * errconQ=SUNFALSE, without new memory allocation within * IDAQuadReInit. 
*/ static booleantype IDAQuadAllocVectors(IDAMem IDA_mem, N_Vector tmpl) { int i, j; /* Allocate yyQ */ IDA_mem->ida_yyQ = N_VClone(tmpl); if (IDA_mem->ida_yyQ == NULL) { return (SUNFALSE); } /* Allocate ypQ */ IDA_mem->ida_ypQ = N_VClone(tmpl); if (IDA_mem->ida_ypQ == NULL) { N_VDestroy(IDA_mem->ida_yyQ); return (SUNFALSE); } /* Allocate ewtQ */ IDA_mem->ida_ewtQ = N_VClone(tmpl); if (IDA_mem->ida_ewtQ == NULL) { N_VDestroy(IDA_mem->ida_yyQ); N_VDestroy(IDA_mem->ida_ypQ); return (SUNFALSE); } /* Allocate eeQ */ IDA_mem->ida_eeQ = N_VClone(tmpl); if (IDA_mem->ida_eeQ == NULL) { N_VDestroy(IDA_mem->ida_yyQ); N_VDestroy(IDA_mem->ida_ypQ); N_VDestroy(IDA_mem->ida_ewtQ); return (SUNFALSE); } for (j=0; j <= IDA_mem->ida_maxord; j++) { IDA_mem->ida_phiQ[j] = N_VClone(tmpl); if (IDA_mem->ida_phiQ[j] == NULL) { N_VDestroy(IDA_mem->ida_yyQ); N_VDestroy(IDA_mem->ida_ypQ); N_VDestroy(IDA_mem->ida_ewtQ); N_VDestroy(IDA_mem->ida_eeQ); for (i=0; i < j; i++) N_VDestroy(IDA_mem->ida_phiQ[i]); return(SUNFALSE); } } IDA_mem->ida_lrw += (IDA_mem->ida_maxord+4)*IDA_mem->ida_lrw1Q; IDA_mem->ida_liw += (IDA_mem->ida_maxord+4)*IDA_mem->ida_liw1Q; return(SUNTRUE); } /* * IDAQuadFreeVectors * * This routine frees the IDAS vectors allocated in IDAQuadAllocVectors. 
*/ static void IDAQuadFreeVectors(IDAMem IDA_mem) { int j; N_VDestroy(IDA_mem->ida_yyQ); IDA_mem->ida_yyQ = NULL; N_VDestroy(IDA_mem->ida_ypQ); IDA_mem->ida_ypQ = NULL; N_VDestroy(IDA_mem->ida_ewtQ); IDA_mem->ida_ewtQ = NULL; N_VDestroy(IDA_mem->ida_eeQ); IDA_mem->ida_eeQ = NULL; for(j=0; j <= IDA_mem->ida_maxord; j++) { N_VDestroy(IDA_mem->ida_phiQ[j]); IDA_mem->ida_phiQ[j] = NULL; } IDA_mem->ida_lrw -= (IDA_mem->ida_maxord+5)*IDA_mem->ida_lrw1Q; IDA_mem->ida_liw -= (IDA_mem->ida_maxord+5)*IDA_mem->ida_liw1Q; if (IDA_mem->ida_VatolQMallocDone) { N_VDestroy(IDA_mem->ida_VatolQ); IDA_mem->ida_VatolQ = NULL; IDA_mem->ida_lrw -= IDA_mem->ida_lrw1Q; IDA_mem->ida_liw -= IDA_mem->ida_liw1Q; } IDA_mem->ida_VatolQMallocDone = SUNFALSE; } /* * IDASensAllocVectors * * Allocates space for the N_Vectors, plist, and pbar required for FSA. */ static booleantype IDASensAllocVectors(IDAMem IDA_mem, N_Vector tmpl) { int j, maxcol; IDA_mem->ida_tmpS1 = IDA_mem->ida_tempv1; IDA_mem->ida_tmpS2 = IDA_mem->ida_tempv2; /* Allocate space for workspace vectors */ IDA_mem->ida_tmpS3 = N_VClone(tmpl); if (IDA_mem->ida_tmpS3==NULL) { return(SUNFALSE); } IDA_mem->ida_ewtS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_ewtS==NULL) { N_VDestroy(IDA_mem->ida_tmpS3); return(SUNFALSE); } IDA_mem->ida_eeS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_eeS==NULL) { N_VDestroy(IDA_mem->ida_tmpS3); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); return(SUNFALSE); } IDA_mem->ida_yyS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_yyS==NULL) { N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroy(IDA_mem->ida_tmpS3); return(SUNFALSE); } IDA_mem->ida_ypS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_ypS==NULL) { N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); 
N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroy(IDA_mem->ida_tmpS3); return(SUNFALSE); } IDA_mem->ida_yySpredict = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_yySpredict==NULL) { N_VDestroyVectorArray(IDA_mem->ida_ypS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroy(IDA_mem->ida_tmpS3); return(SUNFALSE); } IDA_mem->ida_ypSpredict = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_ypSpredict==NULL) { N_VDestroyVectorArray(IDA_mem->ida_yySpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroy(IDA_mem->ida_tmpS3); return(SUNFALSE); } IDA_mem->ida_deltaS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_deltaS==NULL) { N_VDestroyVectorArray(IDA_mem->ida_ypSpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yySpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroy(IDA_mem->ida_tmpS3); return(SUNFALSE); } /* Update solver workspace lengths */ IDA_mem->ida_lrw += (5*IDA_mem->ida_Ns+1)*IDA_mem->ida_lrw1; IDA_mem->ida_liw += (5*IDA_mem->ida_Ns+1)*IDA_mem->ida_liw1; /* Allocate space for phiS */ /* Make sure phiS[2], phiS[3] and phiS[4] are allocated (for use as temporary vectors), regardless of maxord.*/ maxcol = SUNMAX(IDA_mem->ida_maxord,4); for (j=0; j <= maxcol; j++) { IDA_mem->ida_phiS[j] = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl); if (IDA_mem->ida_phiS[j] == NULL) { N_VDestroy(IDA_mem->ida_tmpS3); 
N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yySpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypSpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_deltaS, IDA_mem->ida_Ns); return(SUNFALSE); } } /* Update solver workspace lengths */ IDA_mem->ida_lrw += maxcol*IDA_mem->ida_Ns*IDA_mem->ida_lrw1; IDA_mem->ida_liw += maxcol*IDA_mem->ida_Ns*IDA_mem->ida_liw1; /* Allocate space for pbar and plist */ IDA_mem->ida_pbar = NULL; IDA_mem->ida_pbar = (realtype *)malloc(IDA_mem->ida_Ns*sizeof(realtype)); if (IDA_mem->ida_pbar == NULL) { N_VDestroy(IDA_mem->ida_tmpS3); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yySpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypSpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_deltaS, IDA_mem->ida_Ns); for (j=0; j<=maxcol; j++) N_VDestroyVectorArray(IDA_mem->ida_phiS[j], IDA_mem->ida_Ns); return(SUNFALSE); } IDA_mem->ida_plist = NULL; IDA_mem->ida_plist = (int *)malloc(IDA_mem->ida_Ns*sizeof(int)); if (IDA_mem->ida_plist == NULL) { N_VDestroy(IDA_mem->ida_tmpS3); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yySpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypSpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_deltaS, IDA_mem->ida_Ns); for (j=0; j<=maxcol; j++) N_VDestroyVectorArray(IDA_mem->ida_phiS[j], 
IDA_mem->ida_Ns); free(IDA_mem->ida_pbar); IDA_mem->ida_pbar = NULL; return(SUNFALSE); } /* Update solver workspace lengths */ IDA_mem->ida_lrw += IDA_mem->ida_Ns; IDA_mem->ida_liw += IDA_mem->ida_Ns; return(SUNTRUE); } /* * IDASensFreeVectors * * Frees memory allocated by IDASensAllocVectors. */ static void IDASensFreeVectors(IDAMem IDA_mem) { int j, maxcol; N_VDestroyVectorArray(IDA_mem->ida_deltaS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypSpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yySpredict, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ypS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_yyS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_eeS, IDA_mem->ida_Ns); N_VDestroyVectorArray(IDA_mem->ida_ewtS, IDA_mem->ida_Ns); N_VDestroy(IDA_mem->ida_tmpS3); maxcol = SUNMAX(IDA_mem->ida_maxord_alloc, 4); for (j=0; j<=maxcol; j++) N_VDestroyVectorArray(IDA_mem->ida_phiS[j], IDA_mem->ida_Ns); free(IDA_mem->ida_pbar); IDA_mem->ida_pbar = NULL; free(IDA_mem->ida_plist); IDA_mem->ida_plist = NULL; IDA_mem->ida_lrw -= ( (maxcol+3)*IDA_mem->ida_Ns + 1 ) * IDA_mem->ida_lrw1 + IDA_mem->ida_Ns; IDA_mem->ida_liw -= ( (maxcol+3)*IDA_mem->ida_Ns + 1 ) * IDA_mem->ida_liw1 + IDA_mem->ida_Ns; if (IDA_mem->ida_VatolSMallocDone) { N_VDestroyVectorArray(IDA_mem->ida_VatolS, IDA_mem->ida_Ns); IDA_mem->ida_lrw -= IDA_mem->ida_Ns*IDA_mem->ida_lrw1; IDA_mem->ida_liw -= IDA_mem->ida_Ns*IDA_mem->ida_liw1; IDA_mem->ida_VatolSMallocDone = SUNFALSE; } if (IDA_mem->ida_SatolSMallocDone) { free(IDA_mem->ida_SatolS); IDA_mem->ida_SatolS = NULL; IDA_mem->ida_lrw -= IDA_mem->ida_Ns; IDA_mem->ida_SatolSMallocDone = SUNFALSE; } } /* * IDAQuadSensAllocVectors * * Create (through duplication) N_Vectors used for quadrature sensitivity analysis, * using the N_Vector 'tmpl' as a template. 
*/

static booleantype IDAQuadSensAllocVectors(IDAMem IDA_mem, N_Vector tmpl)
{
  int i, j, maxcol;

  /* Allocate yQS */
  IDA_mem->ida_yyQS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl);
  if (IDA_mem->ida_yyQS == NULL) {
    return(SUNFALSE);
  }

  /* Allocate ewtQS */
  IDA_mem->ida_ewtQS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl);
  if (IDA_mem->ida_ewtQS == NULL) {
    N_VDestroyVectorArray(IDA_mem->ida_yyQS, IDA_mem->ida_Ns);
    return(SUNFALSE);
  }

  /* Allocate tempvQS */
  IDA_mem->ida_tempvQS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl);
  if (IDA_mem->ida_tempvQS == NULL) {
    N_VDestroyVectorArray(IDA_mem->ida_yyQS, IDA_mem->ida_Ns);
    N_VDestroyVectorArray(IDA_mem->ida_ewtQS, IDA_mem->ida_Ns);
    return(SUNFALSE);
  }

  /* Allocate eeQS */
  IDA_mem->ida_eeQS = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl);
  if (IDA_mem->ida_eeQS == NULL) {
    N_VDestroyVectorArray(IDA_mem->ida_yyQS, IDA_mem->ida_Ns);
    N_VDestroyVectorArray(IDA_mem->ida_ewtQS, IDA_mem->ida_Ns);
    N_VDestroyVectorArray(IDA_mem->ida_tempvQS, IDA_mem->ida_Ns);
    return(SUNFALSE);
  }

  /* Allocate savrhsQ */
  IDA_mem->ida_savrhsQ = N_VClone(tmpl);
  if (IDA_mem->ida_savrhsQ == NULL) {
    N_VDestroyVectorArray(IDA_mem->ida_yyQS, IDA_mem->ida_Ns);
    N_VDestroyVectorArray(IDA_mem->ida_ewtQS, IDA_mem->ida_Ns);
    N_VDestroyVectorArray(IDA_mem->ida_tempvQS, IDA_mem->ida_Ns);
    N_VDestroyVectorArray(IDA_mem->ida_eeQS, IDA_mem->ida_Ns);
    /* BUG FIX: the failure branch fell through without returning, so the
       function reported success with a NULL savrhsQ */
    return(SUNFALSE);
  }

  maxcol = SUNMAX(IDA_mem->ida_maxord,4);

  /* Allocate phiQS */
  for (j=0; j<=maxcol; j++) {
    IDA_mem->ida_phiQS[j] = N_VCloneVectorArray(IDA_mem->ida_Ns, tmpl);
    if (IDA_mem->ida_phiQS[j] == NULL) {
      N_VDestroyVectorArray(IDA_mem->ida_yyQS, IDA_mem->ida_Ns);
      N_VDestroyVectorArray(IDA_mem->ida_ewtQS, IDA_mem->ida_Ns);
      N_VDestroyVectorArray(IDA_mem->ida_tempvQS, IDA_mem->ida_Ns);
      N_VDestroyVectorArray(IDA_mem->ida_eeQS, IDA_mem->ida_Ns);
      N_VDestroy(IDA_mem->ida_savrhsQ);
      /* free the phiQS columns allocated so far (restored loop condition:
         the comparison was lost to angle-bracket stripping) */
      for (i=0; i<j; i++)
        N_VDestroyVectorArray(IDA_mem->ida_phiQS[i], IDA_mem->ida_Ns);
      return(SUNFALSE);
    }
  }

  /* Update solver workspace lengths */
  IDA_mem->ida_lrw += (maxcol + 5)*IDA_mem->ida_Ns*IDA_mem->ida_lrw1Q;
  IDA_mem->ida_liw += (maxcol + 5)*IDA_mem->ida_Ns*IDA_mem->ida_liw1Q;

  return(SUNTRUE);
}

/*
 * IDAQuadSensFreeVectors
 *
 * This routine frees the IDAS vectors allocated in IDAQuadSensAllocVectors.
 */

static void IDAQuadSensFreeVectors(IDAMem IDA_mem)
{
  int j, maxcol;

  maxcol = SUNMAX(IDA_mem->ida_maxord, 4);

  N_VDestroyVectorArray(IDA_mem->ida_yyQS, IDA_mem->ida_Ns);
  N_VDestroyVectorArray(IDA_mem->ida_ewtQS, IDA_mem->ida_Ns);
  N_VDestroyVectorArray(IDA_mem->ida_eeQS, IDA_mem->ida_Ns);
  N_VDestroyVectorArray(IDA_mem->ida_tempvQS, IDA_mem->ida_Ns);
  N_VDestroy(IDA_mem->ida_savrhsQ);

  for (j=0; j<=maxcol; j++)
    N_VDestroyVectorArray(IDA_mem->ida_phiQS[j], IDA_mem->ida_Ns);

  /* Undo the workspace accounting performed at allocation */
  IDA_mem->ida_lrw -= (maxcol + 5)*IDA_mem->ida_Ns*IDA_mem->ida_lrw1Q;
  IDA_mem->ida_liw -= (maxcol + 5)*IDA_mem->ida_Ns*IDA_mem->ida_liw1Q;

  if (IDA_mem->ida_VatolQSMallocDone) {
    N_VDestroyVectorArray(IDA_mem->ida_VatolQS, IDA_mem->ida_Ns);
    IDA_mem->ida_lrw -= IDA_mem->ida_Ns*IDA_mem->ida_lrw1Q;
    IDA_mem->ida_liw -= IDA_mem->ida_Ns*IDA_mem->ida_liw1Q;
  }

  if (IDA_mem->ida_SatolQSMallocDone) {
    free(IDA_mem->ida_SatolQS); IDA_mem->ida_SatolQS = NULL;
    IDA_mem->ida_lrw -= IDA_mem->ida_Ns;
  }

  IDA_mem->ida_VatolQSMallocDone = SUNFALSE;
  IDA_mem->ida_SatolQSMallocDone = SUNFALSE;
}

/*
 * -----------------------------------------------------------------
 * Initial setup
 * -----------------------------------------------------------------
 */

/*
 * IDAInitialSetup
 *
 * This routine is called by IDASolve once at the first step.
 * It performs all checks on optional inputs and inputs to
 * IDAInit/IDAReInit that could not be done before.
 *
 * If no error is encountered, IDAInitialSetup returns IDA_SUCCESS.
 * Otherwise, it returns an error flag and reported to the error
 * handler function.
*/ int IDAInitialSetup(IDAMem IDA_mem) { booleantype conOK; int ier; /* Test for more vector operations, depending on options */ if (IDA_mem->ida_suppressalg) if (IDA_mem->ida_phi[0]->ops->nvwrmsnormmask == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_BAD_NVECTOR); return(IDA_ILL_INPUT); } /* Test id vector for legality */ if (IDA_mem->ida_suppressalg && (IDA_mem->ida_id==NULL)){ IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_MISSING_ID); return(IDA_ILL_INPUT); } /* Did the user specify tolerances? */ if (IDA_mem->ida_itol == IDA_NN) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_NO_TOLS); return(IDA_ILL_INPUT); } /* Set data for efun */ if (IDA_mem->ida_user_efun) IDA_mem->ida_edata = IDA_mem->ida_user_data; else IDA_mem->ida_edata = IDA_mem; /* Initial error weight vector */ ier = IDA_mem->ida_efun(IDA_mem->ida_phi[0], IDA_mem->ida_ewt, IDA_mem->ida_edata); if (ier != 0) { if (IDA_mem->ida_itol == IDA_WF) IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_FAIL_EWT); else IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_BAD_EWT); return(IDA_ILL_INPUT); } if (IDA_mem->ida_quadr) { /* Evaluate quadrature rhs and set phiQ[1] */ ier = IDA_mem->ida_rhsQ(IDA_mem->ida_tn, IDA_mem->ida_phi[0], IDA_mem->ida_phi[1], IDA_mem->ida_phiQ[1], IDA_mem->ida_user_data); IDA_mem->ida_nrQe++; if (ier < 0) { IDAProcessError(IDA_mem, IDA_QRHS_FAIL, "IDAS", "IDAInitialSetup", MSG_QRHSFUNC_FAILED); return(IDA_QRHS_FAIL); } else if (ier > 0) { IDAProcessError(IDA_mem, IDA_FIRST_QRHS_ERR, "IDAS", "IDAInitialSetup", MSG_QRHSFUNC_FIRST); return(IDA_FIRST_QRHS_ERR); } if (IDA_mem->ida_errconQ) { /* Did the user specify tolerances? 
*/ if (IDA_mem->ida_itolQ == IDA_NN) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_NO_TOLQ); return(IDA_ILL_INPUT); } /* Load ewtQ */ ier = IDAQuadEwtSet(IDA_mem, IDA_mem->ida_phiQ[0], IDA_mem->ida_ewtQ); if (ier != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_BAD_EWTQ); return(IDA_ILL_INPUT); } } } else { IDA_mem->ida_errconQ = SUNFALSE; } if (IDA_mem->ida_sensi) { /* Did the user specify tolerances? */ if (IDA_mem->ida_itolS == IDA_NN) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_NO_TOLS); return(IDA_ILL_INPUT); } /* Load ewtS */ ier = IDASensEwtSet(IDA_mem, IDA_mem->ida_phiS[0], IDA_mem->ida_ewtS); if (ier != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_BAD_EWTS); return(IDA_ILL_INPUT); } } else { IDA_mem->ida_errconS = SUNFALSE; } if (IDA_mem->ida_quadr_sensi) { /* store the quadrature sensitivity residual. */ ier = IDA_mem->ida_rhsQS(IDA_mem->ida_Ns, IDA_mem->ida_tn, IDA_mem->ida_phi[0], IDA_mem->ida_phi[1], IDA_mem->ida_phiS[0], IDA_mem->ida_phiS[1], IDA_mem->ida_phiQ[1], IDA_mem->ida_phiQS[1], IDA_mem->ida_user_dataQS, IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2, IDA_mem->ida_tmpS3); IDA_mem->ida_nrQSe++; if (ier < 0) { IDAProcessError(IDA_mem, IDA_QSRHS_FAIL, "IDAS", "IDAInitialSetup", MSG_QSRHSFUNC_FAILED); return(IDA_QRHS_FAIL); } else if (ier > 0) { IDAProcessError(IDA_mem, IDA_FIRST_QSRHS_ERR, "IDAS", "IDAInitialSetup", MSG_QSRHSFUNC_FIRST); return(IDA_FIRST_QSRHS_ERR); } /* If using the internal DQ functions, we must have access to fQ * (i.e. 
quadrature integration must be enabled) and to the problem parameters */ if (IDA_mem->ida_rhsQSDQ) { /* Test if quadratures are defined, so we can use fQ */ if (!IDA_mem->ida_quadr) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_NULL_RHSQ); return(IDA_ILL_INPUT); } /* Test if we have the problem parameters */ if (IDA_mem->ida_p == NULL) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_NULL_P); return(IDA_ILL_INPUT); } } if (IDA_mem->ida_errconQS) { /* Did the user specify tolerances? */ if (IDA_mem->ida_itolQS == IDA_NN) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_NO_TOLQS); return(IDA_ILL_INPUT); } /* If needed, did the user provide quadrature tolerances? */ if ( (IDA_mem->ida_itolQS == IDA_EE) && (IDA_mem->ida_itolQ == IDA_NN) ) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_NO_TOLQ); return(IDA_ILL_INPUT); } /* Load ewtS */ ier = IDAQuadSensEwtSet(IDA_mem, IDA_mem->ida_phiQS[0], IDA_mem->ida_ewtQS); if (ier != 0) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_BAD_EWTQS); return(IDA_ILL_INPUT); } } } else { IDA_mem->ida_errconQS = SUNFALSE; } /* Check to see if y0 satisfies constraints. */ if (IDA_mem->ida_constraintsSet) { if (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS)) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_BAD_ISM_CONSTR); return(IDA_ILL_INPUT); } conOK = N_VConstrMask(IDA_mem->ida_constraints, IDA_mem->ida_phi[0], IDA_mem->ida_tempv2); if (!conOK) { IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDAS", "IDAInitialSetup", MSG_Y0_FAIL_CONSTR); return(IDA_ILL_INPUT); } } /* Call linit function if it exists. 
*/ if (IDA_mem->ida_linit != NULL) { ier = IDA_mem->ida_linit(IDA_mem); if (ier != 0) { IDAProcessError(IDA_mem, IDA_LINIT_FAIL, "IDAS", "IDAInitialSetup", MSG_LINIT_FAIL); return(IDA_LINIT_FAIL); } } /* Initialize the nonlinear solver (must occur after linear solver is initialize) so * that lsetup and lsolve pointers have been set */ /* always initialize the DAE NLS in case the user disables sensitivities later */ ier = idaNlsInit(IDA_mem); if (ier != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_NLS_INIT_FAIL, "IDAS", "IDAInitialSetup", MSG_NLS_INIT_FAIL); return(IDA_NLS_INIT_FAIL); } if (IDA_mem->NLSsim != NULL) { ier = idaNlsInitSensSim(IDA_mem); if (ier != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_NLS_INIT_FAIL, "IDAS", "IDAInitialSetup", MSG_NLS_INIT_FAIL); return(IDA_NLS_INIT_FAIL); } } if (IDA_mem->NLSstg != NULL) { ier = idaNlsInitSensStg(IDA_mem); if (ier != IDA_SUCCESS) { IDAProcessError(IDA_mem, IDA_NLS_INIT_FAIL, "IDAS", "IDAInitialSetup", MSG_NLS_INIT_FAIL); return(IDA_NLS_INIT_FAIL); } } return(IDA_SUCCESS); } /* * IDAEwtSet * * This routine is responsible for loading the error weight vector * ewt, according to itol, as follows: * (1) ewt[i] = 1 / (rtol * SUNRabs(ycur[i]) + atol), i=0,...,Neq-1 * if itol = IDA_SS * (2) ewt[i] = 1 / (rtol * SUNRabs(ycur[i]) + atol[i]), i=0,...,Neq-1 * if itol = IDA_SV * * IDAEwtSet returns 0 if ewt is successfully set as above to a * positive vector and -1 otherwise. In the latter case, ewt is * considered undefined. * * All the real work is done in the routines IDAEwtSetSS, IDAEwtSetSV. */ int IDAEwtSet(N_Vector ycur, N_Vector weight, void *data) { IDAMem IDA_mem; int flag = 0; /* data points to IDA_mem here */ IDA_mem = (IDAMem) data; switch(IDA_mem->ida_itol) { case IDA_SS: flag = IDAEwtSetSS(IDA_mem, ycur, weight); break; case IDA_SV: flag = IDAEwtSetSV(IDA_mem, ycur, weight); break; } return(flag); } /* * IDAEwtSetSS * * This routine sets ewt as decribed above in the case itol=IDA_SS. 
* If the absolute tolerance is zero, it tests for non-positive components * before inverting. IDAEwtSetSS returns 0 if ewt is successfully set to a * positive vector and -1 otherwise. In the latter case, ewt is considered * undefined. */ static int IDAEwtSetSS(IDAMem IDA_mem, N_Vector ycur, N_Vector weight) { N_VAbs(ycur, IDA_mem->ida_tempv1); N_VScale(IDA_mem->ida_rtol, IDA_mem->ida_tempv1, IDA_mem->ida_tempv1); N_VAddConst(IDA_mem->ida_tempv1, IDA_mem->ida_Satol, IDA_mem->ida_tempv1); if (IDA_mem->ida_atolmin0) { if (N_VMin(IDA_mem->ida_tempv1) <= ZERO) return(-1); } N_VInv(IDA_mem->ida_tempv1, weight); return(0); } /* * IDAEwtSetSV * * This routine sets ewt as decribed above in the case itol=IDA_SV. * If the absolute tolerance is zero, it tests for non-positive components * before inverting. IDAEwtSetSV returns 0 if ewt is successfully set to a * positive vector and -1 otherwise. In the latter case, ewt is considered * undefined. */ static int IDAEwtSetSV(IDAMem IDA_mem, N_Vector ycur, N_Vector weight) { N_VAbs(ycur, IDA_mem->ida_tempv1); N_VLinearSum(IDA_mem->ida_rtol, IDA_mem->ida_tempv1, ONE, IDA_mem->ida_Vatol, IDA_mem->ida_tempv1); if (IDA_mem->ida_atolmin0) { if (N_VMin(IDA_mem->ida_tempv1) <= ZERO) return(-1); } N_VInv(IDA_mem->ida_tempv1, weight); return(0); } /* * IDAQuadEwtSet * */ static int IDAQuadEwtSet(IDAMem IDA_mem, N_Vector qcur, N_Vector weightQ) { int flag=0; switch (IDA_mem->ida_itolQ) { case IDA_SS: flag = IDAQuadEwtSetSS(IDA_mem, qcur, weightQ); break; case IDA_SV: flag = IDAQuadEwtSetSV(IDA_mem, qcur, weightQ); break; } return(flag); } /* * IDAQuadEwtSetSS * */ static int IDAQuadEwtSetSS(IDAMem IDA_mem, N_Vector qcur, N_Vector weightQ) { N_Vector tempvQ; /* Use ypQ as temporary storage */ tempvQ = IDA_mem->ida_ypQ; N_VAbs(qcur, tempvQ); N_VScale(IDA_mem->ida_rtolQ, tempvQ, tempvQ); N_VAddConst(tempvQ, IDA_mem->ida_SatolQ, tempvQ); if (IDA_mem->ida_atolQmin0) { if (N_VMin(tempvQ) <= ZERO) return(-1); } N_VInv(tempvQ, weightQ); return(0); } 
/* * IDAQuadEwtSetSV * */ static int IDAQuadEwtSetSV(IDAMem IDA_mem, N_Vector qcur, N_Vector weightQ) { N_Vector tempvQ; /* Use ypQ as temporary storage */ tempvQ = IDA_mem->ida_ypQ; N_VAbs(qcur, tempvQ); N_VLinearSum(IDA_mem->ida_rtolQ, tempvQ, ONE, IDA_mem->ida_VatolQ, tempvQ); if (IDA_mem->ida_atolQmin0) { if (N_VMin(tempvQ) <= ZERO) return(-1); } N_VInv(tempvQ, weightQ); return(0); } /* * IDASensEwtSet * */ int IDASensEwtSet(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS) { int flag=0; switch (IDA_mem->ida_itolS) { case IDA_EE: flag = IDASensEwtSetEE(IDA_mem, yScur, weightS); break; case IDA_SS: flag = IDASensEwtSetSS(IDA_mem, yScur, weightS); break; case IDA_SV: flag = IDASensEwtSetSV(IDA_mem, yScur, weightS); break; } return(flag); } /* * IDASensEwtSetEE * * In this case, the error weight vector for the i-th sensitivity is set to * * ewtS_i = pbar_i * efun(pbar_i*yS_i) * * In other words, the scaled sensitivity pbar_i * yS_i has the same error * weight vector calculation as the solution vector. 
* */ static int IDASensEwtSetEE(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS) { int is; N_Vector pyS; int flag; /* Use tempv1 as temporary storage for the scaled sensitivity */ pyS = IDA_mem->ida_tempv1; for (is=0; isida_Ns; is++) { N_VScale(IDA_mem->ida_pbar[is], yScur[is], pyS); flag = IDA_mem->ida_efun(pyS, weightS[is], IDA_mem->ida_edata); if (flag != 0) return(-1); N_VScale(IDA_mem->ida_pbar[is], weightS[is], weightS[is]); } return(0); } /* * IDASensEwtSetSS * */ static int IDASensEwtSetSS(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS) { int is; for (is=0; isida_Ns; is++) { N_VAbs(yScur[is], IDA_mem->ida_tempv1); N_VScale(IDA_mem->ida_rtolS, IDA_mem->ida_tempv1, IDA_mem->ida_tempv1); N_VAddConst(IDA_mem->ida_tempv1, IDA_mem->ida_SatolS[is], IDA_mem->ida_tempv1); if (IDA_mem->ida_atolSmin0[is]) { if (N_VMin(IDA_mem->ida_tempv1) <= ZERO) return(-1); } N_VInv(IDA_mem->ida_tempv1, weightS[is]); } return(0); } /* * IDASensEwtSetSV * */ static int IDASensEwtSetSV(IDAMem IDA_mem, N_Vector *yScur, N_Vector *weightS) { int is; for (is=0; isida_Ns; is++) { N_VAbs(yScur[is], IDA_mem->ida_tempv1); N_VLinearSum(IDA_mem->ida_rtolS, IDA_mem->ida_tempv1, ONE, IDA_mem->ida_VatolS[is], IDA_mem->ida_tempv1); if (IDA_mem->ida_atolSmin0[is]) { if (N_VMin(IDA_mem->ida_tempv1) <= ZERO) return(-1); } N_VInv(IDA_mem->ida_tempv1, weightS[is]); } return(0); } /* * IDAQuadSensEwtSet * */ int IDAQuadSensEwtSet(IDAMem IDA_mem, N_Vector *yQScur, N_Vector *weightQS) { int flag=0; switch (IDA_mem->ida_itolQS) { case IDA_EE: flag = IDAQuadSensEwtSetEE(IDA_mem, yQScur, weightQS); break; case IDA_SS: flag = IDAQuadSensEwtSetSS(IDA_mem, yQScur, weightQS); break; case IDA_SV: flag = IDAQuadSensEwtSetSV(IDA_mem, yQScur, weightQS); break; } return(flag); } /* * IDAQuadSensEwtSetEE * * In this case, the error weight vector for the i-th quadrature sensitivity * is set to * * ewtQS_i = pbar_i * IDAQuadEwtSet(pbar_i*yQS_i) * * In other words, the scaled sensitivity pbar_i * yQS_i has the same 
error * weight vector calculation as the quadrature vector. * */ static int IDAQuadSensEwtSetEE(IDAMem IDA_mem, N_Vector *yQScur, N_Vector *weightQS) { int is; N_Vector pyS; int flag; /* Use tempvQS[0] as temporary storage for the scaled sensitivity */ pyS = IDA_mem->ida_tempvQS[0]; for (is=0; isida_Ns; is++) { N_VScale(IDA_mem->ida_pbar[is], yQScur[is], pyS); flag = IDAQuadEwtSet(IDA_mem, pyS, weightQS[is]); if (flag != 0) return(-1); N_VScale(IDA_mem->ida_pbar[is], weightQS[is], weightQS[is]); } return(0); } static int IDAQuadSensEwtSetSS(IDAMem IDA_mem, N_Vector *yQScur, N_Vector *weightQS) { int is; N_Vector tempvQ; /* Use ypQ as temporary storage */ tempvQ = IDA_mem->ida_ypQ; for (is=0; isida_Ns; is++) { N_VAbs(yQScur[is], tempvQ); N_VScale(IDA_mem->ida_rtolQS, tempvQ, tempvQ); N_VAddConst(tempvQ, IDA_mem->ida_SatolQS[is], tempvQ); if (IDA_mem->ida_atolQSmin0[is]) { if (N_VMin(tempvQ) <= ZERO) return(-1); } N_VInv(tempvQ, weightQS[is]); } return(0); } static int IDAQuadSensEwtSetSV(IDAMem IDA_mem, N_Vector *yQScur, N_Vector *weightQS) { int is; N_Vector tempvQ; /* Use ypQ as temporary storage */ tempvQ = IDA_mem->ida_ypQ; for (is=0; isida_Ns; is++) { N_VAbs(yQScur[is], tempvQ); N_VLinearSum(IDA_mem->ida_rtolQS, tempvQ, ONE, IDA_mem->ida_VatolQS[is], tempvQ); if (IDA_mem->ida_atolQSmin0[is]) { if (N_VMin(tempvQ) <= ZERO) return(-1); } N_VInv(tempvQ, weightQS[is]); } return(0); } /* * ----------------------------------------------------------------- * Stopping tests * ----------------------------------------------------------------- */ /* * IDAStopTest1 * * This routine tests for stop conditions before taking a step. * The tests depend on the value of itask. * The variable tretlast is the previously returned value of tret. 
 *
 * The return values are:
 * CONTINUE_STEPS     if no stop conditions were found
 * IDA_SUCCESS        for a normal return to the user
 * IDA_TSTOP_RETURN   for a tstop-reached return to the user
 * IDA_ILL_INPUT      for an illegal-input return to the user
 *
 * In the tstop cases, this routine may adjust the stepsize hh to cause
 * the next step to reach tstop exactly.
 */

static int IDAStopTest1(IDAMem IDA_mem, realtype tout, realtype *tret,
                        N_Vector yret, N_Vector ypret, int itask)
{
  int ier;
  realtype troundoff;

  switch (itask) {

  case IDA_NORMAL:

    if (IDA_mem->ida_tstopset) {
      /* Test for tn past tstop, tn = tretlast, tn past tout, tn near tstop. */
      if ( (IDA_mem->ida_tn - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO) {
        IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDA", "IDASolve",
                        MSG_BAD_TSTOP, IDA_mem->ida_tstop, IDA_mem->ida_tn);
        return(IDA_ILL_INPUT);
      }
    }

    /* Test for tout = tretlast, and for tn past tout. */
    if (tout == IDA_mem->ida_tretlast) {
      *tret = IDA_mem->ida_tretlast = tout;
      return(IDA_SUCCESS);
    }
    if ((IDA_mem->ida_tn - tout)*IDA_mem->ida_hh >= ZERO) {
      /* tout was already passed: interpolate the solution back to tout */
      ier = IDAGetSolution(IDA_mem, tout, yret, ypret);
      if (ier != IDA_SUCCESS) {
        IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDA", "IDASolve",
                        MSG_BAD_TOUT, tout);
        return(IDA_ILL_INPUT);
      }
      *tret = IDA_mem->ida_tretlast = tout;
      return(IDA_SUCCESS);
    }

    if (IDA_mem->ida_tstopset) {
      /* troundoff is the roundoff-level tolerance around tstop */
      troundoff = HUNDRED * IDA_mem->ida_uround *
        (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh));
      if (SUNRabs(IDA_mem->ida_tn - IDA_mem->ida_tstop) <= troundoff) {
        /* tn is within roundoff of tstop: return the solution at tstop */
        ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tstop, yret, ypret);
        if (ier != IDA_SUCCESS) {
          IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDA", "IDASolve",
                          MSG_BAD_TSTOP, IDA_mem->ida_tstop, IDA_mem->ida_tn);
          return(IDA_ILL_INPUT);
        }
        *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tstop;
        IDA_mem->ida_tstopset = SUNFALSE;
        return(IDA_TSTOP_RETURN);
      }
      /* If the next step would overshoot tstop, shrink hh to land on it */
      if ((IDA_mem->ida_tn + IDA_mem->ida_hh - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO)
        IDA_mem->ida_hh = (IDA_mem->ida_tstop - IDA_mem->ida_tn)*(ONE - FOUR * IDA_mem->ida_uround);
    }

    return(CONTINUE_STEPS);

  case IDA_ONE_STEP:

    if (IDA_mem->ida_tstopset) {
      /* Test for tn past tstop, tn past tretlast, and tn near tstop. */
      if ((IDA_mem->ida_tn - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO) {
        IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDA", "IDASolve",
                        MSG_BAD_TSTOP, IDA_mem->ida_tstop, IDA_mem->ida_tn);
        return(IDA_ILL_INPUT);
      }
    }

    /* Test for tn past tretlast. */
    if ((IDA_mem->ida_tn - IDA_mem->ida_tretlast)*IDA_mem->ida_hh > ZERO) {
      /* NOTE: the error return of IDAGetSolution is deliberately ignored
         here; tn is always within the interpolation range */
      ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tn, yret, ypret);
      *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn;
      return(IDA_SUCCESS);
    }

    if (IDA_mem->ida_tstopset) {
      troundoff = HUNDRED * IDA_mem->ida_uround *
        (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh));
      if (SUNRabs(IDA_mem->ida_tn - IDA_mem->ida_tstop) <= troundoff) {
        ier = IDAGetSolution(IDA_mem, IDA_mem->ida_tstop, yret, ypret);
        if (ier != IDA_SUCCESS) {
          IDAProcessError(IDA_mem, IDA_ILL_INPUT, "IDA", "IDASolve",
                          MSG_BAD_TSTOP, IDA_mem->ida_tstop, IDA_mem->ida_tn);
          return(IDA_ILL_INPUT);
        }
        *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tstop;
        IDA_mem->ida_tstopset = SUNFALSE;
        return(IDA_TSTOP_RETURN);
      }
      /* If the next step would overshoot tstop, shrink hh to land on it */
      if ((IDA_mem->ida_tn + IDA_mem->ida_hh - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO)
        IDA_mem->ida_hh = (IDA_mem->ida_tstop - IDA_mem->ida_tn)*(ONE - FOUR * IDA_mem->ida_uround);
    }

    return(CONTINUE_STEPS);
  }
  return(IDA_ILL_INPUT);  /* This return should never happen. */
}

/*
 * IDAStopTest2
 *
 * This routine tests for stop conditions after taking a step.
 * The tests depend on the value of itask.
 *
 * The return values are:
 * CONTINUE_STEPS     if no stop conditions were found
 * IDA_SUCCESS        for a normal return to the user
 * IDA_TSTOP_RETURN   for a tstop-reached return to the user
 * IDA_ILL_INPUT      for an illegal-input return to the user
 *
 * In the two cases with tstop, this routine may reset the stepsize hh
 * to cause the next step to reach tstop exactly.
 *
 * In the two cases with ONE_STEP mode, no interpolation to tn is needed
 * because yret and ypret already contain the current y and y' values.
 *
 * Note: No test is made for an error return from IDAGetSolution here,
 * because the same test was made prior to the step.
 */

static int IDAStopTest2(IDAMem IDA_mem, realtype tout, realtype *tret,
                        N_Vector yret, N_Vector ypret, int itask)
{
  /* int ier; */
  realtype troundoff;

  switch (itask) {

    case IDA_NORMAL:

      /* Test for tn past tout. */
      if ((IDA_mem->ida_tn - tout)*IDA_mem->ida_hh >= ZERO) {
        /* ier = */ IDAGetSolution(IDA_mem, tout, yret, ypret);
        *tret = IDA_mem->ida_tretlast = tout;
        return(IDA_SUCCESS);
      }

      if (IDA_mem->ida_tstopset) {
        /* Test for tn at tstop and for tn near tstop */
        troundoff = HUNDRED * IDA_mem->ida_uround *
          (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh));
        if (SUNRabs(IDA_mem->ida_tn - IDA_mem->ida_tstop) <= troundoff) {
          /* ier = */ IDAGetSolution(IDA_mem, IDA_mem->ida_tstop, yret, ypret);
          *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tstop;
          IDA_mem->ida_tstopset = SUNFALSE;
          return(IDA_TSTOP_RETURN);
        }
        /* If the next step would overshoot tstop, shrink hh to land on it */
        if ((IDA_mem->ida_tn + IDA_mem->ida_hh - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO)
          IDA_mem->ida_hh = (IDA_mem->ida_tstop - IDA_mem->ida_tn)*(ONE - FOUR * IDA_mem->ida_uround);
      }

      return(CONTINUE_STEPS);

    case IDA_ONE_STEP:

      if (IDA_mem->ida_tstopset) {
        /* Test for tn at tstop and for tn near tstop */
        troundoff = HUNDRED * IDA_mem->ida_uround *
          (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh));
        if (SUNRabs(IDA_mem->ida_tn - IDA_mem->ida_tstop) <= troundoff) {
          /* ier = */ IDAGetSolution(IDA_mem, IDA_mem->ida_tstop, yret, ypret);
          *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tstop;
          IDA_mem->ida_tstopset = SUNFALSE;
          return(IDA_TSTOP_RETURN);
        }
        /* If the next step would overshoot tstop, shrink hh to land on it */
        if ((IDA_mem->ida_tn + IDA_mem->ida_hh - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO)
          IDA_mem->ida_hh = (IDA_mem->ida_tstop - IDA_mem->ida_tn)*(ONE - FOUR * IDA_mem->ida_uround);
      }

      /* In ONE_STEP mode yret/ypret already hold the current solution */
      *tret = IDA_mem->ida_tretlast = IDA_mem->ida_tn;
      return(IDA_SUCCESS);
  }
  return IDA_ILL_INPUT;  /* This return should never happen. */
}

/*
 * -----------------------------------------------------------------
 * Error handler
 * -----------------------------------------------------------------
 */

/*
 * IDAHandleFailure
 *
 * This routine prints error messages for all cases of failure by
 * IDAStep. It returns to IDASolve the value that it is to return to
 * the user.
 */

static int IDAHandleFailure(IDAMem IDA_mem, int sflag)
{
  /* Depending on sflag, print error message and return error flag */
  switch (sflag) {

    case IDA_ERR_FAIL:
      IDAProcessError(IDA_mem, IDA_ERR_FAIL, "IDAS", "IDASolve",
                      MSG_ERR_FAILS, IDA_mem->ida_tn, IDA_mem->ida_hh);
      return(IDA_ERR_FAIL);

    case IDA_CONV_FAIL:
      IDAProcessError(IDA_mem, IDA_CONV_FAIL, "IDAS", "IDASolve",
                      MSG_CONV_FAILS, IDA_mem->ida_tn, IDA_mem->ida_hh);
      return(IDA_CONV_FAIL);

    case IDA_LSETUP_FAIL:
      IDAProcessError(IDA_mem, IDA_LSETUP_FAIL, "IDAS", "IDASolve",
                      MSG_SETUP_FAILED, IDA_mem->ida_tn);
      return(IDA_LSETUP_FAIL);

    case IDA_LSOLVE_FAIL:
      IDAProcessError(IDA_mem, IDA_LSOLVE_FAIL, "IDAS", "IDASolve",
                      MSG_SOLVE_FAILED, IDA_mem->ida_tn);
      return(IDA_LSOLVE_FAIL);

    case IDA_REP_RES_ERR:
      IDAProcessError(IDA_mem, IDA_REP_RES_ERR, "IDAS", "IDASolve",
                      MSG_REP_RES_ERR, IDA_mem->ida_tn);
      return(IDA_REP_RES_ERR);

    case IDA_RES_FAIL:
      IDAProcessError(IDA_mem, IDA_RES_FAIL, "IDAS", "IDASolve",
                      MSG_RES_NONRECOV, IDA_mem->ida_tn);
      return(IDA_RES_FAIL);

    case IDA_REP_QRHS_ERR:
      IDAProcessError(IDA_mem, IDA_REP_QRHS_ERR, "IDAS", "IDASolve",
                      MSG_QRHSFUNC_REPTD, IDA_mem->ida_tn);
      return(IDA_REP_QRHS_ERR);

    case IDA_QRHS_FAIL:
      IDAProcessError(IDA_mem, IDA_QRHS_FAIL, "IDAS", "IDASolve",
                      MSG_QRHSFUNC_FAILED, IDA_mem->ida_tn);
      return(IDA_QRHS_FAIL);

    case IDA_REP_SRES_ERR:
      IDAProcessError(IDA_mem, IDA_REP_SRES_ERR, "IDAS", "IDASolve",
                      MSG_SRES_REPTD, IDA_mem->ida_tn);
      return(IDA_REP_SRES_ERR);

    case IDA_SRES_FAIL:
      IDAProcessError(IDA_mem, IDA_SRES_FAIL, "IDAS", "IDASolve",
                      MSG_SRES_FAILED, IDA_mem->ida_tn);
      return(IDA_SRES_FAIL);

    case IDA_REP_QSRHS_ERR:
      IDAProcessError(IDA_mem, IDA_REP_QSRHS_ERR, "IDAS", "IDASolve",
                      MSG_QSRHSFUNC_REPTD, IDA_mem->ida_tn);
      return(IDA_REP_QSRHS_ERR);

    case IDA_QSRHS_FAIL:
      IDAProcessError(IDA_mem, IDA_QSRHS_FAIL, "IDAS", "IDASolve",
                      MSG_QSRHSFUNC_FAILED, IDA_mem->ida_tn);
      return(IDA_QSRHS_FAIL);

    case IDA_CONSTR_FAIL:
      IDAProcessError(IDA_mem, IDA_CONSTR_FAIL, "IDAS", "IDASolve",
                      MSG_FAILED_CONSTR, IDA_mem->ida_tn);
      return(IDA_CONSTR_FAIL);

    case IDA_MEM_NULL:
      IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDASolve",
                      MSG_NO_MEM);
      return(IDA_MEM_NULL);

    case SUN_NLS_MEM_NULL:
      /* NLS memory failures are reported to the caller as IDA_MEM_NULL */
      IDAProcessError(IDA_mem, IDA_MEM_NULL, "IDAS", "IDASolve",
                      MSG_NLS_INPUT_NULL, IDA_mem->ida_tn);
      return(IDA_MEM_NULL);

    case IDA_NLS_SETUP_FAIL:
      IDAProcessError(IDA_mem, IDA_NLS_SETUP_FAIL, "IDAS", "IDASolve",
                      MSG_NLS_SETUP_FAILED, IDA_mem->ida_tn);
      return(IDA_NLS_SETUP_FAIL);

    case IDA_NLS_FAIL:
      IDAProcessError(IDA_mem, IDA_NLS_FAIL, "IDA", "IDASolve",
                      MSG_NLS_FAIL, IDA_mem->ida_tn);
      return(IDA_NLS_FAIL);
  }

  /* This return should never happen */
  IDAProcessError(IDA_mem, IDA_UNRECOGNIZED_ERROR, "IDAS", "IDASolve",
                  "IDA encountered an unrecognized error. Please report this to the Sundials developers at sundials-users@llnl.gov");
  return (IDA_UNRECOGNIZED_ERROR);
}

/*
 * -----------------------------------------------------------------
 * Main IDAStep function
 * -----------------------------------------------------------------
 */

/*
 * IDAStep
 *
 * This routine performs one internal IDA step, from tn to tn + hh.
 * It calls other routines to do all the work.
 *
 * It solves a system of differential/algebraic equations of the form
 * F(t,y,y') = 0, for one step. In IDA, tt is used for t,
 * yy is used for y, and yp is used for y'. The function F is supplied as 'res'
 * by the user.
 *
 * The methods used are modified divided difference, fixed leading
 * coefficient forms of backward differentiation formulas.
 * The code adjusts the stepsize and order to control the local error per step.
 *
 * The main operations done here are as follows:
 * * initialize various quantities;
 * * setting of multistep method coefficients;
 * * solution of the nonlinear system for yy at t = tn + hh;
 * * deciding on order reduction and testing the local error;
 * * attempting to recover from failure in nonlinear solver or error test;
 * * resetting stepsize and order for the next step.
 * * updating phi and other state data if successful;
 *
 * On a failure in the nonlinear system solution or error test, the
 * step may be reattempted, depending on the nature of the failure.
 *
 * Variables or arrays (all in the IDAMem structure) used in IDAStep are:
 *
 * tt -- Independent variable.
 * yy -- Solution vector at tt.
 * yp -- Derivative of solution vector after successful stelp.
 * res -- User-supplied function to evaluate the residual. See the
 *        description given in file ida.h .
 * lsetup -- Routine to prepare for the linear solver call. It may either
 *        save or recalculate quantities used by lsolve. (Optional)
 * lsolve -- Routine to solve a linear system. A prior call to lsetup
 *        may be required.
 * hh  -- Appropriate step size for next step.
 * ewt -- Vector of weights used in all convergence tests.
 * phi -- Array of divided differences used by IDAStep. This array is composed
 *       of (maxord+1) nvectors (each of size Neq). (maxord+1) is the maximum
 *       order for the problem, maxord, plus 1.
 *
 * Return values are:
 *       IDA_SUCCESS   IDA_RES_FAIL      LSETUP_ERROR_NONRECVR
 *                     IDA_LSOLVE_FAIL   IDA_ERR_FAIL
 *                     IDA_CONSTR_FAIL   IDA_CONV_FAIL
 *                     IDA_REP_RES_ERR
 */

static int IDAStep(IDAMem IDA_mem)
{
  realtype saved_t, ck;
  realtype err_k, err_km1, err_km2;
  int ncf, nef;
  int nflag, kflag;
  int retval;
  booleantype sensi_stg, sensi_sim;

  /* Are we computing sensitivities with the staggered or simultaneous approach? */
  sensi_stg  = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_STAGGERED));
  sensi_sim  = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS));

  /* saved_t allows tn to be restored after a failed attempt */
  saved_t = IDA_mem->ida_tn;
  ncf = nef = 0;

  /* First step: initialize order, step counters and leading coefficient.
     NOTE(review): nst is a step counter compared against the realtype
     constant ZERO here — works, but an integer 0 would be cleaner. */
  if (IDA_mem->ida_nst == ZERO){
    IDA_mem->ida_kk = 1;
    IDA_mem->ida_kused = 0;
    IDA_mem->ida_hused = ZERO;
    IDA_mem->ida_psi[0] = IDA_mem->ida_hh;
    IDA_mem->ida_cj = ONE/IDA_mem->ida_hh;
    IDA_mem->ida_phase = 0;
    IDA_mem->ida_ns = 0;
  }

  /* To prevent 'unintialized variable' warnings */
  err_k = ZERO;
  err_km1 = ZERO;
  err_km2 = ZERO;

  /* Looping point for attempts to take a step */

  for(;;) {

    /*-----------------------
      Set method coefficients
      -----------------------*/

    IDASetCoeffs(IDA_mem, &ck);

    kflag = IDA_SUCCESS;

    /*----------------------------------------------------
      If tn is past tstop (by roundoff), reset it to tstop.
      -----------------------------------------------------*/

    IDA_mem->ida_tn = IDA_mem->ida_tn + IDA_mem->ida_hh;
    if (IDA_mem->ida_tstopset) {
      if ((IDA_mem->ida_tn - IDA_mem->ida_tstop)*IDA_mem->ida_hh > ZERO)
        IDA_mem->ida_tn = IDA_mem->ida_tstop;
    }

    /*-----------------------
      Advance state variables
      -----------------------*/

    /* Compute predicted values for yy and yp */
    IDAPredict(IDA_mem);

    /* Compute predicted values for yyS and ypS (if simultaneous approach) */
    if (sensi_sim)
      IDASensPredict(IDA_mem, IDA_mem->ida_yySpredict, IDA_mem->ida_ypSpredict);

    /* Nonlinear system solution */
    nflag = IDANls(IDA_mem);

    /* If NLS was successful, perform error test */
    if (nflag == IDA_SUCCESS)
      nflag = IDATestError(IDA_mem, ck, &err_k, &err_km1, &err_km2);

    /* Test for convergence or error test failures */
    if (nflag != IDA_SUCCESS) {

      /* restore and decide what to do */
      IDARestore(IDA_mem, saved_t);
      kflag = IDAHandleNFlag(IDA_mem, nflag, err_k, err_km1,
                             &(IDA_mem->ida_ncfn), &ncf,
                             &(IDA_mem->ida_netf), &nef);

      /* exit on nonrecoverable failure */
      if (kflag != PREDICT_AGAIN) return(kflag);

      /* recoverable error; predict again */
      if(IDA_mem->ida_nst==0) IDAReset(IDA_mem);
      continue;

    }

    /*----------------------------
      Advance quadrature variables
      ----------------------------*/
    if (IDA_mem->ida_quadr) {

      nflag = IDAQuadNls(IDA_mem);

      /* If NLS was successful, perform error test */
      if (IDA_mem->ida_errconQ && (nflag == IDA_SUCCESS))
        nflag = IDAQuadTestError(IDA_mem, ck, &err_k, &err_km1, &err_km2);

      /* Test for convergence or error test failures */
      if (nflag != IDA_SUCCESS) {

        /* restore and decide what to do */
        IDARestore(IDA_mem, saved_t);
        kflag = IDAHandleNFlag(IDA_mem, nflag, err_k, err_km1,
                               &(IDA_mem->ida_ncfnQ), &ncf,
                               &(IDA_mem->ida_netfQ), &nef);

        /* exit on nonrecoverable failure */
        if (kflag != PREDICT_AGAIN) return(kflag);

        /* recoverable error; predict again */
        if(IDA_mem->ida_nst==0) IDAReset(IDA_mem);
        continue;
      }
    }

    /*--------------------------------------------------
      Advance sensitivity variables (Staggered approach)
      --------------------------------------------------*/
    if (sensi_stg) {

      /* Evaluate res at converged y, needed for future evaluations of sens. RHS
         If res() fails recoverably, treat it as a convergence failure and
         attempt the step again */

      retval = IDA_mem->ida_res(IDA_mem->ida_tn,
                                IDA_mem->ida_yy, IDA_mem->ida_yp,
                                IDA_mem->ida_delta, IDA_mem->ida_user_data);

      if (retval < 0) return(IDA_RES_FAIL);
      if (retval > 0) continue;

      /* Compute predicted values for yyS and ypS */
      IDASensPredict(IDA_mem, IDA_mem->ida_yySpredict, IDA_mem->ida_ypSpredict);

      /* Nonlinear system solution */
      nflag = IDASensNls(IDA_mem);

      /* If NLS was successful, perform error test */
      if (IDA_mem->ida_errconS && (nflag == IDA_SUCCESS))
        nflag = IDASensTestError(IDA_mem, ck, &err_k, &err_km1, &err_km2);

      /* Test for convergence or error test failures */
      if (nflag != IDA_SUCCESS) {

        /* restore and decide what to do */
        IDARestore(IDA_mem, saved_t);
        /* NOTE(review): the quadrature counters ncfnQ/netfQ are incremented
           here rather than sensitivity-specific counters — confirm against
           upstream whether this is intentional */
        kflag = IDAHandleNFlag(IDA_mem, nflag, err_k, err_km1,
                               &(IDA_mem->ida_ncfnQ), &ncf,
                               &(IDA_mem->ida_netfQ), &nef);

        /* exit on nonrecoverable failure */
        if (kflag != PREDICT_AGAIN) return(kflag);

        /* recoverable error; predict again */
        if(IDA_mem->ida_nst==0) IDAReset(IDA_mem);
        continue;
      }
    }

    /*-------------------------------------------
      Advance quadrature sensitivity variables
      -------------------------------------------*/
    if (IDA_mem->ida_quadr_sensi) {

      nflag = IDAQuadSensNls(IDA_mem);

      /* If NLS was successful, perform error test */
      if (IDA_mem->ida_errconQS && (nflag == IDA_SUCCESS))
        nflag = IDAQuadSensTestError(IDA_mem, ck, &err_k, &err_km1, &err_km2);

      /* Test for convergence or error test failures */
      if (nflag != IDA_SUCCESS) {

        /* restore and decide what to do */
        IDARestore(IDA_mem, saved_t);
        kflag = IDAHandleNFlag(IDA_mem, nflag, err_k, err_km1,
                               &(IDA_mem->ida_ncfnQ), &ncf,
                               &(IDA_mem->ida_netfQ), &nef);

        /* exit on nonrecoverable failure */
        if (kflag != PREDICT_AGAIN) return(kflag);

        /* recoverable error; predict again */
        if(IDA_mem->ida_nst==0) IDAReset(IDA_mem);
        continue;
      }
    }

    /* kflag == IDA_SUCCESS */
    break;

  } /* end loop */

  /* Nonlinear system solve and error test were both successful;
     update data, and consider change of step and/or order */

  IDACompleteStep(IDA_mem, err_k, err_km1);

  /*
     Rescale ee vector to be the estimated local error
     Notes:
       (1) altering the value of ee is permissible since
           it will be overwritten by
           IDASolve()->IDAStep()->IDANls()
           before it is needed again
       (2) the value of ee is only valid if IDAHandleNFlag()
           returns either PREDICT_AGAIN or IDA_SUCCESS
  */

  N_VScale(ck, IDA_mem->ida_ee, IDA_mem->ida_ee);

  return(IDA_SUCCESS);
}

/*
 * IDASetCoeffs
 *
 *  This routine computes the coefficients relevant to the current step.
 *  The counter ns counts the number of consecutive steps taken at
 *  constant stepsize h and order k, up to a maximum of k + 2.
 *  Then the first ns components of beta will be one, and on a step
 *  with ns = k + 2, the coefficients alpha, etc. need not be reset here.
 *  Also, IDACompleteStep prohibits an order increase until ns = k + 2.
*/

static void IDASetCoeffs(IDAMem IDA_mem, realtype *ck)
{
  int i, j, is;
  realtype temp1, temp2, alpha0, alphas;

  /* Set coefficients for the current stepsize h */

  if ( (IDA_mem->ida_hh != IDA_mem->ida_hused) ||
       (IDA_mem->ida_kk != IDA_mem->ida_kused) )
    IDA_mem->ida_ns = 0;
  IDA_mem->ida_ns = SUNMIN(IDA_mem->ida_ns+1, IDA_mem->ida_kused+2);
  if (IDA_mem->ida_kk + 1 >= IDA_mem->ida_ns) {
    IDA_mem->ida_beta[0] = ONE;
    IDA_mem->ida_alpha[0] = ONE;
    temp1 = IDA_mem->ida_hh;
    IDA_mem->ida_gamma[0] = ZERO;
    IDA_mem->ida_sigma[0] = ONE;
    for(i=1; i<=IDA_mem->ida_kk; i++) {
      temp2 = IDA_mem->ida_psi[i-1];
      IDA_mem->ida_psi[i-1] = temp1;
      IDA_mem->ida_beta[i] = IDA_mem->ida_beta[i-1] * IDA_mem->ida_psi[i-1] / temp2;
      temp1 = temp2 + IDA_mem->ida_hh;
      IDA_mem->ida_alpha[i] = IDA_mem->ida_hh / temp1;
      IDA_mem->ida_sigma[i] = i * IDA_mem->ida_sigma[i-1] * IDA_mem->ida_alpha[i];
      IDA_mem->ida_gamma[i] = IDA_mem->ida_gamma[i-1] + IDA_mem->ida_alpha[i-1] / IDA_mem->ida_hh;
    }
    IDA_mem->ida_psi[IDA_mem->ida_kk] = temp1;
  }

  /* compute alphas, alpha0 */
  /* BUGFIX: the loop bound below had been garbled to "iida_kk" (a stripped
     "<IDA_mem->"); the sums must run over i = 0, ..., kk-1. */
  alphas = ZERO;
  alpha0 = ZERO;
  for(i=0; i<IDA_mem->ida_kk; i++) {
    alphas = alphas - ONE/(i+1);
    alpha0 = alpha0 - IDA_mem->ida_alpha[i];
  }

  /* compute leading coefficient cj  */
  IDA_mem->ida_cjlast = IDA_mem->ida_cj;
  IDA_mem->ida_cj = -alphas/IDA_mem->ida_hh;

  /* compute variable stepsize error coefficient ck */
  *ck = SUNRabs(IDA_mem->ida_alpha[IDA_mem->ida_kk] + alphas - alpha0);
  *ck = SUNMAX(*ck, IDA_mem->ida_alpha[IDA_mem->ida_kk]);

  /* change phi to phi-star  */

  /* Scale i=IDA_mem->ida_ns to i<=IDA_mem->ida_kk */
  if (IDA_mem->ida_ns <= IDA_mem->ida_kk) {

    for(i=IDA_mem->ida_ns; i<=IDA_mem->ida_kk; i++)
      IDA_mem->ida_cvals[i-IDA_mem->ida_ns] = IDA_mem->ida_beta[i];

    (void) N_VScaleVectorArray(IDA_mem->ida_kk - IDA_mem->ida_ns + 1,
                               IDA_mem->ida_cvals,
                               IDA_mem->ida_phi+IDA_mem->ida_ns,
                               IDA_mem->ida_phi+IDA_mem->ida_ns);

    if (IDA_mem->ida_quadr)
      (void) N_VScaleVectorArray(IDA_mem->ida_kk - IDA_mem->ida_ns + 1,
                                 IDA_mem->ida_cvals,
                                 IDA_mem->ida_phiQ+IDA_mem->ida_ns,
                                 IDA_mem->ida_phiQ+IDA_mem->ida_ns);

    /* BUGFIX: the inner loop bounds below had been garbled to "isida_Ns";
       each must iterate over the Ns sensitivity systems. */
    if (IDA_mem->ida_sensi || IDA_mem->ida_quadr_sensi) {
      j = 0;
      for(i=IDA_mem->ida_ns; i<=IDA_mem->ida_kk; i++) {
        for(is=0; is<IDA_mem->ida_Ns; is++) {
          IDA_mem->ida_cvals[j] = IDA_mem->ida_beta[i];
          j++;
        }
      }
    }

    if (IDA_mem->ida_sensi) {
      j = 0;
      for(i=IDA_mem->ida_ns; i<=IDA_mem->ida_kk; i++) {
        for(is=0; is<IDA_mem->ida_Ns; is++) {
          IDA_mem->ida_Xvecs[j] = IDA_mem->ida_phiS[i][is];
          j++;
        }
      }
      (void) N_VScaleVectorArray(j, IDA_mem->ida_cvals,
                                 IDA_mem->ida_Xvecs, IDA_mem->ida_Xvecs);
    }

    if (IDA_mem->ida_quadr_sensi) {
      j = 0;
      for(i=IDA_mem->ida_ns; i<=IDA_mem->ida_kk; i++) {
        for(is=0; is<IDA_mem->ida_Ns; is++) {
          IDA_mem->ida_Xvecs[j] = IDA_mem->ida_phiQS[i][is];
          j++;
        }
      }
      (void) N_VScaleVectorArray(j, IDA_mem->ida_cvals,
                                 IDA_mem->ida_Xvecs, IDA_mem->ida_Xvecs);
    }
  }
}

/*
 * -----------------------------------------------------------------
 * Nonlinear solver functions
 * -----------------------------------------------------------------
 */

/*
 * IDANls
 *
 * This routine attempts to solve the nonlinear system using the linear
 * solver specified. NOTE: this routine uses N_Vector ee as the scratch
 * vector tempv3 passed to lsetup.
 *
 * Possible return values:
 *
 *   IDA_SUCCESS
 *
 *   IDA_RES_RECVR       IDA_RES_FAIL
 *   IDA_SRES_RECVR      IDA_SRES_FAIL
 *   IDA_LSETUP_RECVR    IDA_LSETUP_FAIL
 *   IDA_LSOLVE_RECVR    IDA_LSOLVE_FAIL
 *
 *   IDA_CONSTR_RECVR
 *   SUN_NLS_CONV_RECVR
 *   IDA_MEM_NULL
 */

static int IDANls(IDAMem IDA_mem)
{
  int retval;
  booleantype constraintsPassed, callLSetup, sensi_sim;
  realtype temp1, temp2, vnorm;
  N_Vector mm, tmp;
  long int nni_inc;

  /* Are we computing sensitivities with the IDA_SIMULTANEOUS approach? */
  sensi_sim = (IDA_mem->ida_sensi && (IDA_mem->ida_ism==IDA_SIMULTANEOUS));

  callLSetup = SUNFALSE;

  /* Initialize if the first time called */
  if (IDA_mem->ida_nst == 0){
    IDA_mem->ida_cjold = IDA_mem->ida_cj;
    IDA_mem->ida_ss = TWENTY;
    IDA_mem->ida_ssS = TWENTY;
    if (IDA_mem->ida_lsetup) callLSetup = SUNTRUE;
  }

  /* Decide if lsetup is to be called: force it if cj has drifted too far
     from the value used at the last setup, or if a setup was requested */
  if (IDA_mem->ida_lsetup) {
    IDA_mem->ida_cjratio = IDA_mem->ida_cj / IDA_mem->ida_cjold;
    temp1 = (ONE - XRATE) / (ONE + XRATE);
    temp2 = ONE/temp1;
    if (IDA_mem->ida_cjratio < temp1 || IDA_mem->ida_cjratio > temp2) callLSetup = SUNTRUE;
    if (IDA_mem->ida_forceSetup) callLSetup = SUNTRUE;
    if (IDA_mem->ida_cj != IDA_mem->ida_cjlast) {
      IDA_mem->ida_ss = HUNDRED;
      IDA_mem->ida_ssS = HUNDRED;
    }
  }

  /* initial guess for the correction to the predictor */
  if (sensi_sim)
    N_VConst(ZERO, IDA_mem->ycorSim);
  else
    N_VConst(ZERO, IDA_mem->ida_ee);

  /* call nonlinear solver setup if it exists */
  if ((IDA_mem->NLS)->ops->setup) {
    if (sensi_sim)
      retval = SUNNonlinSolSetup(IDA_mem->NLS, IDA_mem->ycorSim, IDA_mem);
    else
      retval = SUNNonlinSolSetup(IDA_mem->NLS, IDA_mem->ida_ee, IDA_mem);

    if (retval < 0) return(IDA_NLS_SETUP_FAIL);
    if (retval > 0) return(IDA_NLS_SETUP_RECVR);
  }

  /* solve the nonlinear system */
  if (sensi_sim) {

    retval = SUNNonlinSolSolve(IDA_mem->NLSsim,
                               IDA_mem->ypredictSim, IDA_mem->ycorSim,
                               IDA_mem->ewtSim, IDA_mem->ida_epsNewt,
                               callLSetup, IDA_mem);

    /* increment counter */
    nni_inc = 0;
    (void) SUNNonlinSolGetNumIters(IDA_mem->NLSsim, &(nni_inc));
    IDA_mem->ida_nni += nni_inc;

  } else {

    retval = SUNNonlinSolSolve(IDA_mem->NLS,
                               IDA_mem->ida_yypredict, IDA_mem->ida_ee,
                               IDA_mem->ida_ewt, IDA_mem->ida_epsNewt,
                               callLSetup, IDA_mem);

    /* increment counter */
    nni_inc = 0;
    (void) SUNNonlinSolGetNumIters(IDA_mem->NLS, &(nni_inc));
    IDA_mem->ida_nni += nni_inc;
  }

  /* update the state using the final correction from the nonlinear solver */
  N_VLinearSum(ONE, IDA_mem->ida_yypredict, ONE, IDA_mem->ida_ee, IDA_mem->ida_yy);
  N_VLinearSum(ONE, IDA_mem->ida_yppredict,
               IDA_mem->ida_cj, IDA_mem->ida_ee, IDA_mem->ida_yp);

  /* update the sensitivities based on the final correction from the
     nonlinear solver */
  if (sensi_sim) {
    N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                            ONE, IDA_mem->ida_yySpredict,
                            ONE, IDA_mem->ida_eeS, IDA_mem->ida_yyS);
    N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                            ONE, IDA_mem->ida_ypSpredict,
                            IDA_mem->ida_cj, IDA_mem->ida_eeS, IDA_mem->ida_ypS);
  }

  /* return if nonlinear solver failed */
  if (retval != IDA_SUCCESS) return(retval);

  /* If otherwise successful, check and enforce inequality constraints. */

  if (IDA_mem->ida_constraintsSet) {

    /* shortcut names for temporary work vectors */
    mm  = IDA_mem->ida_tempv2;
    tmp = IDA_mem->ida_tempv1;

    /* Get mask vector mm, set where constraints failed */
    constraintsPassed = N_VConstrMask(IDA_mem->ida_constraints,
                                      IDA_mem->ida_yy, mm);
    if (constraintsPassed) return(IDA_SUCCESS);

    /* Constraints not met */

    /* Compute correction to satisfy constraints */
    N_VCompare(ONEPT5, IDA_mem->ida_constraints, tmp);  /* a[i] =1 when |c[i]| = 2 */
    N_VProd(tmp, IDA_mem->ida_constraints, tmp);        /* a * c                   */
    N_VDiv(tmp, IDA_mem->ida_ewt, tmp);                 /* a * c * wt              */
    N_VLinearSum(ONE, IDA_mem->ida_yy, -PT1, tmp, tmp); /* y - 0.1 * a * c * wt    */
    N_VProd(tmp, mm, tmp);                              /* v = mm*(y-.1*a*c*wt)    */

    vnorm = IDAWrmsNorm(IDA_mem, tmp, IDA_mem->ida_ewt, SUNFALSE); /* ||v|| */

    /* If vector v of constraint corrections is small in norm, correct and
       accept this step */
    if (vnorm <= IDA_mem->ida_epsNewt) {
      N_VLinearSum(ONE, IDA_mem->ida_ee, -ONE, tmp, IDA_mem->ida_ee); /* ee <- ee - v */
      return(IDA_SUCCESS);
    }

    /* Constraints correction is too large, reduce h by computing rr = h'/h */
    N_VLinearSum(ONE, IDA_mem->ida_phi[0], -ONE, IDA_mem->ida_yy, tmp);
    N_VProd(mm, tmp, tmp);
    IDA_mem->ida_rr = PT9*N_VMinQuotient(IDA_mem->ida_phi[0], tmp);
    IDA_mem->ida_rr = SUNMAX(IDA_mem->ida_rr, PT1);

    /* Reattempt step with new step size */
    return(IDA_CONSTR_RECVR);
  }

  return(IDA_SUCCESS);
}

/*
 * IDAPredict
 *
 * This routine
predicts the new values for vectors yy and yp.
 */

static void IDAPredict(IDAMem IDA_mem)
{
  int j;

  /* Build a vector of ones so the prediction is a plain sum of phi columns */
  for(j=0; j<=IDA_mem->ida_kk; j++)
    IDA_mem->ida_cvals[j] = ONE;

  /* yypredict = sum_{j=0..kk} phi[j];
     yppredict = sum_{j=1..kk} gamma[j]*phi[j] */
  (void) N_VLinearCombination(IDA_mem->ida_kk+1, IDA_mem->ida_cvals,
                              IDA_mem->ida_phi, IDA_mem->ida_yypredict);

  (void) N_VLinearCombination(IDA_mem->ida_kk, IDA_mem->ida_gamma+1,
                              IDA_mem->ida_phi+1, IDA_mem->ida_yppredict);
}

/*
 * IDAQuadNls
 *
 * This routine solves for the quadrature variables at the new step.
 * It does not solve a nonlinear system, but rather updates the
 * quadrature variables. The name for this function is just for
 * uniformity purposes.
 */

static int IDAQuadNls(IDAMem IDA_mem)
{
  int retval;

  /* Predict: load yyQ and ypQ */
  IDAQuadPredict(IDA_mem);

  /* Compute correction eeQ */
  retval = IDA_mem->ida_rhsQ(IDA_mem->ida_tn, IDA_mem->ida_yy,
                             IDA_mem->ida_yp, IDA_mem->ida_eeQ,
                             IDA_mem->ida_user_data);
  IDA_mem->ida_nrQe++;
  if (retval < 0) return(IDA_QRHS_FAIL);
  else if (retval > 0) return(IDA_QRHS_RECVR);

  /* Save the quadrature RHS for later use by the quadrature
     sensitivity residual */
  if (IDA_mem->ida_quadr_sensi)
    N_VScale(ONE, IDA_mem->ida_eeQ, IDA_mem->ida_savrhsQ);

  /* eeQ = (rhsQ - ypQ) / cj */
  N_VLinearSum(ONE, IDA_mem->ida_eeQ, -ONE, IDA_mem->ida_ypQ, IDA_mem->ida_eeQ);
  N_VScale(ONE/IDA_mem->ida_cj, IDA_mem->ida_eeQ, IDA_mem->ida_eeQ);

  /* Apply correction: yyQ = yyQ + eeQ */
  N_VLinearSum(ONE, IDA_mem->ida_yyQ, ONE, IDA_mem->ida_eeQ, IDA_mem->ida_yyQ);

  return(IDA_SUCCESS);
}

/*
 * IDAQuadPredict
 *
 * This routine predicts the new value for vectors yyQ and ypQ
 */

static void IDAQuadPredict(IDAMem IDA_mem)
{
  int j;

  for(j=0; j<=IDA_mem->ida_kk; j++)
    IDA_mem->ida_cvals[j] = ONE;

  /* Same prediction formulas as IDAPredict, applied to the
     quadrature history array phiQ */
  (void) N_VLinearCombination(IDA_mem->ida_kk+1, IDA_mem->ida_cvals,
                              IDA_mem->ida_phiQ, IDA_mem->ida_yyQ);

  (void) N_VLinearCombination(IDA_mem->ida_kk, IDA_mem->ida_gamma+1,
                              IDA_mem->ida_phiQ+1, IDA_mem->ida_ypQ);
}

/*
 * IDASensNls
 *
 * This routine attempts to solve, one by one, all the sensitivity
 * linear systems using nonlinear iterations and the linear solver
 * specified (Staggered approach).
 */

static int IDASensNls(IDAMem IDA_mem)
{
  booleantype callLSetup;
  long int nniS_inc;
  int retval;

  callLSetup = SUNFALSE;

  /* initial guess for the correction to the predictor */
  N_VConst(ZERO, IDA_mem->ycorStg);

  /* solve the nonlinear system */
  retval = SUNNonlinSolSolve(IDA_mem->NLSstg,
                             IDA_mem->ypredictStg, IDA_mem->ycorStg,
                             IDA_mem->ewtStg, IDA_mem->ida_epsNewt,
                             callLSetup, IDA_mem);

  /* increment counter of sensitivity nonlinear iterations */
  nniS_inc = 0;
  (void) SUNNonlinSolGetNumIters(IDA_mem->NLSstg, &(nniS_inc));
  IDA_mem->ida_nniS += nniS_inc;

  /* update using the final correction from the nonlinear solver:
     yyS = yySpredict + eeS ; ypS = ypSpredict + cj*eeS */
  N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                          ONE, IDA_mem->ida_yySpredict,
                          ONE, IDA_mem->ida_eeS, IDA_mem->ida_yyS);
  N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                          ONE, IDA_mem->ida_ypSpredict,
                          IDA_mem->ida_cj, IDA_mem->ida_eeS, IDA_mem->ida_ypS);

  /* count a sensitivity convergence failure on any solver failure */
  if (retval != IDA_SUCCESS) IDA_mem->ida_ncfnS++;

  return(retval);
}

/*
 * IDASensPredict
 *
 * This routine loads the predicted values for the is-th sensitivity
 * in the vectors yySens and ypSens.
 *
 * When ism=IDA_STAGGERED, yySens = yyS[is] and ypSens = ypS[is]
 */

static void IDASensPredict(IDAMem IDA_mem, N_Vector *yySens, N_Vector *ypSens)
{
  int j;

  for(j=0; j<=IDA_mem->ida_kk; j++)
    IDA_mem->ida_cvals[j] = ONE;

  (void) N_VLinearCombinationVectorArray(IDA_mem->ida_Ns,
                                         IDA_mem->ida_kk+1,
                                         IDA_mem->ida_cvals,
                                         IDA_mem->ida_phiS, yySens);
  (void) N_VLinearCombinationVectorArray(IDA_mem->ida_Ns,
                                         IDA_mem->ida_kk,
                                         IDA_mem->ida_gamma+1,
                                         IDA_mem->ida_phiS+1, ypSens);
}

/*
 * IDAQuadSensNls
 *
 * This routine solves for the sensitivity quadrature variables at the
 * new step. It does not solve a nonlinear system, but rather updates
 * the sensitivity variables. The name for this function is just for
 * uniformity purposes.
 */

static int IDAQuadSensNls(IDAMem IDA_mem)
{
  int retval;
  N_Vector *ypQS;

  /* Predict: load yyQS and ypQS for each sensitivity. Store
     1st order information in tempvQS. */
  ypQS = IDA_mem->ida_tempvQS;
  IDAQuadSensPredict(IDA_mem, IDA_mem->ida_yyQS, ypQS);

  /* Compute correction eeQS */
  retval = IDA_mem->ida_rhsQS(IDA_mem->ida_Ns, IDA_mem->ida_tn,
                              IDA_mem->ida_yy, IDA_mem->ida_yp,
                              IDA_mem->ida_yyS, IDA_mem->ida_ypS,
                              IDA_mem->ida_savrhsQ, IDA_mem->ida_eeQS,
                              IDA_mem->ida_user_dataQS,
                              IDA_mem->ida_tmpS1, IDA_mem->ida_tmpS2,
                              IDA_mem->ida_tmpS3);
  IDA_mem->ida_nrQSe++;

  if (retval < 0) return(IDA_QSRHS_FAIL);
  else if (retval > 0) return(IDA_QSRHS_RECVR);

  /* eeQS = (rhsQS - ypQS) / cj */
  retval = N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                                   ONE/IDA_mem->ida_cj, IDA_mem->ida_eeQS,
                                   -ONE/IDA_mem->ida_cj, ypQS,
                                   IDA_mem->ida_eeQS);
  if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR);

  /* Apply correction: yyQS[is] = yyQ[is] + eeQ[is] */
  retval = N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                                   ONE, IDA_mem->ida_yyQS,
                                   ONE, IDA_mem->ida_eeQS,
                                   IDA_mem->ida_yyQS);
  if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR);

  return(IDA_SUCCESS);
}

/*
 * IDAQuadSensPredict
 *
 * This routine predicts the new value for vectors yyQS and ypQS
 */

static void IDAQuadSensPredict(IDAMem IDA_mem, N_Vector *yQS, N_Vector *ypQS)
{
  int j;

  for(j=0; j<=IDA_mem->ida_kk; j++)
    IDA_mem->ida_cvals[j] = ONE;

  (void) N_VLinearCombinationVectorArray(IDA_mem->ida_Ns,
                                         IDA_mem->ida_kk+1, IDA_mem->ida_cvals,
                                         IDA_mem->ida_phiQS, yQS);
  (void) N_VLinearCombinationVectorArray(IDA_mem->ida_Ns,
                                         IDA_mem->ida_kk, IDA_mem->ida_gamma+1,
                                         IDA_mem->ida_phiQS+1, ypQS);
}

/*
 * -----------------------------------------------------------------
 * Error test
 * -----------------------------------------------------------------
 */

/*
 * IDATestError
 *
 * This routine estimates errors at orders k, k-1, k-2, decides
 * whether or not to suggest an order decrease, and performs
 * the local error test.
 *
 * IDATestError returns either IDA_SUCCESS or ERROR_TEST_FAIL.
*/

static int IDATestError(IDAMem IDA_mem, realtype ck,
                        realtype *err_k, realtype *err_km1,
                        realtype *err_km2)
{
  realtype enorm_k, enorm_km1, enorm_km2;   /* error norms */
  realtype terr_k, terr_km1, terr_km2;      /* local truncation error norms */

  /* Compute error for order k. */
  enorm_k = IDAWrmsNorm(IDA_mem, IDA_mem->ida_ee, IDA_mem->ida_ewt,
                        IDA_mem->ida_suppressalg);
  *err_k = IDA_mem->ida_sigma[IDA_mem->ida_kk] * enorm_k;
  terr_k = (IDA_mem->ida_kk + 1) * (*err_k);

  /* knew holds the order suggested for the next step; default is k */
  IDA_mem->ida_knew = IDA_mem->ida_kk;

  if ( IDA_mem->ida_kk > 1 ) {

    /* Compute error at order k-1 */
    N_VLinearSum(ONE, IDA_mem->ida_phi[IDA_mem->ida_kk], ONE,
                 IDA_mem->ida_ee, IDA_mem->ida_delta);
    enorm_km1 = IDAWrmsNorm(IDA_mem, IDA_mem->ida_delta,
                            IDA_mem->ida_ewt, IDA_mem->ida_suppressalg);
    *err_km1 = IDA_mem->ida_sigma[IDA_mem->ida_kk - 1] * enorm_km1;
    terr_km1 = IDA_mem->ida_kk * (*err_km1);

    if ( IDA_mem->ida_kk > 2 ) {

      /* Compute error at order k-2 (delta accumulates phi terms) */
      N_VLinearSum(ONE, IDA_mem->ida_phi[IDA_mem->ida_kk - 1], ONE,
                   IDA_mem->ida_delta, IDA_mem->ida_delta);
      enorm_km2 = IDAWrmsNorm(IDA_mem, IDA_mem->ida_delta,
                              IDA_mem->ida_ewt, IDA_mem->ida_suppressalg);
      *err_km2 = IDA_mem->ida_sigma[IDA_mem->ida_kk - 2] * enorm_km2;
      terr_km2 = (IDA_mem->ida_kk - 1) * (*err_km2);

      /* Decrease order if errors are reduced */
      if (SUNMAX(terr_km1, terr_km2) <= terr_k)
        IDA_mem->ida_knew = IDA_mem->ida_kk - 1;

    } else {

      /* Decrease order to 1 if errors are reduced by at least 1/2 */
      if (terr_km1 <= (HALF * terr_k) )
        IDA_mem->ida_knew = IDA_mem->ida_kk - 1;
    }
  }

  /* Perform error test */
  if (ck * enorm_k > ONE) return(ERROR_TEST_FAIL);
  else return(IDA_SUCCESS);
}

/*
 * IDAQuadTestError
 *
 * This routine estimates quadrature errors and updates errors at
 * orders k, k-1, k-2, decides whether or not to suggest an order reduction,
 * and performs the local error test.
 *
 * IDAQuadTestError returns the updated local error estimate at orders k,
 * k-1, and k-2. These are norms of type SUNMAX(|err|,|errQ|).
 *
 * The return flag can be either IDA_SUCCESS or ERROR_TEST_FAIL.
 */

static int IDAQuadTestError(IDAMem IDA_mem, realtype ck,
                            realtype *err_k, realtype *err_km1,
                            realtype *err_km2)
{
  realtype enormQ;
  realtype errQ_k, errQ_km1, errQ_km2;
  realtype terr_k, terr_km1, terr_km2;
  N_Vector tempv;
  booleantype check_for_reduction = SUNFALSE;

  /* Rename ypQ */
  tempv = IDA_mem->ida_ypQ;

  /* Update error for order k. */
  enormQ = N_VWrmsNorm(IDA_mem->ida_eeQ, IDA_mem->ida_ewtQ);
  errQ_k = IDA_mem->ida_sigma[IDA_mem->ida_kk] * enormQ;
  if (errQ_k > *err_k) {
    *err_k = errQ_k;
    check_for_reduction = SUNTRUE;
  }
  terr_k = (IDA_mem->ida_kk+1) * (*err_k);

  if ( IDA_mem->ida_kk > 1 ) {

    /* Update error at order k-1 */
    N_VLinearSum(ONE, IDA_mem->ida_phiQ[IDA_mem->ida_kk], ONE,
                 IDA_mem->ida_eeQ, tempv);
    errQ_km1 = IDA_mem->ida_sigma[IDA_mem->ida_kk-1] *
               N_VWrmsNorm(tempv, IDA_mem->ida_ewtQ);
    if (errQ_km1 > *err_km1) {
      *err_km1 = errQ_km1;
      check_for_reduction = SUNTRUE;
    }
    terr_km1 = IDA_mem->ida_kk * (*err_km1);

    /* Has an order decrease already been decided in IDATestError? */
    if (IDA_mem->ida_knew != IDA_mem->ida_kk)
      check_for_reduction = SUNFALSE;

    if (check_for_reduction) {

      if ( IDA_mem->ida_kk > 2 ) {

        /* Update error at order k-2 */
        N_VLinearSum(ONE, IDA_mem->ida_phiQ[IDA_mem->ida_kk-1], ONE,
                     tempv, tempv);
        errQ_km2 = IDA_mem->ida_sigma[IDA_mem->ida_kk-2] *
                   N_VWrmsNorm(tempv, IDA_mem->ida_ewtQ);
        if (errQ_km2 > *err_km2) {
          *err_km2 = errQ_km2;
        }
        terr_km2 = (IDA_mem->ida_kk-1) * (*err_km2);

        /* Decrease order if errors are reduced */
        if (SUNMAX(terr_km1, terr_km2) <= terr_k)
          IDA_mem->ida_knew = IDA_mem->ida_kk - 1;

      } else {

        /* Decrease order to 1 if errors are reduced by at least 1/2 */
        if (terr_km1 <= (HALF * terr_k) )
          IDA_mem->ida_knew = IDA_mem->ida_kk - 1;
      }
    }
  }

  /* Perform error test */
  if (ck * enormQ > ONE) return(ERROR_TEST_FAIL);
  else return(IDA_SUCCESS);
}

/*
 * IDASensTestError
 *
 * This routine estimates sensitivity errors and updates errors at
 * orders k, k-1, k-2, decides whether or not to suggest an order reduction,
 * and performs the local error test. (Used only in staggered approach).
 *
 * IDASensTestError returns the updated local error estimate at orders k,
 * k-1, and k-2. These are norms of type SUNMAX(|err|,|errQ|,|errS|).
 *
 * The return flag can be either IDA_SUCCESS or ERROR_TEST_FAIL.
 */

static int IDASensTestError(IDAMem IDA_mem, realtype ck,
                            realtype *err_k, realtype *err_km1,
                            realtype *err_km2)
{
  realtype enormS;
  realtype errS_k, errS_km1, errS_km2;
  realtype terr_k, terr_km1, terr_km2;
  N_Vector *tempv;
  booleantype check_for_reduction = SUNFALSE;
  int retval;

  /* Rename deltaS */
  tempv = IDA_mem->ida_deltaS;

  /* Update error for order k. */
  enormS = IDASensWrmsNorm(IDA_mem, IDA_mem->ida_eeS, IDA_mem->ida_ewtS,
                           IDA_mem->ida_suppressalg);
  errS_k = IDA_mem->ida_sigma[IDA_mem->ida_kk] * enormS;
  if (errS_k > *err_k) {
    *err_k = errS_k;
    check_for_reduction = SUNTRUE;
  }
  terr_k = (IDA_mem->ida_kk+1) * (*err_k);

  if ( IDA_mem->ida_kk > 1 ) {

    /* Update error at order k-1 */
    retval = N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                                     ONE, IDA_mem->ida_phiS[IDA_mem->ida_kk],
                                     ONE, IDA_mem->ida_eeS, tempv);
    if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR);

    errS_km1 = IDA_mem->ida_sigma[IDA_mem->ida_kk-1] *
               IDASensWrmsNorm(IDA_mem, tempv, IDA_mem->ida_ewtS,
                               IDA_mem->ida_suppressalg);
    if (errS_km1 > *err_km1) {
      *err_km1 = errS_km1;
      check_for_reduction = SUNTRUE;
    }
    terr_km1 = IDA_mem->ida_kk * (*err_km1);

    /* Has an order decrease already been decided in IDATestError? */
    if (IDA_mem->ida_knew != IDA_mem->ida_kk)
      check_for_reduction = SUNFALSE;

    if (check_for_reduction) {

      if ( IDA_mem->ida_kk > 2 ) {

        /* Update error at order k-2 */
        retval = N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                                         ONE, IDA_mem->ida_phiS[IDA_mem->ida_kk-1],
                                         ONE, tempv, tempv);
        if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR);

        errS_km2 = IDA_mem->ida_sigma[IDA_mem->ida_kk-2] *
                   IDASensWrmsNorm(IDA_mem, tempv, IDA_mem->ida_ewtS,
                                   IDA_mem->ida_suppressalg);
        if (errS_km2 > *err_km2) {
          *err_km2 = errS_km2;
        }
        terr_km2 = (IDA_mem->ida_kk-1) * (*err_km2);

        /* Decrease order if errors are reduced */
        if (SUNMAX(terr_km1, terr_km2) <= terr_k)
          IDA_mem->ida_knew = IDA_mem->ida_kk - 1;

      } else {

        /* Decrease order to 1 if errors are reduced by at least 1/2 */
        if (terr_km1 <= (HALF * terr_k) )
          IDA_mem->ida_knew = IDA_mem->ida_kk - 1;
      }
    }
  }

  /* Perform error test */
  if (ck * enormS > ONE) return(ERROR_TEST_FAIL);
  else return(IDA_SUCCESS);
}

/*
 * IDAQuadSensTestError
 *
 * This routine estimates quadrature sensitivity errors and updates
 * errors at orders k, k-1, k-2, decides whether or not to suggest
 * an order reduction and performs the local error test.
(Used
 * only in staggered approach).
 *
 * IDAQuadSensTestError returns the updated local error estimate at
 * orders k, k-1, and k-2. These are norms of type
 * SUNMAX(|err|,|errQ|,|errS|,|errQS|).
 *
 * The return flag can be either IDA_SUCCESS or ERROR_TEST_FAIL.
 */

static int IDAQuadSensTestError(IDAMem IDA_mem, realtype ck,
                                realtype *err_k, realtype *err_km1,
                                realtype *err_km2)
{
  realtype enormQS;
  realtype errQS_k, errQS_km1, errQS_km2;
  realtype terr_k, terr_km1, terr_km2;
  N_Vector *tempv;
  booleantype check_for_reduction = SUNFALSE;
  int retval;

  /* Use yyQS as scratch for the order k-1/k-2 error vectors */
  tempv = IDA_mem->ida_yyQS;

  /* Update error for order k. */
  enormQS = IDAQuadSensWrmsNorm(IDA_mem, IDA_mem->ida_eeQS, IDA_mem->ida_ewtQS);
  errQS_k = IDA_mem->ida_sigma[IDA_mem->ida_kk] * enormQS;

  if (errQS_k > *err_k) {
    *err_k = errQS_k;
    check_for_reduction = SUNTRUE;
  }
  terr_k = (IDA_mem->ida_kk+1) * (*err_k);

  if ( IDA_mem->ida_kk > 1 ) {

    /* Update error at order k-1 */
    retval = N_VLinearSumVectorArray(IDA_mem->ida_Ns,
                                     ONE, IDA_mem->ida_phiQS[IDA_mem->ida_kk],
                                     ONE, IDA_mem->ida_eeQS, tempv);
    if (retval != IDA_SUCCESS) return (IDA_VECTOROP_ERR);

    errQS_km1 = IDA_mem->ida_sigma[IDA_mem->ida_kk-1] *
                IDAQuadSensWrmsNorm(IDA_mem, tempv, IDA_mem->ida_ewtQS);

    if (errQS_km1 > *err_km1) {
      *err_km1 = errQS_km1;
      check_for_reduction = SUNTRUE;
    }
    terr_km1 = IDA_mem->ida_kk * (*err_km1);

    /* Has an order decrease already been decided in IDATestError? */
* It changes back phi-star to phi (changed in IDASetCoeffs) */ static void IDARestore(IDAMem IDA_mem, realtype saved_t) { int i, j, is; IDA_mem->ida_tn = saved_t; for (i = 1; i <= IDA_mem->ida_kk; i++) IDA_mem->ida_psi[i-1] = IDA_mem->ida_psi[i] - IDA_mem->ida_hh; if (IDA_mem->ida_ns <= IDA_mem->ida_kk) { for(i = IDA_mem->ida_ns; i <= IDA_mem->ida_kk; i++) IDA_mem->ida_cvals[i-IDA_mem->ida_ns] = ONE/IDA_mem->ida_beta[i]; (void) N_VScaleVectorArray(IDA_mem->ida_kk - IDA_mem->ida_ns + 1, IDA_mem->ida_cvals, IDA_mem->ida_phi+IDA_mem->ida_ns, IDA_mem->ida_phi+IDA_mem->ida_ns); if (IDA_mem->ida_quadr) (void) N_VScaleVectorArray(IDA_mem->ida_kk - IDA_mem->ida_ns + 1, IDA_mem->ida_cvals, IDA_mem->ida_phiQ+IDA_mem->ida_ns, IDA_mem->ida_phiQ+IDA_mem->ida_ns); if (IDA_mem->ida_sensi || IDA_mem->ida_quadr_sensi) { j = 0; for(i=IDA_mem->ida_ns; i<=IDA_mem->ida_kk; i++) { for(is=0; isida_Ns; is++) { IDA_mem->ida_cvals[j] = ONE/IDA_mem->ida_beta[i]; j++; } } } if (IDA_mem->ida_sensi) { j = 0; for(i=IDA_mem->ida_ns; i<=IDA_mem->ida_kk; i++) { for(is=0; isida_Ns; is++) { IDA_mem->ida_Xvecs[j] = IDA_mem->ida_phiS[i][is]; j++; } } (void) N_VScaleVectorArray(j, IDA_mem->ida_cvals, IDA_mem->ida_Xvecs, IDA_mem->ida_Xvecs); } if (IDA_mem->ida_quadr_sensi) { j = 0; for(i=IDA_mem->ida_ns; i<=IDA_mem->ida_kk; i++) { for(is=0; isida_Ns; is++) { IDA_mem->ida_Xvecs[j] = IDA_mem->ida_phiQS[i][is]; j++; } } (void) N_VScaleVectorArray(j, IDA_mem->ida_cvals, IDA_mem->ida_Xvecs, IDA_mem->ida_Xvecs); } } } /* * ----------------------------------------------------------------- * Handler for convergence and/or error test failures * ----------------------------------------------------------------- */ /* * IDAHandleNFlag * * This routine handles failures indicated by the input variable nflag. * Positive values indicate various recoverable failures while negative * values indicate nonrecoverable failures. This routine adjusts the * step size for recoverable failures. 
* * Possible nflag values (input): * * --convergence failures-- * IDA_RES_RECVR > 0 * IDA_LSOLVE_RECVR > 0 * IDA_CONSTR_RECVR > 0 * SUN_NLS_CONV_RECVR > 0 * IDA_QRHS_RECVR > 0 * IDA_QSRHS_RECVR > 0 * IDA_RES_FAIL < 0 * IDA_LSOLVE_FAIL < 0 * IDA_LSETUP_FAIL < 0 * IDA_QRHS_FAIL < 0 * * --error test failure-- * ERROR_TEST_FAIL > 0 * * Possible kflag values (output): * * --recoverable-- * PREDICT_AGAIN * * --nonrecoverable-- * IDA_CONSTR_FAIL * IDA_REP_RES_ERR * IDA_ERR_FAIL * IDA_CONV_FAIL * IDA_RES_FAIL * IDA_LSETUP_FAIL * IDA_LSOLVE_FAIL * IDA_QRHS_FAIL * IDA_REP_QRHS_ERR */ static int IDAHandleNFlag(IDAMem IDA_mem, int nflag, realtype err_k, realtype err_km1, long int *ncfnPtr, int *ncfPtr, long int *netfPtr, int *nefPtr) { realtype err_knew; IDA_mem->ida_phase = 1; if (nflag != ERROR_TEST_FAIL) { /*----------------------- Nonlinear solver failed -----------------------*/ (*ncfPtr)++; /* local counter for convergence failures */ (*ncfnPtr)++; /* global counter for convergence failures */ if (nflag < 0) { /* nonrecoverable failure */ if (nflag == IDA_LSOLVE_FAIL) return(IDA_LSOLVE_FAIL); else if (nflag == IDA_LSETUP_FAIL) return(IDA_LSETUP_FAIL); else if (nflag == IDA_RES_FAIL) return(IDA_RES_FAIL); else if (nflag == IDA_QRHS_FAIL) return(IDA_QRHS_FAIL); else if (nflag == IDA_SRES_FAIL) return(IDA_SRES_FAIL); else if (nflag == IDA_QSRHS_FAIL) return(IDA_QSRHS_FAIL); else return(IDA_NLS_FAIL); } else { /* recoverable failure */ /* Reduce step size for a new prediction Note that if nflag=IDA_CONSTR_RECVR then rr was already set in IDANls */ if (nflag != IDA_CONSTR_RECVR) IDA_mem->ida_rr = QUARTER; IDA_mem->ida_hh *= IDA_mem->ida_rr; /* Test if there were too many convergence failures */ if (*ncfPtr < IDA_mem->ida_maxncf) return(PREDICT_AGAIN); else if (nflag == IDA_RES_RECVR) return(IDA_REP_RES_ERR); else if (nflag == IDA_QRHS_RECVR) return(IDA_REP_QRHS_ERR); else if (nflag == IDA_SRES_RECVR) return(IDA_REP_SRES_ERR); else if (nflag == IDA_QSRHS_RECVR) 
return(IDA_REP_QSRHS_ERR); else if (nflag == IDA_CONSTR_RECVR) return(IDA_CONSTR_FAIL); else return(IDA_CONV_FAIL); } } else { /*----------------- Error Test failed -----------------*/ (*nefPtr)++; /* local counter for error test failures */ (*netfPtr)++; /* global counter for error test failures */ if (*nefPtr == 1) { /* On first error test failure, keep current order or lower order by one. Compute new stepsize based on differences of the solution. */ err_knew = (IDA_mem->ida_kk == IDA_mem->ida_knew) ? err_k : err_km1; IDA_mem->ida_kk = IDA_mem->ida_knew; IDA_mem->ida_rr = PT9 * SUNRpowerR( TWO * err_knew + PT0001, -ONE/(IDA_mem->ida_kk + 1) ); IDA_mem->ida_rr = SUNMAX(QUARTER, SUNMIN(PT9,IDA_mem->ida_rr)); IDA_mem->ida_hh *= IDA_mem->ida_rr; return(PREDICT_AGAIN); } else if (*nefPtr == 2) { /* On second error test failure, use current order or decrease order by one. Reduce stepsize by factor of 1/4. */ IDA_mem->ida_kk = IDA_mem->ida_knew; IDA_mem->ida_rr = QUARTER; IDA_mem->ida_hh *= IDA_mem->ida_rr; return(PREDICT_AGAIN); } else if (*nefPtr < IDA_mem->ida_maxnef) { /* On third and subsequent error test failures, set order to 1. Reduce stepsize by factor of 1/4. */ IDA_mem->ida_kk = 1; IDA_mem->ida_rr = QUARTER; IDA_mem->ida_hh *= IDA_mem->ida_rr; return(PREDICT_AGAIN); } else { /* Too many error test failures */ return(IDA_ERR_FAIL); } } } /* * IDAReset * * This routine is called only if we need to predict again at the * very first step. In such a case, reset phi[1] and psi[0]. 
*/ static void IDAReset(IDAMem IDA_mem) { int is; IDA_mem->ida_psi[0] = IDA_mem->ida_hh; N_VScale(IDA_mem->ida_rr, IDA_mem->ida_phi[1], IDA_mem->ida_phi[1]); if (IDA_mem->ida_quadr) N_VScale(IDA_mem->ida_rr, IDA_mem->ida_phiQ[1], IDA_mem->ida_phiQ[1]); if (IDA_mem->ida_sensi || IDA_mem->ida_quadr_sensi) for(is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = IDA_mem->ida_rr; if (IDA_mem->ida_sensi) (void) N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_phiS[1], IDA_mem->ida_phiS[1]); if (IDA_mem->ida_quadr_sensi) (void) N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_phiQS[1], IDA_mem->ida_phiQS[1]); } /* * ----------------------------------------------------------------- * Function called after a successful step * ----------------------------------------------------------------- */ /* * IDACompleteStep * * This routine completes a successful step. It increments nst, * saves the stepsize and order used, makes the final selection of * stepsize and order for the next step, and updates the phi array. */ static void IDACompleteStep(IDAMem IDA_mem, realtype err_k, realtype err_km1) { int i, j, is, kdiff, action; realtype terr_k, terr_km1, terr_kp1; realtype err_knew, err_kp1; realtype enorm, tmp, hnew; N_Vector tempvQ, *tempvS; IDA_mem->ida_nst++; kdiff = IDA_mem->ida_kk - IDA_mem->ida_kused; IDA_mem->ida_kused = IDA_mem->ida_kk; IDA_mem->ida_hused = IDA_mem->ida_hh; if ( (IDA_mem->ida_knew == IDA_mem->ida_kk - 1) || (IDA_mem->ida_kk == IDA_mem->ida_maxord) ) IDA_mem->ida_phase = 1; /* For the first few steps, until either a step fails, or the order is reduced, or the order reaches its maximum, we raise the order and double the stepsize. During these steps, phase = 0. Thereafter, phase = 1, and stepsize and order are set by the usual local error algorithm. Note that, after the first step, the order is not increased, as not all of the neccessary information is available yet. 
*/ if (IDA_mem->ida_phase == 0) { if(IDA_mem->ida_nst > 1) { IDA_mem->ida_kk++; hnew = TWO * IDA_mem->ida_hh; if( (tmp = SUNRabs(hnew) * IDA_mem->ida_hmax_inv) > ONE ) hnew /= tmp; IDA_mem->ida_hh = hnew; } } else { action = UNSET; /* Set action = LOWER/MAINTAIN/RAISE to specify order decision */ if (IDA_mem->ida_knew == IDA_mem->ida_kk - 1) {action = LOWER; goto takeaction;} if (IDA_mem->ida_kk == IDA_mem->ida_maxord) {action = MAINTAIN; goto takeaction;} if ( (IDA_mem->ida_kk + 1 >= IDA_mem->ida_ns ) || (kdiff == 1)) {action = MAINTAIN; goto takeaction;} /* Estimate the error at order k+1, unless already decided to reduce order, or already using maximum order, or stepsize has not been constant, or order was just raised. */ N_VLinearSum(ONE, IDA_mem->ida_ee, -ONE, IDA_mem->ida_phi[IDA_mem->ida_kk + 1], IDA_mem->ida_tempv1); enorm = IDAWrmsNorm(IDA_mem, IDA_mem->ida_tempv1, IDA_mem->ida_ewt, IDA_mem->ida_suppressalg); if (IDA_mem->ida_errconQ) { tempvQ = IDA_mem->ida_ypQ; N_VLinearSum (ONE, IDA_mem->ida_eeQ, -ONE, IDA_mem->ida_phiQ[IDA_mem->ida_kk+1], tempvQ); enorm = IDAQuadWrmsNormUpdate(IDA_mem, enorm, tempvQ, IDA_mem->ida_ewtQ); } if (IDA_mem->ida_errconS) { tempvS = IDA_mem->ida_ypS; (void) N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_eeS, -ONE, IDA_mem->ida_phiS[IDA_mem->ida_kk+1], tempvS); enorm = IDASensWrmsNormUpdate(IDA_mem, enorm, tempvS, IDA_mem->ida_ewtS, IDA_mem->ida_suppressalg); } if (IDA_mem->ida_errconQS) { (void) N_VLinearSumVectorArray(IDA_mem->ida_Ns, ONE, IDA_mem->ida_eeQS, -ONE, IDA_mem->ida_phiQS[IDA_mem->ida_kk+1], IDA_mem->ida_tempvQS); enorm = IDAQuadSensWrmsNormUpdate(IDA_mem, enorm, IDA_mem->ida_tempvQS, IDA_mem->ida_ewtQS); } err_kp1= enorm/(IDA_mem->ida_kk + 2); /* Choose among orders k-1, k, k+1 using local truncation error norms. 
*/ terr_k = (IDA_mem->ida_kk + 1) * err_k; terr_kp1 = (IDA_mem->ida_kk + 2) * err_kp1; if (IDA_mem->ida_kk == 1) { if (terr_kp1 >= HALF * terr_k) {action = MAINTAIN; goto takeaction;} else {action = RAISE; goto takeaction;} } else { terr_km1 = IDA_mem->ida_kk * err_km1; if (terr_km1 <= SUNMIN(terr_k, terr_kp1)) {action = LOWER; goto takeaction;} else if (terr_kp1 >= terr_k) {action = MAINTAIN; goto takeaction;} else {action = RAISE; goto takeaction;} } takeaction: /* Set the estimated error norm and, on change of order, reset kk. */ if (action == RAISE) { IDA_mem->ida_kk++; err_knew = err_kp1; } else if (action == LOWER) { IDA_mem->ida_kk--; err_knew = err_km1; } else { err_knew = err_k; } /* Compute rr = tentative ratio hnew/hh from error norm estimate. Reduce hh if rr <= 1, double hh if rr >= 2, else leave hh as is. If hh is reduced, hnew/hh is restricted to be between .5 and .9. */ hnew = IDA_mem->ida_hh; IDA_mem->ida_rr = SUNRpowerR( TWO * err_knew + PT0001, -ONE/(IDA_mem->ida_kk + 1) ); if (IDA_mem->ida_rr >= TWO) { hnew = TWO * IDA_mem->ida_hh; if( (tmp = SUNRabs(hnew) * IDA_mem->ida_hmax_inv) > ONE ) hnew /= tmp; } else if (IDA_mem->ida_rr <= ONE ) { IDA_mem->ida_rr = SUNMAX(HALF, SUNMIN(PT9,IDA_mem->ida_rr)); hnew = IDA_mem->ida_hh * IDA_mem->ida_rr; } IDA_mem->ida_hh = hnew; } /* end of phase if block */ /* Save ee for possible order increase on next step */ if (IDA_mem->ida_kused < IDA_mem->ida_maxord) { N_VScale(ONE, IDA_mem->ida_ee, IDA_mem->ida_phi[IDA_mem->ida_kused + 1]); if (IDA_mem->ida_quadr) N_VScale(ONE, IDA_mem->ida_eeQ, IDA_mem->ida_phiQ[IDA_mem->ida_kused+1]); if (IDA_mem->ida_sensi || IDA_mem->ida_quadr_sensi) for (is=0; isida_Ns; is++) IDA_mem->ida_cvals[is] = ONE; if (IDA_mem->ida_sensi) (void) N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_eeS, IDA_mem->ida_phiS[IDA_mem->ida_kused+1]); if (IDA_mem->ida_quadr_sensi) (void) N_VScaleVectorArray(IDA_mem->ida_Ns, IDA_mem->ida_cvals, IDA_mem->ida_eeQS, 
IDA_mem->ida_phiQS[IDA_mem->ida_kused+1]); } /* Update phi arrays */ /* To update phi arrays compute X += Z where */ /* X = [ phi[kused], phi[kused-1], phi[kused-2], ... phi[1] ] */ /* Z = [ ee, phi[kused], phi[kused-1], ... phi[0] ] */ IDA_mem->ida_Zvecs[0] = IDA_mem->ida_ee; IDA_mem->ida_Xvecs[0] = IDA_mem->ida_phi[IDA_mem->ida_kused]; for (j=1; j<=IDA_mem->ida_kused; j++) { IDA_mem->ida_Zvecs[j] = IDA_mem->ida_phi[IDA_mem->ida_kused-j+1]; IDA_mem->ida_Xvecs[j] = IDA_mem->ida_phi[IDA_mem->ida_kused-j]; } (void) N_VLinearSumVectorArray(IDA_mem->ida_kused+1, ONE, IDA_mem->ida_Xvecs, ONE, IDA_mem->ida_Zvecs, IDA_mem->ida_Xvecs); if (IDA_mem->ida_quadr) { IDA_mem->ida_Zvecs[0] = IDA_mem->ida_eeQ; IDA_mem->ida_Xvecs[0] = IDA_mem->ida_phiQ[IDA_mem->ida_kused]; for (j=1; j<=IDA_mem->ida_kused; j++) { IDA_mem->ida_Zvecs[j] = IDA_mem->ida_phiQ[IDA_mem->ida_kused-j+1]; IDA_mem->ida_Xvecs[j] = IDA_mem->ida_phiQ[IDA_mem->ida_kused-j]; } (void) N_VLinearSumVectorArray(IDA_mem->ida_kused+1, ONE, IDA_mem->ida_Xvecs, ONE, IDA_mem->ida_Zvecs, IDA_mem->ida_Xvecs); } if (IDA_mem->ida_sensi) { i=0; for (is=0; isida_Ns; is++) { IDA_mem->ida_Zvecs[i] = IDA_mem->ida_eeS[is]; IDA_mem->ida_Xvecs[i] = IDA_mem->ida_phiS[IDA_mem->ida_kused][is]; i++; for (j=1; j<=IDA_mem->ida_kused; j++) { IDA_mem->ida_Zvecs[i] = IDA_mem->ida_phiS[IDA_mem->ida_kused-j+1][is]; IDA_mem->ida_Xvecs[i] = IDA_mem->ida_phiS[IDA_mem->ida_kused-j][is]; i++; } } (void) N_VLinearSumVectorArray(IDA_mem->ida_Ns*(IDA_mem->ida_kused+1), ONE, IDA_mem->ida_Xvecs, ONE, IDA_mem->ida_Zvecs, IDA_mem->ida_Xvecs); } if (IDA_mem->ida_quadr_sensi) { i=0; for (is=0; isida_Ns; is++) { IDA_mem->ida_Zvecs[i] = IDA_mem->ida_eeQS[is]; IDA_mem->ida_Xvecs[i] = IDA_mem->ida_phiQS[IDA_mem->ida_kused][is]; i++; for (j=1; j<=IDA_mem->ida_kused; j++) { IDA_mem->ida_Zvecs[i] = IDA_mem->ida_phiQS[IDA_mem->ida_kused-j+1][is]; IDA_mem->ida_Xvecs[i] = IDA_mem->ida_phiQS[IDA_mem->ida_kused-j][is]; i++; } } (void) 
N_VLinearSumVectorArray(IDA_mem->ida_Ns*(IDA_mem->ida_kused+1), ONE, IDA_mem->ida_Xvecs, ONE, IDA_mem->ida_Zvecs, IDA_mem->ida_Xvecs); } } /* * ----------------------------------------------------------------- * Interpolated output * ----------------------------------------------------------------- */ /* * IDAGetSolution * * This routine evaluates y(t) and y'(t) as the value and derivative of * the interpolating polynomial at the independent variable t, and stores * the results in the vectors yret and ypret. It uses the current * independent variable value, tn, and the method order last used, kused. * This function is called by IDASolve with t = tout, t = tn, or t = tstop. * * If kused = 0 (no step has been taken), or if t = tn, then the order used * here is taken to be 1, giving yret = phi[0], ypret = phi[1]/psi[0]. * * The return values are: * IDA_SUCCESS if t is legal, or * IDA_BAD_T if t is not within the interval of the last step taken. */ int IDAGetSolution(void *ida_mem, realtype t, N_Vector yret, N_Vector ypret) { IDAMem IDA_mem; realtype tfuzz, tp, delt, c, d, gam; int j, kord, retval; if (ida_mem == NULL) { IDAProcessError(NULL, IDA_MEM_NULL, "IDAS", "IDAGetSolution", MSG_NO_MEM); return (IDA_MEM_NULL); } IDA_mem = (IDAMem) ida_mem; /* Check t for legality. Here tn - hused is t_{n-1}. */ tfuzz = HUNDRED * IDA_mem->ida_uround * (SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)); if (IDA_mem->ida_hh < ZERO) tfuzz = - tfuzz; tp = IDA_mem->ida_tn - IDA_mem->ida_hused - tfuzz; if ((t - tp)*IDA_mem->ida_hh < ZERO) { IDAProcessError(IDA_mem, IDA_BAD_T, "IDAS", "IDAGetSolution", MSG_BAD_T, t, IDA_mem->ida_tn-IDA_mem->ida_hused, IDA_mem->ida_tn); return(IDA_BAD_T); } /* Initialize kord = (kused or 1). */ kord = IDA_mem->ida_kused; if (IDA_mem->ida_kused == 0) kord = 1; /* Accumulate multiples of columns phi[j] into yret and ypret. 
*/ delt = t - IDA_mem->ida_tn; c = ONE; d = ZERO; gam = delt / IDA_mem->ida_psi[0]; IDA_mem->ida_cvals[0] = c; for (j=1; j <= kord; j++) { d = d*gam + c / IDA_mem->ida_psi[j-1]; c = c*gam; gam = (delt + IDA_mem->ida_psi[j-1]) / IDA_mem->ida_psi[j]; IDA_mem->ida_cvals[j] = c; IDA_mem->ida_dvals[j-1] = d; } retval = N_VLinearCombination(kord+1, IDA_mem->ida_cvals, IDA_mem->ida_phi, yret); if (retval != IDA_SUCCESS) return(IDA_VECTOROP_ERR); retval = N_VLinearCombination(kord, IDA_mem->ida_dvals, IDA_mem->ida_phi+1, ypret); if (retval != IDA_SUCCESS) return(IDA_VECTOROP_ERR); return(IDA_SUCCESS); } /* * ----------------------------------------------------------------- * Norm functions * ----------------------------------------------------------------- */ /* * IDAWrmsNorm * * Returns the WRMS norm of vector x with weights w. * If mask = SUNTRUE, the weight vector w is masked by id, i.e., * nrm = N_VWrmsNormMask(x,w,id); * Otherwise, * nrm = N_VWrmsNorm(x,w); * * mask = SUNFALSE when the call is made from the nonlinear solver. * mask = suppressalg otherwise. */ realtype IDAWrmsNorm(IDAMem IDA_mem, N_Vector x, N_Vector w, booleantype mask) { realtype nrm; if (mask) nrm = N_VWrmsNormMask(x, w, IDA_mem->ida_id); else nrm = N_VWrmsNorm(x, w); return(nrm); } /* * IDASensWrmsNorm * * This routine returns the maximum over the weighted root mean * square norm of xS with weight vectors wS: * * max { wrms(xS[0],wS[0]) ... wrms(xS[Ns-1],wS[Ns-1]) } * * Called by IDASensUpdateNorm or directly in the IDA_STAGGERED approach * during the NLS solution and before the error test. * * Declared global for use in the computation of IC for sensitivities. 
*/ realtype IDASensWrmsNorm(IDAMem IDA_mem, N_Vector *xS, N_Vector *wS, booleantype mask) { int is; realtype nrm; if (mask) (void) N_VWrmsNormMaskVectorArray(IDA_mem->ida_Ns, xS, wS, IDA_mem->ida_id, IDA_mem->ida_cvals); else (void) N_VWrmsNormVectorArray(IDA_mem->ida_Ns, xS, wS, IDA_mem->ida_cvals); nrm = IDA_mem->ida_cvals[0]; for (is=1; isida_Ns; is++) if ( IDA_mem->ida_cvals[is] > nrm ) nrm = IDA_mem->ida_cvals[is]; return (nrm); } /* * IDAQuadSensWrmsNorm * * This routine returns the maximum over the weighted root mean * square norm of xQS with weight vectors wQS: * * max { wrms(xQS[0],wQS[0]) ... wrms(xQS[Ns-1],wQS[Ns-1]) } */ static realtype IDAQuadSensWrmsNorm(IDAMem IDA_mem, N_Vector *xQS, N_Vector *wQS) { int is; realtype nrm; (void) N_VWrmsNormVectorArray(IDA_mem->ida_Ns, xQS, wQS, IDA_mem->ida_cvals); nrm = IDA_mem->ida_cvals[0]; for (is=1; isida_Ns; is++) if ( IDA_mem->ida_cvals[is] > nrm ) nrm = IDA_mem->ida_cvals[is]; return (nrm); } /* * IDAQuadWrmsNormUpdate * * Updates the norm old_nrm to account for all quadratures. */ static realtype IDAQuadWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm, N_Vector xQ, N_Vector wQ) { realtype qnrm; qnrm = N_VWrmsNorm(xQ, wQ); if (old_nrm > qnrm) return(old_nrm); else return(qnrm); } /* * IDASensWrmsNormUpdate * * Updates the norm old_nrm to account for all sensitivities. 
 *
 * This function is declared global since it is used for finding
 * IC for sensitivities,
 */

realtype IDASensWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm,
                               N_Vector *xS, N_Vector *wS,
                               booleantype mask)
{
  realtype snrm;

  /* Return max(old_nrm, sensitivity WRMS norm). */
  snrm = IDASensWrmsNorm(IDA_mem, xS, wS, mask);
  if (old_nrm > snrm) return(old_nrm);
  else                return(snrm);
}

/* IDAQuadSensWrmsNormUpdate: return max(old_nrm, quadrature-sensitivity
   WRMS norm). */
static realtype IDAQuadSensWrmsNormUpdate(IDAMem IDA_mem, realtype old_nrm,
                                          N_Vector *xQS, N_Vector *wQS)
{
  realtype qsnrm;

  qsnrm = IDAQuadSensWrmsNorm(IDA_mem, xQS, wQS);
  if (old_nrm > qsnrm) return(old_nrm);
  else                 return(qsnrm);
}

/*
 * -----------------------------------------------------------------
 * Functions for rootfinding
 * -----------------------------------------------------------------
 */

/*
 * IDARcheck1
 *
 * This routine completes the initialization of rootfinding memory
 * information, and checks whether g has a zero both at and very near
 * the initial point of the IVP.
 *
 * This routine returns an int equal to:
 *  IDA_RTFUNC_FAIL < 0 if the g function failed, or
 *  IDA_SUCCESS     = 0 otherwise.
 */

static int IDARcheck1(IDAMem IDA_mem)
{
  int i, retval;
  realtype smallh, hratio, tplus;
  booleantype zroot;

  for (i = 0; i < IDA_mem->ida_nrtfn; i++)
    IDA_mem->ida_iroots[i] = 0;
  IDA_mem->ida_tlo = IDA_mem->ida_tn;
  /* ttol: time tolerance used to decide when two roots are "the same";
     scaled by the unit roundoff and the current t and h magnitudes. */
  IDA_mem->ida_ttol = ((SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)) *
                       IDA_mem->ida_uround * HUNDRED);

  /* Evaluate g at initial t and check for zero values. */
  retval = IDA_mem->ida_gfun(IDA_mem->ida_tlo, IDA_mem->ida_phi[0],
                             IDA_mem->ida_phi[1],
                             IDA_mem->ida_glo, IDA_mem->ida_user_data);
  IDA_mem->ida_nge = 1;
  if (retval != 0) return(IDA_RTFUNC_FAIL);

  zroot = SUNFALSE;
  for (i = 0; i < IDA_mem->ida_nrtfn; i++) {
    if (SUNRabs(IDA_mem->ida_glo[i]) == ZERO) {
      zroot = SUNTRUE;
      /* g_i is exactly zero at t0: deactivate it until it moves away
         from zero (re-checked just below at t0 + smallh). */
      IDA_mem->ida_gactive[i] = SUNFALSE;
    }
  }
  if (!zroot) return(IDA_SUCCESS);

  /* Some g_i is zero at t0; look at g at t0+(small increment). */
  hratio = SUNMAX(IDA_mem->ida_ttol / SUNRabs(IDA_mem->ida_hh), PT1);
  smallh = hratio * IDA_mem->ida_hh;
  tplus = IDA_mem->ida_tlo + smallh;
  /* First-order prediction of y at tplus: y0 + smallh*y'0. */
  N_VLinearSum(ONE, IDA_mem->ida_phi[0], smallh,
               IDA_mem->ida_phi[1], IDA_mem->ida_yy);
  retval = IDA_mem->ida_gfun(tplus, IDA_mem->ida_yy, IDA_mem->ida_phi[1],
                             IDA_mem->ida_ghi, IDA_mem->ida_user_data);
  IDA_mem->ida_nge++;
  if (retval != 0) return(IDA_RTFUNC_FAIL);

  /* We check now only the components of g which were exactly 0.0 at t0
   * to see if we can 'activate' them. */
  for (i = 0; i < IDA_mem->ida_nrtfn; i++) {
    if (!IDA_mem->ida_gactive[i] && SUNRabs(IDA_mem->ida_ghi[i]) != ZERO) {
      IDA_mem->ida_gactive[i] = SUNTRUE;
      IDA_mem->ida_glo[i] = IDA_mem->ida_ghi[i];
    }
  }
  return(IDA_SUCCESS);
}

/*
 * IDARcheck2
 *
 * This routine checks for exact zeros of g at the last root found,
 * if the last return was a root.  It then checks for a close pair of
 * zeros (an error condition), and for a new root at a nearby point.
 * The array glo = g(tlo) at the left endpoint of the search interval
 * is adjusted if necessary to assure that all g_i are nonzero
 * there, before returning to do a root search in the interval.
 *
 * On entry, tlo = tretlast is the last value of tret returned by
 * IDASolve.  This may be the previous tn, the previous tout value,
 * or the last root location.
 *
 * This routine returns an int equal to:
 *     IDA_RTFUNC_FAIL < 0 if the g function failed, or
 *     CLOSERT         = 3 if a close pair of zeros was found, or
 *     RTFOUND         = 1 if a new zero of g was found near tlo, or
 *     IDA_SUCCESS     = 0 otherwise.
*/

static int IDARcheck2(IDAMem IDA_mem)
{
  int i, retval;
  realtype smallh, hratio, tplus;
  booleantype zroot;

  /* Nothing to do if the last return was not a root. */
  if (IDA_mem->ida_irfnd == 0) return(IDA_SUCCESS);

  (void) IDAGetSolution(IDA_mem, IDA_mem->ida_tlo,
                        IDA_mem->ida_yy, IDA_mem->ida_yp);
  retval = IDA_mem->ida_gfun(IDA_mem->ida_tlo, IDA_mem->ida_yy,
                             IDA_mem->ida_yp,
                             IDA_mem->ida_glo, IDA_mem->ida_user_data);
  IDA_mem->ida_nge++;
  if (retval != 0) return(IDA_RTFUNC_FAIL);

  zroot = SUNFALSE;
  for (i = 0; i < IDA_mem->ida_nrtfn; i++)
    IDA_mem->ida_iroots[i] = 0;
  for (i = 0; i < IDA_mem->ida_nrtfn; i++) {
    if (!IDA_mem->ida_gactive[i]) continue;
    if (SUNRabs(IDA_mem->ida_glo[i]) == ZERO) {
      zroot = SUNTRUE;
      IDA_mem->ida_iroots[i] = 1;
    }
  }
  if (!zroot) return(IDA_SUCCESS);

  /* One or more g_i has a zero at tlo.  Check g at tlo+smallh. */
  IDA_mem->ida_ttol = ((SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)) *
                       IDA_mem->ida_uround * HUNDRED);
  smallh = (IDA_mem->ida_hh > ZERO) ? IDA_mem->ida_ttol : -IDA_mem->ida_ttol;
  tplus = IDA_mem->ida_tlo + smallh;
  if ( (tplus - IDA_mem->ida_tn)*IDA_mem->ida_hh >= ZERO) {
    /* tplus is inside the last step: cheap first-order update of yy. */
    hratio = smallh/IDA_mem->ida_hh;
    N_VLinearSum(ONE, IDA_mem->ida_yy, hratio,
                 IDA_mem->ida_phi[1], IDA_mem->ida_yy);
  } else {
    (void) IDAGetSolution(IDA_mem, tplus, IDA_mem->ida_yy, IDA_mem->ida_yp);
  }
  retval = IDA_mem->ida_gfun(tplus, IDA_mem->ida_yy, IDA_mem->ida_yp,
                             IDA_mem->ida_ghi, IDA_mem->ida_user_data);
  IDA_mem->ida_nge++;
  if (retval != 0) return(IDA_RTFUNC_FAIL);

  /* Check for close roots (error return), for a new zero at tlo+smallh,
  and for a g_i that changed from zero to nonzero. */
  zroot = SUNFALSE;
  for (i = 0; i < IDA_mem->ida_nrtfn; i++) {
    if (!IDA_mem->ida_gactive[i]) continue;
    if (SUNRabs(IDA_mem->ida_ghi[i]) == ZERO) {
      if (IDA_mem->ida_iroots[i] == 1) return(CLOSERT);
      zroot = SUNTRUE;
      IDA_mem->ida_iroots[i] = 1;
    } else {
      if (IDA_mem->ida_iroots[i] == 1)
        IDA_mem->ida_glo[i] = IDA_mem->ida_ghi[i];
    }
  }
  if (zroot) return(RTFOUND);
  return(IDA_SUCCESS);
}

/*
 * IDARcheck3
 *
 * This routine interfaces to IDARootfind to look for a root of g
 * between tlo and either tn or tout, whichever comes first.
 * Only roots beyond tlo in the direction of integration are sought.
 *
 * This routine returns an int equal to:
 *     IDA_RTFUNC_FAIL < 0 if the g function failed, or
 *     RTFOUND         = 1 if a root of g was found, or
 *     IDA_SUCCESS     = 0 otherwise.
 */

static int IDARcheck3(IDAMem IDA_mem)
{
  int i, ier, retval;

  /* Set thi = tn or tout, whichever comes first. */
  if (IDA_mem->ida_taskc == IDA_ONE_STEP)
    IDA_mem->ida_thi = IDA_mem->ida_tn;
  if (IDA_mem->ida_taskc == IDA_NORMAL) {
    IDA_mem->ida_thi =
      ((IDA_mem->ida_toutc - IDA_mem->ida_tn)*IDA_mem->ida_hh >= ZERO)
      ? IDA_mem->ida_tn : IDA_mem->ida_toutc;
  }

  /* Get y and y' at thi. */
  (void) IDAGetSolution(IDA_mem, IDA_mem->ida_thi,
                        IDA_mem->ida_yy, IDA_mem->ida_yp);

  /* Set ghi = g(thi) and call IDARootfind to search (tlo,thi) for roots. */
  retval = IDA_mem->ida_gfun(IDA_mem->ida_thi, IDA_mem->ida_yy,
                             IDA_mem->ida_yp,
                             IDA_mem->ida_ghi, IDA_mem->ida_user_data);
  IDA_mem->ida_nge++;
  if (retval != 0) return(IDA_RTFUNC_FAIL);

  IDA_mem->ida_ttol = ((SUNRabs(IDA_mem->ida_tn) + SUNRabs(IDA_mem->ida_hh)) *
                       IDA_mem->ida_uround * HUNDRED);
  ier = IDARootfind(IDA_mem);
  if (ier == IDA_RTFUNC_FAIL) return(IDA_RTFUNC_FAIL);
  /* FIX: loop condition restored to "i < IDA_mem->ida_nrtfn"; the source
     text had been mangled (markup stripping) to "iida_nrtfn". */
  for(i=0; i<IDA_mem->ida_nrtfn; i++) {
    if(!IDA_mem->ida_gactive[i] && IDA_mem->ida_grout[i] != ZERO)
      IDA_mem->ida_gactive[i] = SUNTRUE;
  }
  IDA_mem->ida_tlo = IDA_mem->ida_trout;
  for (i = 0; i < IDA_mem->ida_nrtfn; i++)
    IDA_mem->ida_glo[i] = IDA_mem->ida_grout[i];

  /* If no root found, return IDA_SUCCESS. */
  if (ier == IDA_SUCCESS) return(IDA_SUCCESS);

  /* If a root was found, interpolate to get y(trout) and return.  */
  (void) IDAGetSolution(IDA_mem, IDA_mem->ida_trout,
                        IDA_mem->ida_yy, IDA_mem->ida_yp);
  return(RTFOUND);
}

/*
 * IDARootfind
 *
 * This routine solves for a root of g(t) between tlo and thi, if
 * one exists.  Only roots of odd multiplicity (i.e. with a change
 * of sign in one of the g_i), or exact zeros, are found.
 * Here the sign of tlo - thi is arbitrary, but if multiple roots
 * are found, the one closest to tlo is returned.
 *
 * The method used is the Illinois algorithm, a modified secant method.
 * Reference: Kathie L. Hiebert and Lawrence F. Shampine, Implicitly
 * Defined Output Points for Solutions of ODEs, Sandia National
 * Laboratory Report SAND80-0180, February 1980.
 *
 * This routine uses the following parameters for communication:
 *
 * nrtfn    = number of functions g_i, or number of components of
 *            the vector-valued function g(t).  Input only.
 *
 * gfun     = user-defined function for g(t).  Its form is
 *            (void) gfun(t, y, yp, gt, user_data)
 *
 * rootdir  = in array specifying the direction of zero-crossings.
 *            If rootdir[i] > 0, search for roots of g_i only if
 *            g_i is increasing; if rootdir[i] < 0, search for
 *            roots of g_i only if g_i is decreasing; otherwise
 *            always search for roots of g_i.
 *
 * gactive  = array specifying whether a component of g should
 *            or should not be monitored. gactive[i] is initially
 *            set to SUNTRUE for all i=0,...,nrtfn-1, but it may be
 *            reset to SUNFALSE if at the first step g[i] is 0.0
 *            both at the I.C. and at a small perturbation of them.
 *            gactive[i] is then set back on SUNTRUE only after the
 *            corresponding g function moves away from 0.0.
 *
 * nge      = cumulative counter for gfun calls.
 *
 * ttol     = a convergence tolerance for trout.  Input only.
 *            When a root at trout is found, it is located only to
 *            within a tolerance of ttol.
Typically, ttol should
 *            be set to a value on the order of
 *               100 * UROUND * max (SUNRabs(tlo), SUNRabs(thi))
 *            where UROUND is the unit roundoff of the machine.
 *
 * tlo, thi = endpoints of the interval in which roots are sought.
 *            On input, these must be distinct, but tlo - thi may
 *            be of either sign.  The direction of integration is
 *            assumed to be from tlo to thi.  On return, tlo and thi
 *            are the endpoints of the final relevant interval.
 *
 * glo, ghi = arrays of length nrtfn containing the vectors g(tlo)
 *            and g(thi) respectively.  Input and output.  On input,
 *            none of the glo[i] should be zero.
 *
 * trout    = root location, if a root was found, or thi if not.
 *            Output only.  If a root was found other than an exact
 *            zero of g, trout is the endpoint thi of the final
 *            interval bracketing the root, with size at most ttol.
 *
 * grout    = array of length nrtfn containing g(trout) on return.
 *
 * iroots   = int array of length nrtfn with root information.
 *            Output only.  If a root was found, iroots indicates
 *            which components g_i have a root at trout.  For
 *            i = 0, ..., nrtfn-1, iroots[i] = 1 if g_i has a root
 *            and g_i is increasing, iroots[i] = -1 if g_i has a
 *            root and g_i is decreasing, and iroots[i] = 0 if g_i
 *            has no roots or g_i varies in the direction opposite
 *            to that indicated by rootdir[i].
 *
 * This routine returns an int equal to:
 *      IDA_RTFUNC_FAIL < 0 if the g function failed, or
 *      RTFOUND         = 1 if a root of g was found, or
 *      IDA_SUCCESS     = 0 otherwise.
 *
 */

static int IDARootfind(IDAMem IDA_mem)
{
  realtype alph, tmid, gfrac, maxfrac, fracint, fracsub;
  int i, retval, imax, side, sideprev;
  booleantype zroot, sgnchg;

  imax = 0;

  /* First check for change in sign in ghi or for a zero in ghi. */
  maxfrac = ZERO;
  zroot = SUNFALSE;
  sgnchg = SUNFALSE;
  for (i = 0;  i < IDA_mem->ida_nrtfn; i++) {
    if(!IDA_mem->ida_gactive[i]) continue;
    if (SUNRabs(IDA_mem->ida_ghi[i]) == ZERO) {
      if(IDA_mem->ida_rootdir[i] * IDA_mem->ida_glo[i] <= ZERO) {
        zroot = SUNTRUE;
      }
    } else {
      if ( (IDA_mem->ida_glo[i] * IDA_mem->ida_ghi[i] < ZERO) &&
           (IDA_mem->ida_rootdir[i] * IDA_mem->ida_glo[i] <= ZERO) ) {
        /* gfrac measures how close the sign change is to thi; the
           component with the largest gfrac (root nearest tlo) wins. */
        gfrac = SUNRabs(IDA_mem->ida_ghi[i] /
                        (IDA_mem->ida_ghi[i] - IDA_mem->ida_glo[i]));
        if (gfrac > maxfrac) {
          sgnchg = SUNTRUE;
          maxfrac = gfrac;
          imax = i;
        }
      }
    }
  }

  /* If no sign change was found, reset trout and grout.  Then return
     IDA_SUCCESS if no zero was found, or set iroots and return RTFOUND.  */
  if (!sgnchg) {
    IDA_mem->ida_trout = IDA_mem->ida_thi;
    for (i = 0; i < IDA_mem->ida_nrtfn; i++)
      IDA_mem->ida_grout[i] = IDA_mem->ida_ghi[i];
    if (!zroot) return(IDA_SUCCESS);
    for (i = 0; i < IDA_mem->ida_nrtfn; i++) {
      IDA_mem->ida_iroots[i] = 0;
      if(!IDA_mem->ida_gactive[i]) continue;
      if ( (SUNRabs(IDA_mem->ida_ghi[i]) == ZERO) &&
           (IDA_mem->ida_rootdir[i] * IDA_mem->ida_glo[i] <= ZERO) )
        IDA_mem->ida_iroots[i] = IDA_mem->ida_glo[i] > 0 ? -1:1;
    }
    return(RTFOUND);
  }

  /* Initialize alph to avoid compiler warning */
  alph = ONE;

  /* A sign change was found.  Loop to locate nearest root. */

  side = 0;  sideprev = -1;
  for(;;) {                                    /* Looping point */

    /* If interval size is already less than tolerance ttol, break. */
    if (SUNRabs(IDA_mem->ida_thi - IDA_mem->ida_tlo) <= IDA_mem->ida_ttol)
      break;

    /* Set weight alph.
       On the first two passes, set alph = 1.  Thereafter, reset alph
       according to the side (low vs high) of the subinterval in which
       the sign change was found in the previous two passes.
       If the sides were opposite, set alph = 1.
       If the sides were the same, then double alph (if high side),
       or halve alph (if low side).
       The next guess tmid is the secant method value if alph = 1, but
       is closer to tlo if alph < 1, and closer to thi if alph > 1.    */

    if (sideprev == side) {
      alph = (side == 2) ? alph*TWO : alph*HALF;
    } else {
      alph = ONE;
    }

    /* Set next root approximation tmid and get g(tmid).
       If tmid is too close to tlo or thi, adjust it inward,
       by a fractional distance that is between 0.1 and 0.5.  */
    tmid = IDA_mem->ida_thi - (IDA_mem->ida_thi - IDA_mem->ida_tlo) *
      IDA_mem->ida_ghi[imax] / (IDA_mem->ida_ghi[imax] - alph*IDA_mem->ida_glo[imax]);
    if (SUNRabs(tmid - IDA_mem->ida_tlo) < HALF * IDA_mem->ida_ttol) {
      fracint = SUNRabs(IDA_mem->ida_thi - IDA_mem->ida_tlo) / IDA_mem->ida_ttol;
      fracsub = (fracint > FIVE) ? PT1 : HALF/fracint;
      tmid = IDA_mem->ida_tlo + fracsub*(IDA_mem->ida_thi - IDA_mem->ida_tlo);
    }
    if (SUNRabs(IDA_mem->ida_thi - tmid) < HALF * IDA_mem->ida_ttol) {
      fracint = SUNRabs(IDA_mem->ida_thi - IDA_mem->ida_tlo) / IDA_mem->ida_ttol;
      fracsub = (fracint > FIVE) ? PT1 : HALF/fracint;
      tmid = IDA_mem->ida_thi - fracsub*(IDA_mem->ida_thi - IDA_mem->ida_tlo);
    }

    (void) IDAGetSolution(IDA_mem, tmid, IDA_mem->ida_yy, IDA_mem->ida_yp);
    retval = IDA_mem->ida_gfun(tmid, IDA_mem->ida_yy, IDA_mem->ida_yp,
                               IDA_mem->ida_grout, IDA_mem->ida_user_data);
    IDA_mem->ida_nge++;
    if (retval != 0) return(IDA_RTFUNC_FAIL);

    /* Check to see in which subinterval g changes sign, and reset imax.
       Set side = 1 if sign change is on low side, or 2 if on high side.  */
    maxfrac = ZERO;
    zroot = SUNFALSE;
    sgnchg = SUNFALSE;
    sideprev = side;
    for (i = 0;  i < IDA_mem->ida_nrtfn; i++) {
      if(!IDA_mem->ida_gactive[i]) continue;
      if (SUNRabs(IDA_mem->ida_grout[i]) == ZERO) {
        if(IDA_mem->ida_rootdir[i] * IDA_mem->ida_glo[i] <= ZERO)
          zroot = SUNTRUE;
      } else {
        if ( (IDA_mem->ida_glo[i] * IDA_mem->ida_grout[i] < ZERO) &&
             (IDA_mem->ida_rootdir[i] * IDA_mem->ida_glo[i] <= ZERO) ) {
          gfrac = SUNRabs(IDA_mem->ida_grout[i] /
                          (IDA_mem->ida_grout[i] - IDA_mem->ida_glo[i]));
          if (gfrac > maxfrac) {
            sgnchg = SUNTRUE;
            maxfrac = gfrac;
            imax = i;
          }
        }
      }
    }
    if (sgnchg) {
      /* Sign change found in (tlo,tmid); replace thi with tmid. */
      IDA_mem->ida_thi = tmid;
      for (i = 0; i < IDA_mem->ida_nrtfn; i++)
        IDA_mem->ida_ghi[i] = IDA_mem->ida_grout[i];
      side = 1;
      /* Stop at root thi if converged; otherwise loop. */
      if (SUNRabs(IDA_mem->ida_thi - IDA_mem->ida_tlo) <= IDA_mem->ida_ttol)
        break;
      continue;  /* Return to looping point. */
    }

    if (zroot) {
      /* No sign change in (tlo,tmid), but g = 0 at tmid; return root tmid. */
      IDA_mem->ida_thi = tmid;
      for (i = 0; i < IDA_mem->ida_nrtfn; i++)
        IDA_mem->ida_ghi[i] = IDA_mem->ida_grout[i];
      break;
    }

    /* No sign change in (tlo,tmid), and no zero at tmid.
       Sign change must be in (tmid,thi).  Replace tlo with tmid. */
    IDA_mem->ida_tlo = tmid;
    for (i = 0; i < IDA_mem->ida_nrtfn; i++)
      IDA_mem->ida_glo[i] = IDA_mem->ida_grout[i];
    side = 2;
    /* Stop at root thi if converged; otherwise loop back. */
    if (SUNRabs(IDA_mem->ida_thi - IDA_mem->ida_tlo) <= IDA_mem->ida_ttol)
      break;

  } /* End of root-search loop */

  /* Reset trout and grout, set iroots, and return RTFOUND. */
  IDA_mem->ida_trout = IDA_mem->ida_thi;
  for (i = 0; i < IDA_mem->ida_nrtfn; i++) {
    IDA_mem->ida_grout[i] = IDA_mem->ida_ghi[i];
    IDA_mem->ida_iroots[i] = 0;
    if(!IDA_mem->ida_gactive[i]) continue;
    if ( (SUNRabs(IDA_mem->ida_ghi[i]) == ZERO) &&
         (IDA_mem->ida_rootdir[i] * IDA_mem->ida_glo[i] <= ZERO) )
      IDA_mem->ida_iroots[i] = IDA_mem->ida_glo[i] > 0 ? -1:1;
    if ( (IDA_mem->ida_glo[i] * IDA_mem->ida_ghi[i] < ZERO) &&
         (IDA_mem->ida_rootdir[i] * IDA_mem->ida_glo[i] <= ZERO) )
      IDA_mem->ida_iroots[i] = IDA_mem->ida_glo[i] > 0 ? -1:1;
  }
  return(RTFOUND);
}

/*
 * =================================================================
 * Internal DQ approximations for sensitivity RHS
 * =================================================================
 */

#undef user_dataS

/*
 * IDASensResDQ
 *
 * IDASensRhsDQ computes the residuals of the sensitivity equations
 * by finite differences. It is of type IDASensResFn.
 * Returns 0 if successful, <0 if an unrecoverable failure occurred,
 * >0 for a recoverable error.
*/ int IDASensResDQ(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector resval, N_Vector *yyS, N_Vector *ypS, N_Vector *resvalS, void *user_dataS, N_Vector ytemp, N_Vector yptemp, N_Vector restemp) { int retval, is; for (is=0; is0 if res has a recoverable error). */ static int IDASensRes1DQ(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector resval, int is, N_Vector yyS, N_Vector ypS, N_Vector resvalS, void *user_dataS, N_Vector ytemp, N_Vector yptemp, N_Vector restemp) { IDAMem IDA_mem; int method; int which; int retval; realtype psave, pbari; realtype del , rdel; realtype Delp, rDelp, r2Delp; realtype Dely, rDely, r2Dely; realtype Del , rDel , r2Del ; realtype norms, ratio; /* user_dataS points to IDA_mem */ IDA_mem = (IDAMem) user_dataS; /* Set base perturbation del */ del = SUNRsqrt(SUNMAX(IDA_mem->ida_rtol, IDA_mem->ida_uround)); rdel = ONE/del; pbari = IDA_mem->ida_pbar[is]; which = IDA_mem->ida_plist[is]; psave = IDA_mem->ida_p[which]; Delp = pbari * del; rDelp = ONE/Delp; norms = N_VWrmsNorm(yyS, IDA_mem->ida_ewt) * pbari; rDely = SUNMAX(norms, rdel) / pbari; Dely = ONE/rDely; if (IDA_mem->ida_DQrhomax == ZERO) { /* No switching */ method = (IDA_mem->ida_DQtype==IDA_CENTERED) ? CENTERED1 : FORWARD1; } else { /* switch between simultaneous/separate DQ */ ratio = Dely * rDelp; if ( SUNMAX(ONE/ratio, ratio) <= IDA_mem->ida_DQrhomax ) method = (IDA_mem->ida_DQtype==IDA_CENTERED) ? CENTERED1 : FORWARD1; else method = (IDA_mem->ida_DQtype==IDA_CENTERED) ? 
CENTERED2 : FORWARD2; } switch (method) { case CENTERED1: Del = SUNMIN(Dely, Delp); r2Del = HALF/Del; /* Forward perturb y, y' and parameter */ N_VLinearSum(Del, yyS, ONE, yy, ytemp); N_VLinearSum(Del, ypS, ONE, yp, yptemp); IDA_mem->ida_p[which] = psave + Del; /* Save residual in resvalS */ retval = IDA_mem->ida_res(t, ytemp, yptemp, resvalS, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Backward perturb y, y' and parameter */ N_VLinearSum(-Del, yyS, ONE, yy, ytemp); N_VLinearSum(-Del, ypS, ONE, yp, yptemp); IDA_mem->ida_p[which] = psave - Del; /* Save residual in restemp */ retval = IDA_mem->ida_res(t, ytemp, yptemp, restemp, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Estimate the residual for the i-th sensitivity equation */ N_VLinearSum(r2Del, resvalS, -r2Del, restemp, resvalS); break; case CENTERED2: r2Delp = HALF/Delp; r2Dely = HALF/Dely; /* Forward perturb y and y' */ N_VLinearSum(Dely, yyS, ONE, yy, ytemp); N_VLinearSum(Dely, ypS, ONE, yp, yptemp); /* Save residual in resvalS */ retval = IDA_mem->ida_res(t, ytemp, yptemp, resvalS, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Backward perturb y and y' */ N_VLinearSum(-Dely, yyS, ONE, yy, ytemp); N_VLinearSum(-Dely, ypS, ONE, yp, yptemp); /* Save residual in restemp */ retval = IDA_mem->ida_res(t, ytemp, yptemp, restemp, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Save the first difference quotient in resvalS */ N_VLinearSum(r2Dely, resvalS, -r2Dely, restemp, resvalS); /* Forward perturb parameter */ IDA_mem->ida_p[which] = psave + Delp; /* Save residual in ytemp */ retval = IDA_mem->ida_res(t, yy, yp, ytemp, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Backward perturb parameter */ IDA_mem->ida_p[which] = psave - Delp; /* Save residual in yptemp */ retval = IDA_mem->ida_res(t, yy, yp, yptemp, IDA_mem->ida_user_data); 
IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Save the second difference quotient in restemp */ N_VLinearSum(r2Delp, ytemp, -r2Delp, yptemp, restemp); /* Add the difference quotients for the sensitivity residual */ N_VLinearSum(ONE, resvalS, ONE, restemp, resvalS); break; case FORWARD1: Del = SUNMIN(Dely, Delp); rDel = ONE/Del; /* Forward perturb y, y' and parameter */ N_VLinearSum(Del, yyS, ONE, yy, ytemp); N_VLinearSum(Del, ypS, ONE, yp, yptemp); IDA_mem->ida_p[which] = psave + Del; /* Save residual in resvalS */ retval = IDA_mem->ida_res(t, ytemp, yptemp, resvalS, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Estimate the residual for the i-th sensitivity equation */ N_VLinearSum(rDel, resvalS, -rDel, resval, resvalS); break; case FORWARD2: /* Forward perturb y and y' */ N_VLinearSum(Dely, yyS, ONE, yy, ytemp); N_VLinearSum(Dely, ypS, ONE, yp, yptemp); /* Save residual in resvalS */ retval = IDA_mem->ida_res(t, ytemp, yptemp, resvalS, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Save the first difference quotient in resvalS */ N_VLinearSum(rDely, resvalS, -rDely, resval, resvalS); /* Forward perturb parameter */ IDA_mem->ida_p[which] = psave + Delp; /* Save residual in restemp */ retval = IDA_mem->ida_res(t, yy, yp, restemp, IDA_mem->ida_user_data); IDA_mem->ida_nreS++; if (retval != 0) return(retval); /* Save the second difference quotient in restemp */ N_VLinearSum(rDelp, restemp, -rDelp, resval, restemp); /* Add the difference quotients for the sensitivity residual */ N_VLinearSum(ONE, resvalS, ONE, restemp, resvalS); break; } /* Restore original value of parameter */ IDA_mem->ida_p[which] = psave; return(0); } /* IDAQuadSensRhsInternalDQ - internal IDAQuadSensRhsFn * * IDAQuadSensRhsInternalDQ computes right hand side of all quadrature * sensitivity equations by finite differences. All work is actually * done in IDAQuadSensRhs1InternalDQ. 
*/ static int IDAQuadSensRhsInternalDQ(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector rrQ, N_Vector *resvalQS, void *ida_mem, N_Vector yytmp, N_Vector yptmp, N_Vector tmpQS) { IDAMem IDA_mem; int is, retval; /* cvode_mem is passed here as user data */ IDA_mem = (IDAMem) ida_mem; for (is=0; isida_rtol, IDA_mem->ida_uround)); rdel = ONE/del; pbari = IDA_mem->ida_pbar[is]; which = IDA_mem->ida_plist[is]; psave = IDA_mem->ida_p[which]; Delp = pbari * del; norms = N_VWrmsNorm(yyS, IDA_mem->ida_ewt) * pbari; rDely = SUNMAX(norms, rdel) / pbari; Dely = ONE/rDely; method = (IDA_mem->ida_DQtype==IDA_CENTERED) ? CENTERED1 : FORWARD1; switch(method) { case CENTERED1: Del = SUNMIN(Dely, Delp); r2Del = HALF/Del; N_VLinearSum(ONE, yy, Del, yyS, yytmp); N_VLinearSum(ONE, yp, Del, ypS, yptmp); IDA_mem->ida_p[which] = psave + Del; retval = IDA_mem->ida_rhsQ(t, yytmp, yptmp, resvalQS, IDA_mem->ida_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(-Del, yyS, ONE, yy, yytmp); N_VLinearSum(-Del, ypS, ONE, yp, yptmp); IDA_mem->ida_p[which] = psave - Del; retval = IDA_mem->ida_rhsQ(t, yytmp, yptmp, tmpQS, IDA_mem->ida_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(r2Del, resvalQS, -r2Del, tmpQS, resvalQS); break; case FORWARD1: Del = SUNMIN(Dely, Delp); rdel = ONE/Del; N_VLinearSum(ONE, yy, Del, yyS, yytmp); N_VLinearSum(ONE, yp, Del, ypS, yptmp); IDA_mem->ida_p[which] = psave + Del; retval = IDA_mem->ida_rhsQ(t, yytmp, yptmp, resvalQS, IDA_mem->ida_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(rdel, resvalQS, -rdel, resvalQ, resvalQS); break; } IDA_mem->ida_p[which] = psave; /* Increment counter nrQeS */ IDA_mem->ida_nrQeS += nfel; return(0); } /* * ================================================================= * IDA error message handling functions * ================================================================= */ /* * IDAProcessError is a high level error handling function. 
* - If ida_mem==NULL it prints the error message to stderr. * - Otherwise, it sets up and calls the error handling function * pointed to by ida_ehfun. */ void IDAProcessError(IDAMem IDA_mem, int error_code, const char *module, const char *fname, const char *msgfmt, ...) { va_list ap; char msg[256]; /* Initialize the argument pointer variable (msgfmt is the last required argument to IDAProcessError) */ va_start(ap, msgfmt); /* Compose the message */ vsprintf(msg, msgfmt, ap); if (IDA_mem == NULL) { /* We write to stderr */ #ifndef NO_FPRINTF_OUTPUT STAN_SUNDIALS_FPRINTF(stderr, "\n[%s ERROR] %s\n ", module, fname); STAN_SUNDIALS_FPRINTF(stderr, "%s\n\n", msg); #endif } else { /* We can call ehfun */ IDA_mem->ida_ehfun(error_code, module, fname, msg, IDA_mem->ida_eh_data); } /* Finalize argument processing */ va_end(ap); return; } /* IDAErrHandler is the default error handling function. It sends the error message to the stream pointed to by ida_errfp */ void IDAErrHandler(int error_code, const char *module, const char *function, char *msg, void *data) { IDAMem IDA_mem; char err_type[10]; /* data points to IDA_mem here */ IDA_mem = (IDAMem) data; if (error_code == IDA_WARNING) sprintf(err_type,"WARNING"); else sprintf(err_type,"ERROR"); #ifndef NO_FPRINTF_OUTPUT if (IDA_mem->ida_errfp != NULL) { STAN_SUNDIALS_FPRINTF(IDA_mem->ida_errfp,"\n[%s %s] %s\n",module,err_type,function); STAN_SUNDIALS_FPRINTF(IDA_mem->ida_errfp," %s\n\n",msg); } #endif return; } StanHeaders/src/sunlinsol/0000755000176200001440000000000014511135055015270 5ustar liggesusersStanHeaders/src/sunlinsol/pcg/0000755000176200001440000000000014511135055016041 5ustar liggesusersStanHeaders/src/sunlinsol/pcg/sunlinsol_pcg.c0000644000176200001440000004016114645137106021075 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds, Ashley Crawford @ SMU * Based on sundials_pcg.c code, written by Daniel Reynolds @ SMU * 
----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the PCG implementation of * the SUNLINSOL package. * -----------------------------------------------------------------*/ #include #include #include #include #include "sundials_debug.h" #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * PCG solver structure accessibility macros: * ----------------------------------------------------------------- */ #define PCG_CONTENT(S) ( (SUNLinearSolverContent_PCG)(S->content) ) #define PRETYPE(S) ( PCG_CONTENT(S)->pretype ) #define LASTFLAG(S) ( PCG_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new PCG linear solver */ SUNLinearSolver SUNLinSol_PCG(N_Vector y, int pretype, int maxl, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_PCG content; /* check for legal pretype and maxl values; if illegal use defaults */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) pretype = SUN_PREC_NONE; if (maxl <= 0) maxl = SUNPCG_MAXL_DEFAULT; /* Create linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_PCG; S->ops->getid = SUNLinSolGetID_PCG; S->ops->setatimes = SUNLinSolSetATimes_PCG; S->ops->setpreconditioner = 
SUNLinSolSetPreconditioner_PCG; S->ops->setscalingvectors = SUNLinSolSetScalingVectors_PCG; S->ops->setzeroguess = SUNLinSolSetZeroGuess_PCG; S->ops->initialize = SUNLinSolInitialize_PCG; S->ops->setup = SUNLinSolSetup_PCG; S->ops->solve = SUNLinSolSolve_PCG; S->ops->numiters = SUNLinSolNumIters_PCG; S->ops->resnorm = SUNLinSolResNorm_PCG; S->ops->resid = SUNLinSolResid_PCG; S->ops->lastflag = SUNLinSolLastFlag_PCG; S->ops->space = SUNLinSolSpace_PCG; S->ops->free = SUNLinSolFree_PCG; /* Create content */ content = NULL; content = (SUNLinearSolverContent_PCG) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = 0; content->maxl = maxl; content->pretype = pretype; content->zeroguess = SUNFALSE; content->numiters = 0; content->resnorm = ZERO; content->r = NULL; content->p = NULL; content->z = NULL; content->Ap = NULL; content->s = NULL; content->ATimes = NULL; content->ATData = NULL; content->Psetup = NULL; content->Psolve = NULL; content->PData = NULL; content->print_level = 0; content->info_file = stdout; /* Allocate content */ content->r = N_VClone(y); if (content->r == NULL) { SUNLinSolFree(S); return NULL; } content->p = N_VClone(y); if (content->p == NULL) { SUNLinSolFree(S); return NULL; } content->z = N_VClone(y); if (content->z == NULL) { SUNLinSolFree(S); return NULL; } content->Ap = N_VClone(y); if (content->Ap == NULL) { SUNLinSolFree(S); return NULL; } return(S); } /* ---------------------------------------------------------------------------- * Function to set the type of preconditioning for PCG to use */ int SUNLinSol_PCGSetPrecType(SUNLinearSolver S, int pretype) { /* Check for legal pretype */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) { return(SUNLS_ILL_INPUT); } /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set pretype */ 
PRETYPE(S) = pretype; return(SUNLS_SUCCESS); } /* ---------------------------------------------------------------------------- * Function to set the maximum number of iterations for PCG to use */ int SUNLinSol_PCGSetMaxl(SUNLinearSolver S, int maxl) { /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Check for legal number of iters */ if (maxl <= 0) maxl = SUNPCG_MAXL_DEFAULT; /* Set max iters */ PCG_CONTENT(S)->maxl = maxl; return(SUNLS_SUCCESS); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_PCG(SUNLinearSolver S) { return(SUNLINEARSOLVER_ITERATIVE); } SUNLinearSolver_ID SUNLinSolGetID_PCG(SUNLinearSolver S) { return(SUNLINEARSOLVER_PCG); } int SUNLinSolInitialize_PCG(SUNLinearSolver S) { /* ensure valid options */ if (S == NULL) return(SUNLS_MEM_NULL); if (PCG_CONTENT(S)->maxl <= 0) PCG_CONTENT(S)->maxl = SUNPCG_MAXL_DEFAULT; if (PCG_CONTENT(S)->ATimes == NULL) { LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } if ( (PRETYPE(S) != SUN_PREC_LEFT) && (PRETYPE(S) != SUN_PREC_RIGHT) && (PRETYPE(S) != SUN_PREC_BOTH) ) PRETYPE(S) = SUN_PREC_NONE; if ((PRETYPE(S) != SUN_PREC_NONE) && (PCG_CONTENT(S)->Psolve == NULL)) { LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* no additional memory to allocate */ /* return with success */ LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetATimes_PCG(SUNLinearSolver S, void* ATData, SUNATimesFn ATimes) { /* set function pointers to integrator-supplied ATimes routine and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); PCG_CONTENT(S)->ATimes = ATimes; PCG_CONTENT(S)->ATData = ATData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetPreconditioner_PCG(SUNLinearSolver S, void* PData, SUNPSetupFn Psetup, SUNPSolveFn Psolve) { /* set function pointers 
to integrator-supplied Psetup and PSolve routines and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); PCG_CONTENT(S)->Psetup = Psetup; PCG_CONTENT(S)->Psolve = Psolve; PCG_CONTENT(S)->PData = PData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetScalingVectors_PCG(SUNLinearSolver S, N_Vector s, N_Vector nul) { /* set N_Vector pointer to integrator-supplied scaling vector (only use the first one), and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); PCG_CONTENT(S)->s = s; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetZeroGuess_PCG(SUNLinearSolver S, booleantype onoff) { /* set flag indicating a zero initial guess */ if (S == NULL) return(SUNLS_MEM_NULL); PCG_CONTENT(S)->zeroguess = onoff; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetup_PCG(SUNLinearSolver S, SUNMatrix nul) { int ier; SUNPSetupFn Psetup; void* PData; /* Set shortcuts to PCG memory structures */ if (S == NULL) return(SUNLS_MEM_NULL); Psetup = PCG_CONTENT(S)->Psetup; PData = PCG_CONTENT(S)->PData; /* no solver-specific setup is required, but if user-supplied Psetup routine exists, call that here */ if (Psetup != NULL) { ier = Psetup(PData); if (ier != 0) { LASTFLAG(S) = (ier < 0) ? SUNLS_PSET_FAIL_UNREC : SUNLS_PSET_FAIL_REC; return(LASTFLAG(S)); } } /* return with success */ LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSolve_PCG(SUNLinearSolver S, SUNMatrix nul, N_Vector x, N_Vector b, realtype delta) { /* local data and shortcut variables */ realtype alpha, beta, r0_norm, rho, rz, rz_old; N_Vector r, p, z, Ap, w; booleantype UsePrec, UseScaling, converged; booleantype *zeroguess; int l, l_max, pretype, ier; void *A_data, *P_data; SUNATimesFn atimes; SUNPSolveFn psolve; realtype *res_norm; int *nli; /* Make local shorcuts to solver variables. 
*/ if (S == NULL) return(SUNLS_MEM_NULL); l_max = PCG_CONTENT(S)->maxl; r = PCG_CONTENT(S)->r; p = PCG_CONTENT(S)->p; z = PCG_CONTENT(S)->z; Ap = PCG_CONTENT(S)->Ap; w = PCG_CONTENT(S)->s; A_data = PCG_CONTENT(S)->ATData; P_data = PCG_CONTENT(S)->PData; atimes = PCG_CONTENT(S)->ATimes; psolve = PCG_CONTENT(S)->Psolve; pretype = PCG_CONTENT(S)->pretype; zeroguess = &(PCG_CONTENT(S)->zeroguess); nli = &(PCG_CONTENT(S)->numiters); res_norm = &(PCG_CONTENT(S)->resnorm); /* Initialize counters and convergence flag */ *nli = 0; converged = SUNFALSE; /* set booleantype flags for internal solver options */ UsePrec = ( (pretype == SUN_PREC_BOTH) || (pretype == SUN_PREC_LEFT) || (pretype == SUN_PREC_RIGHT) ); UseScaling = (w != NULL); #ifdef SUNDIALS_BUILD_WITH_MONITORING if (PCG_CONTENT(S)->print_level && PCG_CONTENT(S)->info_file) STAN_SUNDIALS_FPRINTF(PCG_CONTENT(S)->info_file, "SUNLINSOL_PCG:\n"); #endif /* Check if Atimes function has been set */ if (atimes == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } /* If preconditioning, check if psolve has been set */ if (UsePrec && psolve == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* Set r to initial residual r_0 = b - A*x_0 */ if (*zeroguess) { N_VScale(ONE, b, r); } else { ier = atimes(A_data, x, r); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } N_VLinearSum(ONE, b, -ONE, r, r); } /* Set rho to scaled L2 norm of r, and return if small */ if (UseScaling) N_VProd(r, w, Ap); else N_VScale(ONE, r, Ap); *res_norm = r0_norm = rho = SUNRsqrt(N_VDotProd(Ap, Ap)); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print initial residual */ if (PCG_CONTENT(S)->print_level && PCG_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(PCG_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) 0, *res_norm); } #endif if (rho <= delta) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* Apply preconditioner and b-scaling to r = r_0 */ if (UsePrec) { ier = psolve(P_data, r, z, delta, SUN_PREC_LEFT); /* z = P^{-1}r */ if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, r, z); /* Initialize rz to */ rz = N_VDotProd(r, z); /* Copy z to p */ N_VScale(ONE, z, p); /* Begin main iteration loop */ for(l=0; l / */ alpha = rz / N_VDotProd(Ap, p); /* Update x = x + alpha*p */ if (l == 0 && *zeroguess) N_VScale(alpha, p, x); else N_VLinearSum(ONE, x, alpha, p, x); /* Update r = r - alpha*Ap */ N_VLinearSum(ONE, r, -alpha, Ap, r); /* Set rho and check convergence */ if (UseScaling) N_VProd(r, w, Ap); else N_VScale(ONE, r, Ap); *res_norm = rho = SUNRsqrt(N_VDotProd(Ap, Ap)); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print current iteration number and the residual */ if (PCG_CONTENT(S)->print_level && PCG_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(PCG_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) *nli, *res_norm); } #endif if (rho <= delta) { converged = SUNTRUE; break; } /* Exit early on last iteration */ if (l == l_max - 1) break; /* Apply preconditioner: z = P^{-1}*r */ if (UsePrec) { ier = psolve(P_data, r, z, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, r, z); /* update rz */ rz_old = rz; rz = N_VDotProd(r, z); /* Calculate beta = / */ beta = rz / rz_old; /* Update p = z + beta*p */ N_VLinearSum(ONE, z, beta, p, p); } /* Main loop finished, return with result */ *zeroguess = SUNFALSE; if (converged == SUNTRUE) { LASTFLAG(S) = SUNLS_SUCCESS; } else if (rho < r0_norm) { LASTFLAG(S) = SUNLS_RES_REDUCED; } else { LASTFLAG(S) = SUNLS_CONV_FAIL; } return(LASTFLAG(S)); } int SUNLinSolNumIters_PCG(SUNLinearSolver S) { /* return the stored 'numiters' value */ if (S == NULL) return(-1); return (PCG_CONTENT(S)->numiters); } realtype SUNLinSolResNorm_PCG(SUNLinearSolver S) { /* return the stored 'resnorm' value */ if (S == NULL) return(-ONE); return (PCG_CONTENT(S)->resnorm); } N_Vector SUNLinSolResid_PCG(SUNLinearSolver S) { /* return the stored 'r' vector */ return (PCG_CONTENT(S)->r); } sunindextype SUNLinSolLastFlag_PCG(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return (LASTFLAG(S)); } int SUNLinSolSpace_PCG(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { sunindextype liw1, lrw1; N_VSpace(PCG_CONTENT(S)->r, &lrw1, &liw1); *lenrwLS = 1 + lrw1*4; *leniwLS = 4 + liw1*4; return(SUNLS_SUCCESS); } int SUNLinSolFree_PCG(SUNLinearSolver S) { if (S == NULL) return(SUNLS_SUCCESS); if (S->content) { /* delete items from within the content structure */ if (PCG_CONTENT(S)->r) { N_VDestroy(PCG_CONTENT(S)->r); PCG_CONTENT(S)->r = NULL; } if (PCG_CONTENT(S)->p) { N_VDestroy(PCG_CONTENT(S)->p); PCG_CONTENT(S)->p = NULL; } if (PCG_CONTENT(S)->z) { N_VDestroy(PCG_CONTENT(S)->z); PCG_CONTENT(S)->z = NULL; } if (PCG_CONTENT(S)->Ap) { N_VDestroy(PCG_CONTENT(S)->Ap); PCG_CONTENT(S)->Ap = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } int SUNLinSolSetInfoFile_PCG(SUNLinearSolver S, FILE* info_file) { #ifdef 
SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); PCG_CONTENT(S)->info_file = info_file; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetInfoFile_PCG: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } int SUNLinSolSetPrintLevel_PCG(SUNLinearSolver S, int print_level) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); /* check for valid print level */ if (print_level < 0 || print_level > 1) return(SUNLS_ILL_INPUT); PCG_CONTENT(S)->print_level = print_level; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetPrintLevel_PCG: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } StanHeaders/src/sunlinsol/pcg/fmod/0000755000176200001440000000000014511135055016766 5ustar liggesusersStanHeaders/src/sunlinsol/pcg/fmod/fsunlinsol_pcg_mod.f900000644000176200001440000003623114645137106023206 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! 
--------------------------------------------------------------- module fsunlinsol_pcg_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: SUNPCG_MAXL_DEFAULT = 5_C_INT public :: FSUNLinSol_PCG public :: FSUNLinSol_PCGSetPrecType public :: FSUNLinSol_PCGSetMaxl public :: FSUNLinSolGetType_PCG public :: FSUNLinSolGetID_PCG public :: FSUNLinSolInitialize_PCG public :: FSUNLinSolSetATimes_PCG public :: FSUNLinSolSetPreconditioner_PCG public :: FSUNLinSolSetScalingVectors_PCG public :: FSUNLinSolSetZeroGuess_PCG public :: FSUNLinSolSetup_PCG public :: FSUNLinSolSolve_PCG public :: FSUNLinSolNumIters_PCG public :: FSUNLinSolResNorm_PCG public :: FSUNLinSolResid_PCG public :: FSUNLinSolLastFlag_PCG public :: FSUNLinSolSpace_PCG public :: FSUNLinSolFree_PCG public :: FSUNLinSolSetInfoFile_PCG public :: FSUNLinSolSetPrintLevel_PCG ! 
WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_PCG(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSol_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FSUNLinSol_PCGSetPrecType(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_PCGSetPrecType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_PCGSetMaxl(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_PCGSetMaxl") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetType_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolGetID_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetATimes_PCG(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetATimes_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPreconditioner_PCG(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSolSetPreconditioner_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 
type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetScalingVectors_PCG(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetScalingVectors_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetZeroGuess_PCG(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetZeroGuess_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_PCG(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve_PCG(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolNumIters_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolNumIters_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolResNorm_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolResNorm_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FSUNLinSolResid_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolResid_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNLinSolLastFlag_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag_PCG") & result(fresult) use, intrinsic :: 
ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_PCG(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree_PCG(farg1) & bind(C, name="_wrap_FSUNLinSolFree_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetInfoFile_PCG(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetInfoFile_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPrintLevel_PCG(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetPrintLevel_PCG") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNLinSol_PCG(y, pretype, maxl, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y integer(C_INT), intent(in) :: pretype integer(C_INT), intent(in) :: maxl type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = c_loc(y) farg2 = pretype farg3 = maxl farg4 = sunctx fresult = swigc_FSUNLinSol_PCG(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FSUNLinSol_PCGSetPrecType(s, pretype) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: pretype integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = pretype fresult = swigc_FSUNLinSol_PCGSetPrecType(farg1, farg2) swig_result = fresult end function function FSUNLinSol_PCGSetMaxl(s, maxl) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: maxl integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = maxl fresult = swigc_FSUNLinSol_PCGSetMaxl(farg1, farg2) swig_result = fresult end function function FSUNLinSolGetType_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_PCG(farg1) swig_result = fresult end function function FSUNLinSolGetID_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = 
swigc_FSUNLinSolGetID_PCG(farg1) swig_result = fresult end function function FSUNLinSolInitialize_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_PCG(farg1) swig_result = fresult end function function FSUNLinSolSetATimes_PCG(s, a_data, atimes) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: a_data type(C_FUNPTR), intent(in), value :: atimes integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = c_loc(s) farg2 = a_data farg3 = atimes fresult = swigc_FSUNLinSolSetATimes_PCG(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetPreconditioner_PCG(s, p_data, pset, psol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: p_data type(C_FUNPTR), intent(in), value :: pset type(C_FUNPTR), intent(in), value :: psol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = c_loc(s) farg2 = p_data farg3 = pset farg4 = psol fresult = swigc_FSUNLinSolSetPreconditioner_PCG(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNLinSolSetScalingVectors_PCG(s, s1, nul) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(N_Vector), target, intent(inout) :: s1 type(N_Vector), target, intent(inout) :: nul integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(s1) farg3 = c_loc(nul) fresult = swigc_FSUNLinSolSetScalingVectors_PCG(farg1, farg2, farg3) swig_result = fresult end function function 
FSUNLinSolSetZeroGuess_PCG(s, onoff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: onoff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = onoff fresult = swigc_FSUNLinSolSetZeroGuess_PCG(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetup_PCG(s, nul) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: nul integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(nul) fresult = swigc_FSUNLinSolSetup_PCG(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve_PCG(s, nul, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: nul type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(nul) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_PCG(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolNumIters_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolNumIters_PCG(farg1) swig_result = fresult end function function FSUNLinSolResNorm_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s real(C_DOUBLE) :: fresult type(C_PTR) :: 
farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResNorm_PCG(farg1) swig_result = fresult end function function FSUNLinSolResid_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResid_PCG(farg1) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolLastFlag_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_PCG(farg1) swig_result = fresult end function function FSUNLinSolSpace_PCG(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_PCG(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_PCG(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_PCG(farg1) swig_result = fresult end function function FSUNLinSolSetInfoFile_PCG(ls, info_file) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: ls type(C_PTR) :: info_file integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(ls) farg2 = info_file fresult = swigc_FSUNLinSolSetInfoFile_PCG(farg1, farg2) 
swig_result = fresult end function function FSUNLinSolSetPrintLevel_PCG(ls, print_level) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: ls integer(C_INT), intent(in) :: print_level integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(ls) farg2 = print_level fresult = swigc_FSUNLinSolSetPrintLevel_PCG(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sunlinsol/pcg/fmod/fsunlinsol_pcg_mod.c0000644000176200001440000003267114645137106023036 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_pcg.h" SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_PCG(N_Vector farg1, int const *farg2, int const *farg3, void *farg4) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int arg3 ; SUNContext arg4 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (SUNContext)(farg4); result = (SUNLinearSolver)SUNLinSol_PCG(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_PCGSetPrecType(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = 
(int)SUNLinSol_PCGSetPrecType(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_PCGSetMaxl(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_PCGSetMaxl(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_PCG(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_PCG(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetID_PCG(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_PCG(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_PCG(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_PCG(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetATimes_PCG(SUNLinearSolver farg1, void *farg2, SUNATimesFn farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNATimesFn arg3 = (SUNATimesFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNATimesFn)(farg3); result = (int)SUNLinSolSetATimes_PCG(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPreconditioner_PCG(SUNLinearSolver farg1, void *farg2, SUNPSetupFn farg3, SUNPSolveFn farg4) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNPSetupFn arg3 = (SUNPSetupFn) 0 ; SUNPSolveFn arg4 = (SUNPSolveFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void 
*)(farg2); arg3 = (SUNPSetupFn)(farg3); arg4 = (SUNPSolveFn)(farg4); result = (int)SUNLinSolSetPreconditioner_PCG(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetScalingVectors_PCG(SUNLinearSolver farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNLinSolSetScalingVectors_PCG(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetZeroGuess_PCG(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetZeroGuess_PCG(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_PCG(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_PCG(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_PCG(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_PCG(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolNumIters_PCG(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = 
(SUNLinearSolver)(farg1); result = (int)SUNLinSolNumIters_PCG(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FSUNLinSolResNorm_PCG(SUNLinearSolver farg1) { double fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; realtype result; arg1 = (SUNLinearSolver)(farg1); result = (realtype)SUNLinSolResNorm_PCG(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FSUNLinSolResid_PCG(SUNLinearSolver farg1) { N_Vector fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector result; arg1 = (SUNLinearSolver)(farg1); result = (N_Vector)SUNLinSolResid_PCG(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_PCG(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_PCG(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_PCG(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_PCG(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_PCG(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_PCG(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetInfoFile_PCG(SUNLinearSolver farg1, void *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNLinSolSetInfoFile_PCG(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPrintLevel_PCG(SUNLinearSolver farg1, int const *farg2) 
{ int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetPrintLevel_PCG(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/dense/0000755000176200001440000000000014645137104016373 5ustar liggesusersStanHeaders/src/sunlinsol/dense/sunlinsol_dense.c0000644000176200001440000001432614645137106021753 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds, Ashley Crawford @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the dense implementation of * the SUNLINSOL package. 
* -----------------------------------------------------------------*/ #include #include #include #include #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * Dense solver structure accessibility macros: * ----------------------------------------------------------------- */ #define DENSE_CONTENT(S) ( (SUNLinearSolverContent_Dense)(S->content) ) #define PIVOTS(S) ( DENSE_CONTENT(S)->pivots ) #define LASTFLAG(S) ( DENSE_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new dense linear solver */ SUNLinearSolver SUNLinSol_Dense(N_Vector y, SUNMatrix A, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_Dense content; sunindextype MatrixRows; /* Check compatibility with supplied SUNMatrix and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_DENSE) return(NULL); if (SUNDenseMatrix_Rows(A) != SUNDenseMatrix_Columns(A)) return(NULL); if ( (N_VGetVectorID(y) != SUNDIALS_NVEC_SERIAL) && (N_VGetVectorID(y) != SUNDIALS_NVEC_OPENMP) && (N_VGetVectorID(y) != SUNDIALS_NVEC_PTHREADS) ) return(NULL); MatrixRows = SUNDenseMatrix_Rows(A); if (MatrixRows != N_VGetLength(y)) return(NULL); /* Create an empty linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_Dense; S->ops->getid = SUNLinSolGetID_Dense; S->ops->initialize = SUNLinSolInitialize_Dense; S->ops->setup = SUNLinSolSetup_Dense; S->ops->solve = SUNLinSolSolve_Dense; S->ops->lastflag = SUNLinSolLastFlag_Dense; S->ops->space = SUNLinSolSpace_Dense; S->ops->free = SUNLinSolFree_Dense; /* Create content */ content = NULL; content = (SUNLinearSolverContent_Dense) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach 
content */ S->content = content; /* Fill content */ content->N = MatrixRows; content->last_flag = 0; content->pivots = NULL; /* Allocate content */ content->pivots = (sunindextype *) malloc(MatrixRows * sizeof(sunindextype)); if (content->pivots == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_Dense(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_Dense(SUNLinearSolver S) { return(SUNLINEARSOLVER_DENSE); } int SUNLinSolInitialize_Dense(SUNLinearSolver S) { /* all solver-specific memory has already been allocated */ LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int SUNLinSolSetup_Dense(SUNLinearSolver S, SUNMatrix A) { realtype **A_cols; sunindextype *pivots; /* check for valid inputs */ if ( (A == NULL) || (S == NULL) ) return(SUNLS_MEM_NULL); /* Ensure that A is a dense matrix */ if (SUNMatGetID(A) != SUNMATRIX_DENSE) { LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } /* access data pointers (return with failure on NULL) */ A_cols = NULL; pivots = NULL; A_cols = SUNDenseMatrix_Cols(A); pivots = PIVOTS(S); if ( (A_cols == NULL) || (pivots == NULL) ) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* perform LU factorization of input matrix */ LASTFLAG(S) = SUNDlsMat_denseGETRF(A_cols, SUNDenseMatrix_Rows(A), SUNDenseMatrix_Columns(A), pivots); /* store error flag (if nonzero, this row encountered zero-valued pivod) */ if (LASTFLAG(S) > 0) return(SUNLS_LUFACT_FAIL); return(SUNLS_SUCCESS); } int SUNLinSolSolve_Dense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { realtype **A_cols, *xdata; sunindextype *pivots; if ( (A == NULL) || (S == NULL) || (x == NULL) || (b == NULL) ) return(SUNLS_MEM_NULL); /* copy b into x */ N_VScale(ONE, b, x); /* access 
data pointers (return with failure on NULL) */ A_cols = NULL; xdata = NULL; pivots = NULL; A_cols = SUNDenseMatrix_Cols(A); xdata = N_VGetArrayPointer(x); pivots = PIVOTS(S); if ( (A_cols == NULL) || (xdata == NULL) || (pivots == NULL) ) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* solve using LU factors */ SUNDlsMat_denseGETRS(A_cols, SUNDenseMatrix_Rows(A), pivots, xdata); LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } sunindextype SUNLinSolLastFlag_Dense(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return(LASTFLAG(S)); } int SUNLinSolSpace_Dense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { *leniwLS = 2 + DENSE_CONTENT(S)->N; *lenrwLS = 0; return(SUNLS_SUCCESS); } int SUNLinSolFree_Dense(SUNLinearSolver S) { /* return if S is already free */ if (S == NULL) return(SUNLS_SUCCESS); /* delete items from contents, then delete generic structure */ if (S->content) { if (PIVOTS(S)) { free(PIVOTS(S)); PIVOTS(S) = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/dense/fmod/0000755000176200001440000000000014511135055017313 5ustar liggesusersStanHeaders/src/sunlinsol/dense/fmod/fsunlinsol_dense_mod.f900000644000176200001440000001647614645137106024071 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! 
SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunlinsol_dense_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FSUNLinSol_Dense public :: FSUNLinSolGetType_Dense public :: FSUNLinSolGetID_Dense public :: FSUNLinSolInitialize_Dense public :: FSUNLinSolSetup_Dense public :: FSUNLinSolSolve_Dense public :: FSUNLinSolLastFlag_Dense public :: FSUNLinSolSpace_Dense public :: FSUNLinSolFree_Dense ! WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_Dense(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSol_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FSUNLinSolGetType_Dense(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_Dense(farg1) & bind(C, name="_wrap_FSUNLinSolGetID_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_Dense(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_Dense(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function 
swigc_FSUNLinSolSolve_Dense(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolLastFlag_Dense(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_Dense(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree_Dense(farg1) & bind(C, name="_wrap_FSUNLinSolFree_Dense") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNLinSol_Dense(y, a, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(y) farg2 = c_loc(a) farg3 = sunctx fresult = swigc_FSUNLinSol_Dense(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolGetType_Dense(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_Dense(farg1) swig_result = fresult end function function FSUNLinSolGetID_Dense(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetID_Dense(farg1) swig_result = fresult end function function FSUNLinSolInitialize_Dense(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_Dense(farg1) swig_result = fresult end function function FSUNLinSolSetup_Dense(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup_Dense(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve_Dense(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING 
integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_Dense(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolLastFlag_Dense(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_Dense(farg1) swig_result = fresult end function function FSUNLinSolSpace_Dense(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_Dense(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_Dense(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_Dense(farg1) swig_result = fresult end function end module StanHeaders/src/sunlinsol/dense/fmod/fsunlinsol_dense_mod.c0000644000176200001440000002275114645137106023706 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file 
was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_dense.h" SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_Dense(N_Vector farg1, SUNMatrix farg2, void *farg3) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; SUNContext arg3 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (SUNContext)(farg3); result = (SUNLinearSolver)SUNLinSol_Dense(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_Dense(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_Dense(arg1); fresult = (int)(result); return fresult; 
} SWIGEXPORT int _wrap_FSUNLinSolGetID_Dense(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_Dense(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_Dense(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_Dense(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_Dense(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_Dense(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_Dense(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_Dense(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_Dense(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_Dense(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_Dense(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = 
(long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_Dense(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_Dense(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_Dense(arg1); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/lapackdense/0000755000176200001440000000000014511135055017542 5ustar liggesusersStanHeaders/src/sunlinsol/lapackdense/sunlinsol_lapackdense.c0000644000176200001440000001477514645137106024313 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on codes _lapack.c by: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the LAPACK dense * implementation of the SUNLINSOL package. 
* -----------------------------------------------------------------*/ #include #include #include #include #include "sundials_lapack_defs.h" /* Interfaces to match 'realtype' with the correct LAPACK functions */ #if defined(SUNDIALS_DOUBLE_PRECISION) #define xgetrf_f77 dgetrf_f77 #define xgetrs_f77 dgetrs_f77 #elif defined(SUNDIALS_SINGLE_PRECISION) #define xgetrf_f77 sgetrf_f77 #define xgetrs_f77 sgetrs_f77 #else #error Incompatible realtype for LAPACK; disable LAPACK and rebuild #endif #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * LapackDense solver structure accessibility macros: * ----------------------------------------------------------------- */ #define LAPACKDENSE_CONTENT(S) ( (SUNLinearSolverContent_LapackDense)(S->content) ) #define PIVOTS(S) ( LAPACKDENSE_CONTENT(S)->pivots ) #define LASTFLAG(S) ( LAPACKDENSE_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new LAPACK dense linear solver */ SUNLinearSolver SUNLinSol_LapackDense(N_Vector y, SUNMatrix A, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_LapackDense content; sunindextype MatrixRows; /* Check compatibility with supplied SUNMatrix and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_DENSE) return(NULL); if (SUNDenseMatrix_Rows(A) != SUNDenseMatrix_Columns(A)) return(NULL); if ( (N_VGetVectorID(y) != SUNDIALS_NVEC_SERIAL) && (N_VGetVectorID(y) != SUNDIALS_NVEC_OPENMP) && (N_VGetVectorID(y) != SUNDIALS_NVEC_PTHREADS) ) return(NULL); MatrixRows = SUNDenseMatrix_Rows(A); if (MatrixRows != N_VGetLength(y)) return(NULL); /* Create linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = 
SUNLinSolGetType_LapackDense; S->ops->getid = SUNLinSolGetID_LapackDense; S->ops->initialize = SUNLinSolInitialize_LapackDense; S->ops->setup = SUNLinSolSetup_LapackDense; S->ops->solve = SUNLinSolSolve_LapackDense; S->ops->lastflag = SUNLinSolLastFlag_LapackDense; S->ops->space = SUNLinSolSpace_LapackDense; S->ops->free = SUNLinSolFree_LapackDense; /* Create content */ content = NULL; content = (SUNLinearSolverContent_LapackDense) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->N = MatrixRows; content->last_flag = 0; content->pivots = NULL; /* Allocate content */ content->pivots = (sunindextype *) malloc(MatrixRows * sizeof(sunindextype)); if (content->pivots == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_LapackDense(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_LapackDense(SUNLinearSolver S) { return(SUNLINEARSOLVER_LAPACKDENSE); } int SUNLinSolInitialize_LapackDense(SUNLinearSolver S) { /* all solver-specific memory has already been allocated */ LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int SUNLinSolSetup_LapackDense(SUNLinearSolver S, SUNMatrix A) { sunindextype n, ier; /* check for valid inputs */ if ( (A == NULL) || (S == NULL) ) return(SUNLS_MEM_NULL); /* Ensure that A is a dense matrix */ if (SUNMatGetID(A) != SUNMATRIX_DENSE) { LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } /* Call LAPACK to do LU factorization of A */ n = SUNDenseMatrix_Rows(A); xgetrf_f77(&n, &n, SUNDenseMatrix_Data(A), &n, PIVOTS(S), &ier); LASTFLAG(S) = ier; if (ier > 0) return(SUNLS_LUFACT_FAIL); if (ier < 0) return(SUNLS_PACKAGE_FAIL_UNREC); return(SUNLS_SUCCESS); } int 
SUNLinSolSolve_LapackDense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { sunindextype n, one, ier; realtype *xdata; if ( (A == NULL) || (S == NULL) || (x == NULL) || (b == NULL) ) return(SUNLS_MEM_NULL); /* copy b into x */ N_VScale(ONE, b, x); /* access x data array */ xdata = N_VGetArrayPointer(x); if (xdata == NULL) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* Call LAPACK to solve the linear system */ n = SUNDenseMatrix_Rows(A); one = 1; xgetrs_f77("N", &n, &one, SUNDenseMatrix_Data(A), &n, PIVOTS(S), xdata, &n, &ier); LASTFLAG(S) = ier; if (ier < 0) return(SUNLS_PACKAGE_FAIL_UNREC); LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } sunindextype SUNLinSolLastFlag_LapackDense(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return(LASTFLAG(S)); } int SUNLinSolSpace_LapackDense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { *lenrwLS = 0; *leniwLS = 2 + LAPACKDENSE_CONTENT(S)->N; return(SUNLS_SUCCESS); } int SUNLinSolFree_LapackDense(SUNLinearSolver S) { /* return if S is already free */ if (S == NULL) return(SUNLS_SUCCESS); /* delete items from contents, then delete generic structure */ if (S->content) { if (PIVOTS(S)) { free(PIVOTS(S)); PIVOTS(S) = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/cusolversp/0000755000176200001440000000000014511135055017475 5ustar liggesusersStanHeaders/src/sunlinsol/cusolversp/sunlinsol_cusolversp_batchqr.cu0000644000176200001440000002727314645137106026067 0ustar liggesusers/* ---------------------------------------------------------------------------- * Programmer(s): Cody J. 
Balos @ LLNL * ---------------------------------------------------------------------------- * Based on work by Donald Wilcox @ LBNL * ---------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------------------- * Implementation file for cuSolverSp batched QR SUNLinearSolver interface. * ----------------------------------------------------------------------------*/ #include #include #include #include #include "sundials_cuda.h" #include "sundials_debug.h" #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /* macros for handling the different function names based on precision */ #if defined(SUNDIALS_DOUBLE_PRECISION) #define _cusolverSpXcsrqrBufferInfoBatched cusolverSpDcsrqrBufferInfoBatched #define _cusolverSpXcsrqrsvBatched cusolverSpDcsrqrsvBatched #elif defined(SUNDIALS_SINGLE_PRECISION) #define _cusolverSpXcsrqrBufferInfoBatched cusolverSpScsrqrBufferInfoBatched #define _cusolverSpXcsrqrsvBatched cusolverSpScsrqrsvBatched #endif /* * ----------------------------------------------------------------- * cuSolverSp solver structure accessibility macros: * ----------------------------------------------------------------- */ #define SUN_CUSP_CONTENT(S) ( (SUNLinearSolverContent_cuSolverSp_batchQR)(S->content) ) #define SUN_CUSP_QRWORKSPACE(S) ( SUN_CUSP_CONTENT(S)->workspace ) #define SUN_CUSP_FIRSTFACTORIZE(S) ( SUN_CUSP_CONTENT(S)->first_factorize ) #define SUN_CUSP_LASTFLAG(S) ( SUN_CUSP_CONTENT(S)->last_flag ) #define SUN_CUSOL_HANDLE(S) ( SUN_CUSP_CONTENT(S)->cusolver_handle ) #define SUN_CUSP_DESC(S) ( SUN_CUSP_CONTENT(S)->desc ) #define SUN_CUSP_QRINFO(S) ( SUN_CUSP_CONTENT(S)->info ) 
#define SUN_CUSP_INTERNAL_SIZE(S) ( SUN_CUSP_CONTENT(S)->internal_size ) #define SUN_CUSP_WORK_SIZE(S) ( SUN_CUSP_CONTENT(S)->workspace_size ) /* * ---------------------------------------------------------------------------- * Implementations of exported functions. * ---------------------------------------------------------------------------- */ SUNLinearSolver SUNLinSol_cuSolverSp_batchQR(N_Vector y, SUNMatrix A, cusolverSpHandle_t cusol_handle, SUNContext sunctx) { /* Check that required arguments are not NULL */ if (y == NULL || A == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: y or A is null\n"); return NULL; } /* Check compatibility with supplied SUNMatrix and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_CUSPARSE || y->ops->nvgetdevicearraypointer == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: illegal type for y or A\n"); return NULL; } /* Matrix and vector dimensions must agree */ if (N_VGetLength(y) != SUNMatrix_cuSparse_Columns(A)) { SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSol_cuSolverSp_batchQR: matrix and vector dimensions don't agree\n"); return NULL; } /* Create an empty linear solver */ SUNLinearSolver S; S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) { return NULL; } /* Attach operations */ S->ops->gettype = SUNLinSolGetType_cuSolverSp_batchQR; S->ops->getid = SUNLinSolGetID_cuSolverSp_batchQR; S->ops->initialize = SUNLinSolInitialize_cuSolverSp_batchQR; S->ops->setup = SUNLinSolSetup_cuSolverSp_batchQR; S->ops->solve = SUNLinSolSolve_cuSolverSp_batchQR; S->ops->lastflag = SUNLinSolLastFlag_cuSolverSp_batchQR; S->ops->free = SUNLinSolFree_cuSolverSp_batchQR; /* Create content */ SUNLinearSolverContent_cuSolverSp_batchQR content; content = NULL; content = (SUNLinearSolverContent_cuSolverSp_batchQR) malloc(sizeof(*content)); if (content == NULL) { SUNLinSolFree(S); return NULL; } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = SUNLS_SUCCESS; content->first_factorize 
= SUNTRUE; content->internal_size = 0; content->workspace_size = 0; content->cusolver_handle = cusol_handle; content->info = NULL; content->workspace = NULL; content->desc = NULL; return S; } /* * ----------------------------------------------------------------- * Implementation of accessor and setter functions. * ----------------------------------------------------------------- */ void SUNLinSol_cuSolverSp_batchQR_GetDescription(SUNLinearSolver S, const char** desc) { *desc = SUN_CUSP_DESC(S); } void SUNLinSol_cuSolverSp_batchQR_SetDescription(SUNLinearSolver S, const char* desc) { SUN_CUSP_DESC(S) = desc; } void SUNLinSol_cuSolverSp_batchQR_GetDeviceSpace(SUNLinearSolver S, size_t* cuSolverInternal, size_t* cuSolverWorkspace) { /* size is in bytes */ *cuSolverInternal = SUN_CUSP_INTERNAL_SIZE(S); /* buffer for Q and R factors */ *cuSolverWorkspace = SUN_CUSP_WORK_SIZE(S); /* numerical factorization buffer */ } /* * ----------------------------------------------------------------- * Implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_cuSolverSp_batchQR(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_cuSolverSp_batchQR(SUNLinearSolver S) { return(SUNLINEARSOLVER_CUSOLVERSP_BATCHQR); } int SUNLinSolInitialize_cuSolverSp_batchQR(SUNLinearSolver S) { SUN_CUSP_FIRSTFACTORIZE(S) = SUNTRUE; SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS; return(SUN_CUSP_LASTFLAG(S)); } int SUNLinSolSetup_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A) { int blockrows, blockcols, blocknnz, nblock; int *d_rowptr, *d_colind; realtype *d_data; cusparseMatDescr_t mat_descr; cudaError_t cuerr; cusolverStatus_t status; if (SUN_CUSP_LASTFLAG(S) != SUNLS_SUCCESS) return SUN_CUSP_LASTFLAG(S); if (SUN_CUSP_FIRSTFACTORIZE(S)) { /* Free old workspace and symbloic analysis */ if (SUN_CUSP_QRWORKSPACE(S)) { cudaFree(SUN_CUSP_QRWORKSPACE(S)); 
cusolverSpDestroyCsrqrInfo(SUN_CUSP_QRINFO(S)); } /* We must create a new csrqrinfo_t context every time we want to do a symbolic analysis. Trying to reuse it results in a CUSOLVER_STATUS_INVALID_VALUE error. */ status = cusolverSpCreateCsrqrInfo(&SUN_CUSP_QRINFO(S)); if (!SUNDIALS_CUSOLVER_VERIFY(status)) { SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return SUN_CUSP_LASTFLAG(S); } nblock = SUNMatrix_cuSparse_NumBlocks(A); blocknnz = SUNMatrix_cuSparse_BlockNNZ(A); blockrows = SUNMatrix_cuSparse_BlockRows(A); blockcols = SUNMatrix_cuSparse_BlockColumns(A); d_data = SUNMatrix_cuSparse_Data(A); d_rowptr = SUNMatrix_cuSparse_IndexPointers(A); d_colind = SUNMatrix_cuSparse_IndexValues(A); mat_descr = SUNMatrix_cuSparse_MatDescr(A); /* Perform symbolic analysis of sparsity structure */ status = cusolverSpXcsrqrAnalysisBatched(SUN_CUSOL_HANDLE(S), blockrows, blockcols, blocknnz, mat_descr, d_rowptr, d_colind, SUN_CUSP_QRINFO(S)); if (!SUNDIALS_CUSOLVER_VERIFY(status)) { SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return SUN_CUSP_LASTFLAG(S); } /* Compute the workspace we will need */ status = _cusolverSpXcsrqrBufferInfoBatched(SUN_CUSOL_HANDLE(S), blockrows, blockcols, blocknnz, mat_descr, d_data, d_rowptr, d_colind, nblock, SUN_CUSP_QRINFO(S), &SUN_CUSP_INTERNAL_SIZE(S), &SUN_CUSP_WORK_SIZE(S)); if (!SUNDIALS_CUSOLVER_VERIFY(status)) { SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return SUN_CUSP_LASTFLAG(S); } cuerr = cudaMalloc((void**) &SUN_CUSP_QRWORKSPACE(S), SUN_CUSP_WORK_SIZE(S)); if (!SUNDIALS_CUDA_VERIFY(cuerr)) { SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return SUN_CUSP_LASTFLAG(S); } SUN_CUSP_FIRSTFACTORIZE(S) = SUNFALSE; } SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS; return(SUN_CUSP_LASTFLAG(S)); } int SUNLinSolSolve_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { cusolverStatus_t status; int blockrows, blockcols, blocknnz, nblock; int *d_rowptr, *d_colind; realtype *d_data; cusparseMatDescr_t 
mat_descr; if ((S == NULL) || (A == NULL) || (x == NULL) || (b == NULL)) return SUNLS_MEM_NULL; SUN_CUSP_LASTFLAG(S) = SUNLS_SUCCESS; realtype* device_b = N_VGetDeviceArrayPointer(b); realtype* device_x = N_VGetDeviceArrayPointer(x); if (SUN_CUSP_LASTFLAG(S) != SUNLS_SUCCESS) return SUN_CUSP_LASTFLAG(S); /* solve the system */ nblock = SUNMatrix_cuSparse_NumBlocks(A); blocknnz = SUNMatrix_cuSparse_BlockNNZ(A); blockrows = SUNMatrix_cuSparse_BlockRows(A); blockcols = SUNMatrix_cuSparse_BlockColumns(A); d_data = SUNMatrix_cuSparse_Data(A); d_rowptr = SUNMatrix_cuSparse_IndexPointers(A); d_colind = SUNMatrix_cuSparse_IndexValues(A); mat_descr = SUNMatrix_cuSparse_MatDescr(A); status = _cusolverSpXcsrqrsvBatched(SUN_CUSOL_HANDLE(S), blockrows, blockcols, blocknnz, mat_descr, d_data, d_rowptr, d_colind, device_b, device_x, nblock, SUN_CUSP_QRINFO(S), SUN_CUSP_QRWORKSPACE(S)); if (!SUNDIALS_CUSOLVER_VERIFY(status)) { SUN_CUSP_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return SUN_CUSP_LASTFLAG(S); } return SUN_CUSP_LASTFLAG(S); } sunindextype SUNLinSolLastFlag_cuSolverSp_batchQR(SUNLinearSolver S) { if (S == NULL) return -1; return SUN_CUSP_LASTFLAG(S); } int SUNLinSolFree_cuSolverSp_batchQR(SUNLinearSolver S) { /* return with success if already freed */ if (S == NULL) return SUNLS_SUCCESS; /* free stuff in the content structure */ cusolverSpDestroyCsrqrInfo(SUN_CUSP_QRINFO(S)); cudaFree(SUN_CUSP_QRWORKSPACE(S)); /* free content structure */ if (S->content) { free(S->content); S->content = NULL; } /* free ops structure */ if (S->ops) { free(S->ops); S->ops = NULL; } /* free the actual SUNLinSol */ free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/lapackband/0000755000176200001440000000000014511135055017350 5ustar liggesusersStanHeaders/src/sunlinsol/lapackband/sunlinsol_lapackband.c0000644000176200001440000001544014645137106023715 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU 
* Based on codes _lapack.c by: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the LAPACK band * implementation of the SUNLINSOL package. * -----------------------------------------------------------------*/ #include #include #include #include #include "sundials_lapack_defs.h" /* Interfaces to match 'realtype' with the correct LAPACK functions */ #if defined(SUNDIALS_DOUBLE_PRECISION) #define xgbtrf_f77 dgbtrf_f77 #define xgbtrs_f77 dgbtrs_f77 #elif defined(SUNDIALS_SINGLE_PRECISION) #define xgbtrf_f77 sgbtrf_f77 #define xgbtrs_f77 sgbtrs_f77 #else #error Incompatible realtype for LAPACK; disable LAPACK and rebuild #endif #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * Band solver structure accessibility macros: * ----------------------------------------------------------------- */ #define LAPACKBAND_CONTENT(S) ( (SUNLinearSolverContent_LapackBand)(S->content) ) #define PIVOTS(S) ( LAPACKBAND_CONTENT(S)->pivots ) #define LASTFLAG(S) ( LAPACKBAND_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new LAPACK band linear solver */ SUNLinearSolver SUNLinSol_LapackBand(N_Vector y, SUNMatrix A, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_LapackBand content; sunindextype MatrixRows; /* Check compatibility with supplied SUNMatrix 
and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_BAND) return(NULL); if (SUNBandMatrix_Rows(A) != SUNBandMatrix_Columns(A)) return(NULL); if ( (N_VGetVectorID(y) != SUNDIALS_NVEC_SERIAL) && (N_VGetVectorID(y) != SUNDIALS_NVEC_OPENMP) && (N_VGetVectorID(y) != SUNDIALS_NVEC_PTHREADS) ) return(NULL); MatrixRows = SUNBandMatrix_Rows(A); if (MatrixRows != N_VGetLength(y)) return(NULL); /* Create an empty linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_LapackBand; S->ops->getid = SUNLinSolGetID_LapackBand; S->ops->initialize = SUNLinSolInitialize_LapackBand; S->ops->setup = SUNLinSolSetup_LapackBand; S->ops->solve = SUNLinSolSolve_LapackBand; S->ops->lastflag = SUNLinSolLastFlag_LapackBand; S->ops->space = SUNLinSolSpace_LapackBand; S->ops->free = SUNLinSolFree_LapackBand; /* Create content */ content = NULL; content = (SUNLinearSolverContent_LapackBand) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->N = MatrixRows; content->last_flag = 0; content->pivots = NULL; /* Allocate content */ content->pivots = (sunindextype *) malloc(MatrixRows * sizeof(sunindextype)); if (content->pivots == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_LapackBand(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_LapackBand(SUNLinearSolver S) { return(SUNLINEARSOLVER_LAPACKBAND); } int SUNLinSolInitialize_LapackBand(SUNLinearSolver S) { /* all solver-specific memory has already been allocated */ LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int SUNLinSolSetup_LapackBand(SUNLinearSolver S, SUNMatrix A) { sunindextype n, 
ml, mu, ldim, ier; /* check for valid inputs */ if ( (A == NULL) || (S == NULL) ) return(SUNLS_MEM_NULL); /* Ensure that A is a band matrix */ if (SUNMatGetID(A) != SUNMATRIX_BAND) { LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } /* Call LAPACK to do LU factorization of A */ n = SUNBandMatrix_Rows(A); ml = SUNBandMatrix_LowerBandwidth(A); mu = SUNBandMatrix_UpperBandwidth(A); ldim = SUNBandMatrix_LDim(A); xgbtrf_f77(&n, &n, &ml, &mu, SUNBandMatrix_Data(A), &ldim, PIVOTS(S), &ier); LASTFLAG(S) = ier; if (ier > 0) return(SUNLS_LUFACT_FAIL); if (ier < 0) return(SUNLS_PACKAGE_FAIL_UNREC); return(SUNLS_SUCCESS); } int SUNLinSolSolve_LapackBand(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { sunindextype n, ml, mu, ldim, one, ier; realtype *xdata; /* check for valid inputs */ if ( (A == NULL) || (S == NULL) || (x == NULL) || (b == NULL) ) return(SUNLS_MEM_NULL); /* copy b into x */ N_VScale(ONE, b, x); /* access x data array */ xdata = N_VGetArrayPointer(x); if (xdata == NULL) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* Call LAPACK to solve the linear system */ n = SUNBandMatrix_Rows(A); ml = SUNBandMatrix_LowerBandwidth(A); mu = SUNBandMatrix_UpperBandwidth(A); ldim = SUNBandMatrix_LDim(A); one = 1; xgbtrs_f77("N", &n, &ml, &mu, &one, SUNBandMatrix_Data(A), &ldim, PIVOTS(S), xdata, &n, &ier); LASTFLAG(S) = ier; if (ier < 0) return(SUNLS_PACKAGE_FAIL_UNREC); LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } sunindextype SUNLinSolLastFlag_LapackBand(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return(LASTFLAG(S)); } int SUNLinSolSpace_LapackBand(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { *lenrwLS = 0; *leniwLS = 2 + LAPACKBAND_CONTENT(S)->N; return(SUNLS_SUCCESS); } int SUNLinSolFree_LapackBand(SUNLinearSolver S) { /* return with success if already freed */ if (S == NULL) return(SUNLS_SUCCESS); /* delete items from contents, then delete generic structure */ 
if (S->content) { if (PIVOTS(S)) { free(PIVOTS(S)); PIVOTS(S) = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/superludist/0000755000176200001440000000000014511135055017653 5ustar liggesusersStanHeaders/src/sunlinsol/superludist/sunlinsol_superludist.c0000644000176200001440000002127214645137106024523 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation file for the SuperLU-DIST implementation of the * SUNLINSOL package. 
* ---------------------------------------------------------------------------*/ #include #include #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /* * ----------------------------------------------------------------- * SuperLUDIST solver structure accessibility macros: * ----------------------------------------------------------------- */ #define SLUDIST_CONTENT(S) ( (SUNLinearSolverContent_SuperLUDIST)(S->content) ) #define SLU_FIRSTFACTORIZE(S) ( SLUDIST_CONTENT(S)->first_factorize ) #define SLU_BERR(S) ( SLUDIST_CONTENT(S)->berr ) #define SLU_GRID(S) ( SLUDIST_CONTENT(S)->grid ) #define SLU_LASTFLAG(S) ( SLUDIST_CONTENT(S)->last_flag ) #define SLU_LU(S) ( SLUDIST_CONTENT(S)->lu ) #define SLU_OPTIONS(S) ( SLUDIST_CONTENT(S)->options ) #define SLU_SCALEPERM(S) ( SLUDIST_CONTENT(S)->scaleperm ) #define SLU_SOLVESTRUCT(S) ( SLUDIST_CONTENT(S)->solve ) #define SLU_STAT(S) ( SLUDIST_CONTENT(S)->stat ) #define SLU_SIZE(S) ( SLUDIST_CONTENT(S)->N ) /* * ---------------------------------------------------------------------------- * Implementations of exported functions. 
* ---------------------------------------------------------------------------- */ SUNLinearSolver SUNLinSol_SuperLUDIST(N_Vector y, SUNMatrix A, gridinfo_t *grid, xLUstruct_t *lu, xScalePermstruct_t *scaleperm, xSOLVEstruct_t *solve, SuperLUStat_t *stat, superlu_dist_options_t *options, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_SuperLUDIST content; SuperMatrix *Asuper; NRformat_loc *Astore; /* Check that required arguments are not NULL */ if (y == NULL || A == NULL || grid == NULL || lu == NULL || scaleperm == NULL || solve == NULL || stat == NULL || options == NULL) return(NULL); /* Check compatibility with supplied SUNMatrix and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_SLUNRLOC) return(NULL); Asuper = SUNMatrix_SLUNRloc_SuperMatrix(A); Astore = (NRformat_loc *) Asuper->Store; /* Check that the matrix is square */ if (Asuper->nrow != Asuper->ncol) return(NULL); /* Create an empty linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_SuperLUDIST; S->ops->getid = SUNLinSolGetID_SuperLUDIST; S->ops->initialize = SUNLinSolInitialize_SuperLUDIST; S->ops->setup = SUNLinSolSetup_SuperLUDIST; S->ops->solve = SUNLinSolSolve_SuperLUDIST; S->ops->lastflag = SUNLinSolLastFlag_SuperLUDIST; S->ops->space = SUNLinSolSpace_SuperLUDIST; S->ops->free = SUNLinSolFree_SuperLUDIST; /* Create content */ content = NULL; content = (SUNLinearSolverContent_SuperLUDIST) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->grid = grid; content->lu = lu; content->N = Astore->m_loc; content->options = options; content->scaleperm = scaleperm; content->solve = solve; content->stat = stat; content->berr = ZERO; content->last_flag = 0; return(S); } /* * ----------------------------------------------------------------- * Implementation of accessor functions. 
* ----------------------------------------------------------------- */ realtype SUNLinSol_SuperLUDIST_GetBerr(SUNLinearSolver LS) { return(SLU_BERR(LS)); } gridinfo_t* SUNLinSol_SuperLUDIST_GetGridinfo(SUNLinearSolver LS) { return(SLU_GRID(LS)); } xLUstruct_t* SUNLinSol_SuperLUDIST_GetLUstruct(SUNLinearSolver LS) { return(SLU_LU(LS)); } superlu_dist_options_t* SUNLinSol_SuperLUDIST_GetSuperLUOptions(SUNLinearSolver LS) { return(SLU_OPTIONS(LS)); } xScalePermstruct_t* SUNLinSol_SuperLUDIST_GetScalePermstruct(SUNLinearSolver LS) { return(SLU_SCALEPERM(LS)); } xSOLVEstruct_t* SUNLinSol_SuperLUDIST_GetSOLVEstruct(SUNLinearSolver LS) { return(SLU_SOLVESTRUCT(LS)); } SuperLUStat_t* SUNLinSol_SuperLUDIST_GetSuperLUStat(SUNLinearSolver LS) { return(SLU_STAT(LS)); } /* * ----------------------------------------------------------------- * Implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_SuperLUDIST(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_SuperLUDIST(SUNLinearSolver S) { return(SUNLINEARSOLVER_SUPERLUDIST); } int SUNLinSolInitialize_SuperLUDIST(SUNLinearSolver S) { SLU_FIRSTFACTORIZE(S) = SUNTRUE; SLU_LASTFLAG(S) = SUNLS_SUCCESS; return(SLU_LASTFLAG(S)); } int SUNLinSolSetup_SuperLUDIST(SUNLinearSolver S, SUNMatrix A) { if (SLU_FIRSTFACTORIZE(S)) { /* Force factorization on next solve. 
*/ SLU_OPTIONS(S)->Fact = DOFACT; /* if the solve struct was already initialized, we need to finalize the last solve to avoid leaking memory */ if (SLU_OPTIONS(S)->SolveInitialized == YES) { xDestroy_LU(SLU_SIZE(S), SLU_GRID(S), SLU_LU(S)); dSolveFinalize(SLU_OPTIONS(S), SLU_SOLVESTRUCT(S)); } } else { /* Force factorization on next solve, but reuse column permutation */ SLU_OPTIONS(S)->Fact = SamePattern; } SLU_LASTFLAG(S) = SUNLS_SUCCESS; return(SLU_LASTFLAG(S)); } int SUNLinSolSolve_SuperLUDIST(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { int retval; realtype *xdata; SuperMatrix *Asuper; NRformat_loc *Astore; if ((S == NULL) || (A == NULL) || (x == NULL) || (b == NULL)) return(SUNLS_MEM_NULL); Asuper = SUNMatrix_SLUNRloc_SuperMatrix(A); Astore = (NRformat_loc *) Asuper->Store; /* Copy b into x, and use xdata with SuperLU-DIST because SuperLU reuses b to store the solution vector. */ N_VScale(ONE, b, x); xdata = N_VGetArrayPointer(x); if (xdata == NULL) { SLU_LASTFLAG(S) = SUNLS_MEM_FAIL; return(SLU_LASTFLAG(S)); } /* Call SuperLU-DIST to solve the linear system */ pdgssvx(SLU_OPTIONS(S), Asuper, SLU_SCALEPERM(S), xdata, Astore->m_loc, 1, SLU_GRID(S), SLU_LU(S), SLU_SOLVESTRUCT(S), &SLU_BERR(S), SLU_STAT(S), &retval); if (retval != 0) { /* retval should never be < 0, and if it is > than ncol it means a memory allocation error occured. If 0 < retval <= ncol, then U is singular and the solve couldn't be completed. 
*/ if (retval < 0 || retval > Asuper->ncol) SLU_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; else SLU_LASTFLAG(S) = SUNLS_PACKAGE_FAIL_REC; return(SLU_LASTFLAG(S)); } /* Tell SuperLU-DIST that A is already factored so do not factorize */ SLU_OPTIONS(S)->Fact = FACTORED; SLU_LASTFLAG(S) = SUNLS_SUCCESS; return(SLU_LASTFLAG(S)); } sunindextype SUNLinSolLastFlag_SuperLUDIST(SUNLinearSolver S) { if (S == NULL) return(-1); return(SLU_LASTFLAG(S)); } int SUNLinSolFree_SuperLUDIST(SUNLinearSolver S) { /* return with success if already freed */ if (S == NULL) return(SUNLS_SUCCESS); /* Call SuperLU DIST destroy/finalize routines, but don't free the sturctures themselves - that is the user's job */ xDestroy_LU(SLU_SIZE(S), SLU_GRID(S), SLU_LU(S)); dSolveFinalize(SLU_OPTIONS(S), SLU_SOLVESTRUCT(S)); /* free content structure */ if (S->content) { free(S->content); S->content = NULL; } /* free ops structure */ if (S->ops) { free(S->ops); S->ops = NULL; } /* free the actual SUNLinSol */ free(S); S = NULL; return(SUNLS_SUCCESS); } int SUNLinSolSpace_SuperLUDIST(SUNLinearSolver S, long int *leniwLS, long int *lenrwLS) { /* since the SuperLU_DIST structures are opaque objects, we omit those from these results */ *leniwLS = 2; /* last_flag, N */ *lenrwLS = 1; /* berr */ return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/klu/0000755000176200001440000000000014511135055016063 5ustar liggesusersStanHeaders/src/sunlinsol/klu/fmod/0000755000176200001440000000000014511135055017010 5ustar liggesusersStanHeaders/src/sunlinsol/klu/fmod/fsunlinsol_klu_mod.c0000644000176200001440000002740714645137106023103 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. 
Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define 
SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define 
_SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } enum { SWIG_MEM_OWN = 0x01, SWIG_MEM_RVALUE = 0x02, SWIG_MEM_CONST = 0x04 }; #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. 
*/ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_klu.h" typedef struct { void* cptr; int cmemflags; } SwigClassWrapper; SWIGINTERN SwigClassWrapper SwigClassWrapper_uninitialized() { SwigClassWrapper result; result.cptr = NULL; result.cmemflags = 0; return result; } SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_KLU(N_Vector farg1, SUNMatrix farg2, void *farg3) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; SUNContext arg3 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (SUNContext)(farg3); result = (SUNLinearSolver)SUNLinSol_KLU(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_KLUReInit(SUNLinearSolver farg1, SUNMatrix farg2, int64_t const *farg3, int const *farg4) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; sunindextype arg3 ; int arg4 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (sunindextype)(*farg3); arg4 = (int)(*farg4); result = (int)SUNLinSol_KLUReInit(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_KLUSetOrdering(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_KLUSetOrdering(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT SwigClassWrapper _wrap_FSUNLinSol_KLUGetSymbolic(SUNLinearSolver farg1) { SwigClassWrapper fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; klu_l_symbolic 
*result = 0 ; arg1 = (SUNLinearSolver)(farg1); result = (klu_l_symbolic *)SUNLinSol_KLUGetSymbolic(arg1); fresult.cptr = result; fresult.cmemflags = SWIG_MEM_RVALUE | (0 ? SWIG_MEM_OWN : 0); return fresult; } SWIGEXPORT SwigClassWrapper _wrap_FSUNLinSol_KLUGetNumeric(SUNLinearSolver farg1) { SwigClassWrapper fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; klu_l_numeric *result = 0 ; arg1 = (SUNLinearSolver)(farg1); result = (klu_l_numeric *)SUNLinSol_KLUGetNumeric(arg1); fresult.cptr = result; fresult.cmemflags = SWIG_MEM_RVALUE | (0 ? SWIG_MEM_OWN : 0); return fresult; } SWIGEXPORT SwigClassWrapper _wrap_FSUNLinSol_KLUGetCommon(SUNLinearSolver farg1) { SwigClassWrapper fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; klu_l_common *result = 0 ; arg1 = (SUNLinearSolver)(farg1); result = (klu_l_common *)SUNLinSol_KLUGetCommon(arg1); fresult.cptr = result; fresult.cmemflags = SWIG_MEM_RVALUE | (0 ? SWIG_MEM_OWN : 0); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_KLU(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_KLU(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetID_KLU(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_KLU(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_KLU(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_KLU(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_KLU(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = 
(SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_KLU(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_KLU(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_KLU(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_KLU(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_KLU(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_KLU(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_KLU(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_KLU(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_KLU(arg1); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/klu/fmod/fsunlinsol_klu_mod.f900000644000176200001440000002702014645137106023246 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! 
--------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunlinsol_klu_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: SUNKLU_ORDERING_DEFAULT = 1_C_INT integer(C_INT), parameter, public :: SUNKLU_REINIT_FULL = 1_C_INT integer(C_INT), parameter, public :: SUNKLU_REINIT_PARTIAL = 2_C_INT public :: FSUNLinSol_KLU public :: FSUNLinSol_KLUReInit public :: FSUNLinSol_KLUSetOrdering integer, parameter :: swig_cmem_own_bit = 0 integer, parameter :: swig_cmem_rvalue_bit = 1 integer, parameter :: swig_cmem_const_bit = 2 type, bind(C) :: SwigClassWrapper type(C_PTR), public :: cptr = C_NULL_PTR integer(C_INT), public :: cmemflags = 0 end type type, public :: SWIGTYPE_p_klu_l_symbolic type(SwigClassWrapper), public :: swigdata end type public :: FSUNLinSol_KLUGetSymbolic type, public :: SWIGTYPE_p_klu_l_numeric type(SwigClassWrapper), public :: swigdata end type public :: FSUNLinSol_KLUGetNumeric type, public :: SWIGTYPE_p_klu_l_common type(SwigClassWrapper), public :: swigdata end type public :: FSUNLinSol_KLUGetCommon public :: FSUNLinSolGetType_KLU public :: FSUNLinSolGetID_KLU public :: FSUNLinSolInitialize_KLU public :: FSUNLinSolSetup_KLU public :: FSUNLinSolSolve_KLU public :: 
FSUNLinSolLastFlag_KLU public :: FSUNLinSolSpace_KLU public :: FSUNLinSolFree_KLU ! WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_KLU(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSol_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FSUNLinSol_KLUReInit(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSol_KLUReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_KLUSetOrdering(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_KLUSetOrdering") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_KLUGetSymbolic(farg1) & bind(C, name="_wrap_FSUNLinSol_KLUGetSymbolic") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(C_PTR), value :: farg1 type(SwigClassWrapper) :: fresult end function function swigc_FSUNLinSol_KLUGetNumeric(farg1) & bind(C, name="_wrap_FSUNLinSol_KLUGetNumeric") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(C_PTR), value :: farg1 type(SwigClassWrapper) :: fresult end function function swigc_FSUNLinSol_KLUGetCommon(farg1) & bind(C, name="_wrap_FSUNLinSol_KLUGetCommon") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(C_PTR), value :: farg1 type(SwigClassWrapper) :: fresult end function function swigc_FSUNLinSolGetType_KLU(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_KLU(farg1) & bind(C, name="_wrap_FSUNLinSolGetID_KLU") 
& result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_KLU(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_KLU(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve_KLU(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolLastFlag_KLU(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_KLU(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree_KLU(farg1) & bind(C, name="_wrap_FSUNLinSolFree_KLU") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNLinSol_KLU(y, a, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(y) farg2 = c_loc(a) farg3 = sunctx fresult = swigc_FSUNLinSol_KLU(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FSUNLinSol_KLUReInit(s, a, nnz, reinit_type) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT64_T), intent(in) :: nnz integer(C_INT), intent(in) :: reinit_type integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT64_T) :: farg3 integer(C_INT) :: farg4 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = nnz farg4 = reinit_type fresult = swigc_FSUNLinSol_KLUReInit(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNLinSol_KLUSetOrdering(s, ordering_choice) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: ordering_choice integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = ordering_choice fresult = swigc_FSUNLinSol_KLUSetOrdering(farg1, farg2) swig_result = fresult end function function FSUNLinSol_KLUGetSymbolic(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SWIGTYPE_p_klu_l_symbolic) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SwigClassWrapper) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSol_KLUGetSymbolic(farg1) swig_result%swigdata = fresult end function function FSUNLinSol_KLUGetNumeric(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING 
type(SWIGTYPE_p_klu_l_numeric) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SwigClassWrapper) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSol_KLUGetNumeric(farg1) swig_result%swigdata = fresult end function function FSUNLinSol_KLUGetCommon(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SWIGTYPE_p_klu_l_common) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SwigClassWrapper) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSol_KLUGetCommon(farg1) swig_result%swigdata = fresult end function function FSUNLinSolGetType_KLU(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_KLU(farg1) swig_result = fresult end function function FSUNLinSolGetID_KLU(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetID_KLU(farg1) swig_result = fresult end function function FSUNLinSolInitialize_KLU(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_KLU(farg1) swig_result = fresult end function function FSUNLinSolSetup_KLU(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup_KLU(farg1, farg2) swig_result = fresult end function function 
FSUNLinSolSolve_KLU(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_KLU(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolLastFlag_KLU(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_KLU(farg1) swig_result = fresult end function function FSUNLinSolSpace_KLU(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_KLU(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_KLU(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_KLU(farg1) swig_result = fresult end function end module StanHeaders/src/sunlinsol/klu/sunlinsol_klu.c0000644000176200001440000003051714645137106021145 0ustar liggesusers/* 
----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on codes _klu.c, written by Carol Woodward @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the KLU implementation of * the SUNLINSOL package. * -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define TWOTHIRDS RCONST(0.666666666666666666666666666666667) /* * ----------------------------------------------------------------- * KLU solver structure accessibility macros: * ----------------------------------------------------------------- */ #define KLU_CONTENT(S) ( (SUNLinearSolverContent_KLU)(S->content) ) #define LASTFLAG(S) ( KLU_CONTENT(S)->last_flag ) #define FIRSTFACTORIZE(S) ( KLU_CONTENT(S)->first_factorize ) #define SYMBOLIC(S) ( KLU_CONTENT(S)->symbolic ) #define NUMERIC(S) ( KLU_CONTENT(S)->numeric ) #define COMMON(S) ( KLU_CONTENT(S)->common ) #define SOLVE(S) ( KLU_CONTENT(S)->klu_solver ) /* * ----------------------------------------------------------------- * typedef to handle pointer casts from sunindextype to KLU type * ----------------------------------------------------------------- */ #if defined(SUNDIALS_INT64_T) #define KLU_INDEXTYPE long int #else #define KLU_INDEXTYPE int #endif /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* 
---------------------------------------------------------------------------- * Function to create a new KLU linear solver */ SUNLinearSolver SUNLinSol_KLU(N_Vector y, SUNMatrix A, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_KLU content; int flag; /* Check compatibility with supplied SUNMatrix and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_SPARSE) return(NULL); if (SUNSparseMatrix_Rows(A) != SUNSparseMatrix_Columns(A)) return(NULL); if ( (N_VGetVectorID(y) != SUNDIALS_NVEC_SERIAL) && (N_VGetVectorID(y) != SUNDIALS_NVEC_OPENMP) && (N_VGetVectorID(y) != SUNDIALS_NVEC_PTHREADS) ) return(NULL); if (SUNSparseMatrix_Rows(A) != N_VGetLength(y)) return(NULL); /* Create an empty linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_KLU; S->ops->getid = SUNLinSolGetID_KLU; S->ops->initialize = SUNLinSolInitialize_KLU; S->ops->setup = SUNLinSolSetup_KLU; S->ops->solve = SUNLinSolSolve_KLU; S->ops->lastflag = SUNLinSolLastFlag_KLU; S->ops->space = SUNLinSolSpace_KLU; S->ops->free = SUNLinSolFree_KLU; /* Create content */ content = NULL; content = (SUNLinearSolverContent_KLU) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = 0; content->first_factorize = 1; content->symbolic = NULL; content->numeric = NULL; #if defined(SUNDIALS_INT64_T) if (SUNSparseMatrix_SparseType(A) == CSC_MAT) { content->klu_solver = (KLUSolveFn) &klu_l_solve; } else { content->klu_solver = (KLUSolveFn) &klu_l_tsolve; } #elif defined(SUNDIALS_INT32_T) if (SUNSparseMatrix_SparseType(A) == CSC_MAT) { content->klu_solver = &klu_solve; } else { content->klu_solver = &klu_tsolve; } #else /* incompatible sunindextype for KLU */ #error Incompatible sunindextype for KLU #endif flag = sun_klu_defaults(&(content->common)); if (flag == 0) { SUNLinSolFree(S); return(NULL); } 
(content->common).ordering = SUNKLU_ORDERING_DEFAULT; return(S); } /* ---------------------------------------------------------------------------- * Function to reinitialize a KLU linear solver */ int SUNLinSol_KLUReInit(SUNLinearSolver S, SUNMatrix A, sunindextype nnz, int reinit_type) { /* Check for non-NULL SUNLinearSolver */ if ((S == NULL) || (A == NULL)) return(SUNLS_MEM_NULL); /* Check for valid SUNMatrix */ if (SUNMatGetID(A) != SUNMATRIX_SPARSE) return(SUNLS_ILL_INPUT); /* Check for valid reinit_type */ if ((reinit_type != SUNKLU_REINIT_FULL) && (reinit_type != SUNKLU_REINIT_PARTIAL)) return(SUNLS_ILL_INPUT); /* Full re-initialization: reallocate matrix for updated storage */ if (reinit_type == SUNKLU_REINIT_FULL) if (SUNSparseMatrix_Reallocate(A, nnz) != 0) return(SUNLS_MEM_FAIL); /* Free the prior factorazation and reset for first factorization */ if( SYMBOLIC(S) != NULL) sun_klu_free_symbolic(&SYMBOLIC(S), &COMMON(S)); if( NUMERIC(S) != NULL) sun_klu_free_numeric(&NUMERIC(S), &COMMON(S)); FIRSTFACTORIZE(S) = 1; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* ---------------------------------------------------------------------------- * Function to set the ordering type for a KLU linear solver */ int SUNLinSol_KLUSetOrdering(SUNLinearSolver S, int ordering_choice) { /* Check for legal ordering_choice */ if ((ordering_choice < 0) || (ordering_choice > 2)) return(SUNLS_ILL_INPUT); /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set ordering_choice */ COMMON(S).ordering = ordering_choice; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* * ----------------------------------------------------------------- * accessor functions * ----------------------------------------------------------------- */ sun_klu_symbolic* SUNLinSol_KLUGetSymbolic(SUNLinearSolver S) { return(SYMBOLIC(S)); } sun_klu_numeric* SUNLinSol_KLUGetNumeric(SUNLinearSolver S) { return(NUMERIC(S)); } sun_klu_common* 
SUNLinSol_KLUGetCommon(SUNLinearSolver S) { return(&(COMMON(S))); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_KLU(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_KLU(SUNLinearSolver S) { return(SUNLINEARSOLVER_KLU); } int SUNLinSolInitialize_KLU(SUNLinearSolver S) { /* Force factorization */ FIRSTFACTORIZE(S) = 1; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetup_KLU(SUNLinearSolver S, SUNMatrix A) { int retval; realtype uround_twothirds; uround_twothirds = SUNRpowerR(UNIT_ROUNDOFF,TWOTHIRDS); /* Ensure that A is a sparse matrix */ if (SUNMatGetID(A) != SUNMATRIX_SPARSE) { LASTFLAG(S) = SUNLS_ILL_INPUT; return(LASTFLAG(S)); } /* On first decomposition, get the symbolic factorization */ if (FIRSTFACTORIZE(S)) { /* Perform symbolic analysis of sparsity structure */ if (SYMBOLIC(S)) sun_klu_free_symbolic(&SYMBOLIC(S), &COMMON(S)); SYMBOLIC(S) = sun_klu_analyze(SUNSparseMatrix_NP(A), (KLU_INDEXTYPE*) SUNSparseMatrix_IndexPointers(A), (KLU_INDEXTYPE*) SUNSparseMatrix_IndexValues(A), &COMMON(S)); if (SYMBOLIC(S) == NULL) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return(LASTFLAG(S)); } /* ------------------------------------------------------------ Compute the LU factorization of the matrix ------------------------------------------------------------*/ if(NUMERIC(S)) sun_klu_free_numeric(&NUMERIC(S), &COMMON(S)); NUMERIC(S) = sun_klu_factor((KLU_INDEXTYPE*) SUNSparseMatrix_IndexPointers(A), (KLU_INDEXTYPE*) SUNSparseMatrix_IndexValues(A), SUNSparseMatrix_Data(A), SYMBOLIC(S), &COMMON(S)); if (NUMERIC(S) == NULL) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return(LASTFLAG(S)); } FIRSTFACTORIZE(S) = 0; } else { /* not the first decomposition, so just refactor */ retval = sun_klu_refactor((KLU_INDEXTYPE*) 
SUNSparseMatrix_IndexPointers(A), (KLU_INDEXTYPE*) SUNSparseMatrix_IndexValues(A), SUNSparseMatrix_Data(A), SYMBOLIC(S), NUMERIC(S), &COMMON(S)); if (retval == 0) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_REC; return(LASTFLAG(S)); } /*----------------------------------------------------------- Check if a cheap estimate of the reciprocal of the condition number is getting too small. If so, delete the prior numeric factorization and recompute it. -----------------------------------------------------------*/ retval = sun_klu_rcond(SYMBOLIC(S), NUMERIC(S), &COMMON(S)); if (retval == 0) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_REC; return(LASTFLAG(S)); } if ( COMMON(S).rcond < uround_twothirds ) { /* Condition number may be getting large. Compute more accurate estimate */ retval = sun_klu_condest((KLU_INDEXTYPE*) SUNSparseMatrix_IndexPointers(A), SUNSparseMatrix_Data(A), SYMBOLIC(S), NUMERIC(S), &COMMON(S)); if (retval == 0) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_REC; return(LASTFLAG(S)); } if ( COMMON(S).condest > (ONE/uround_twothirds) ) { /* More accurate estimate also says condition number is large, so recompute the numeric factorization */ sun_klu_free_numeric(&NUMERIC(S), &COMMON(S)); NUMERIC(S) = sun_klu_factor((KLU_INDEXTYPE*) SUNSparseMatrix_IndexPointers(A), (KLU_INDEXTYPE*) SUNSparseMatrix_IndexValues(A), SUNSparseMatrix_Data(A), SYMBOLIC(S), &COMMON(S)); if (NUMERIC(S) == NULL) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return(LASTFLAG(S)); } } } } LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSolve_KLU(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { int flag; realtype *xdata; /* check for valid inputs */ if ( (A == NULL) || (S == NULL) || (x == NULL) || (b == NULL) ) return(SUNLS_MEM_NULL); /* copy b into x */ N_VScale(ONE, b, x); /* access x data array */ xdata = N_VGetArrayPointer(x); if (xdata == NULL) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(LASTFLAG(S)); } /* Call KLU to solve the linear system */ flag = SOLVE(S)(SYMBOLIC(S), 
NUMERIC(S), SUNSparseMatrix_NP(A), 1, xdata, &COMMON(S)); if (flag == 0) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_REC; return(LASTFLAG(S)); } LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } sunindextype SUNLinSolLastFlag_KLU(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return(LASTFLAG(S)); } int SUNLinSolSpace_KLU(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { /* since the klu structures are opaque objects, we omit those from these results */ *leniwLS = 2; *lenrwLS = 0; return(SUNLS_SUCCESS); } int SUNLinSolFree_KLU(SUNLinearSolver S) { /* return with success if already freed */ if (S == NULL) return(SUNLS_SUCCESS); /* delete items from the contents structure (if it exists) */ if (S->content) { if (NUMERIC(S)) sun_klu_free_numeric(&NUMERIC(S), &COMMON(S)); if (SYMBOLIC(S)) sun_klu_free_symbolic(&SYMBOLIC(S), &COMMON(S)); free(S->content); S->content = NULL; } /* delete generic structures */ if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/band/0000755000176200001440000000000014645137104016201 5ustar liggesusersStanHeaders/src/sunlinsol/band/sunlinsol_band.c0000644000176200001440000001537514645137106021374 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the band implementation of * the SUNLINSOL package. 
* -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define ROW(i,j,smu) (i-j+smu) /* * ----------------------------------------------------------------- * Band solver structure accessibility macros: * ----------------------------------------------------------------- */ #define BAND_CONTENT(S) ( (SUNLinearSolverContent_Band)(S->content) ) #define PIVOTS(S) ( BAND_CONTENT(S)->pivots ) #define LASTFLAG(S) ( BAND_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new band linear solver */ SUNLinearSolver SUNLinSol_Band(N_Vector y, SUNMatrix A, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_Band content; sunindextype MatrixRows; /* Check compatibility with supplied SUNMatrix and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_BAND) return(NULL); if (SUNBandMatrix_Rows(A) != SUNBandMatrix_Columns(A)) return(NULL); if ( (N_VGetVectorID(y) != SUNDIALS_NVEC_SERIAL) && (N_VGetVectorID(y) != SUNDIALS_NVEC_OPENMP) && (N_VGetVectorID(y) != SUNDIALS_NVEC_PTHREADS) ) return(NULL); /* Check that A has appropriate storage upper bandwidth for factorization */ MatrixRows = SUNBandMatrix_Rows(A); if (SUNBandMatrix_StoredUpperBandwidth(A) < SUNMIN(MatrixRows-1, SUNBandMatrix_LowerBandwidth(A)+SUNBandMatrix_UpperBandwidth(A))) return(NULL); if (MatrixRows != N_VGetLength(y)) return(NULL); /* Create an empty linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_Band; S->ops->getid = SUNLinSolGetID_Band; S->ops->initialize = SUNLinSolInitialize_Band; S->ops->setup = SUNLinSolSetup_Band; S->ops->solve = SUNLinSolSolve_Band; S->ops->lastflag = 
SUNLinSolLastFlag_Band; S->ops->space = SUNLinSolSpace_Band; S->ops->free = SUNLinSolFree_Band; /* Create content */ content = NULL; content = (SUNLinearSolverContent_Band) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->N = MatrixRows; content->last_flag = 0; content->pivots = NULL; /* Allocate content */ content->pivots = (sunindextype *) malloc(MatrixRows * sizeof(sunindextype)); if (content->pivots == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_Band(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_Band(SUNLinearSolver S) { return(SUNLINEARSOLVER_BAND); } int SUNLinSolInitialize_Band(SUNLinearSolver S) { /* all solver-specific memory has already been allocated */ LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int SUNLinSolSetup_Band(SUNLinearSolver S, SUNMatrix A) { realtype **A_cols; sunindextype *pivots; /* check for valid inputs */ if ( (A == NULL) || (S == NULL) ) return(SUNLS_MEM_NULL); /* Ensure that A is a band matrix */ if (SUNMatGetID(A) != SUNMATRIX_BAND) { LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } /* access data pointers (return with failure on NULL) */ A_cols = NULL; pivots = NULL; A_cols = SM_COLS_B(A); pivots = PIVOTS(S); if ( (A_cols == NULL) || (pivots == NULL) ) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* ensure that storage upper bandwidth is sufficient for fill-in */ if (SM_SUBAND_B(A) < SUNMIN(SM_COLUMNS_B(A)-1, SM_UBAND_B(A) + SM_LBAND_B(A))) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* perform LU factorization of input matrix */ LASTFLAG(S) = SUNDlsMat_bandGBTRF(A_cols, SM_COLUMNS_B(A), SM_UBAND_B(A), 
SM_LBAND_B(A), SM_SUBAND_B(A), pivots); /* store error flag (if nonzero, that row encountered zero-valued pivod) */ if (LASTFLAG(S) > 0) return(SUNLS_LUFACT_FAIL); return(SUNLS_SUCCESS); } int SUNLinSolSolve_Band(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { realtype **A_cols, *xdata; sunindextype *pivots; /* check for valid inputs */ if ( (A == NULL) || (S == NULL) || (x == NULL) || (b == NULL) ) return(SUNLS_MEM_NULL); /* copy b into x */ N_VScale(ONE, b, x); /* access data pointers (return with failure on NULL) */ A_cols = NULL; xdata = NULL; pivots = NULL; A_cols = SUNBandMatrix_Cols(A); xdata = N_VGetArrayPointer(x); pivots = PIVOTS(S); if ( (A_cols == NULL) || (xdata == NULL) || (pivots == NULL) ) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* solve using LU factors */ SUNDlsMat_bandGBTRS(A_cols, SM_COLUMNS_B(A), SM_SUBAND_B(A), SM_LBAND_B(A), pivots, xdata); LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } sunindextype SUNLinSolLastFlag_Band(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return(LASTFLAG(S)); } int SUNLinSolSpace_Band(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { *leniwLS = 2 + BAND_CONTENT(S)->N; *lenrwLS = 0; return(SUNLS_SUCCESS); } int SUNLinSolFree_Band(SUNLinearSolver S) { /* return if S is already free */ if (S == NULL) return(SUNLS_SUCCESS); /* delete items from contents, then delete generic structure */ if (S->content) { if (PIVOTS(S)) { free(PIVOTS(S)); PIVOTS(S) = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/band/fmod/0000755000176200001440000000000014511135055017121 5ustar liggesusersStanHeaders/src/sunlinsol/band/fmod/fsunlinsol_band_mod.f900000644000176200001440000001642014645137106023472 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! 
Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunlinsol_band_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS public :: FSUNLinSol_Band public :: FSUNLinSolGetType_Band public :: FSUNLinSolGetID_Band public :: FSUNLinSolInitialize_Band public :: FSUNLinSolSetup_Band public :: FSUNLinSolSolve_Band public :: FSUNLinSolLastFlag_Band public :: FSUNLinSolSpace_Band public :: FSUNLinSolFree_Band ! 
WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_Band(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSol_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR) :: fresult end function function swigc_FSUNLinSolGetType_Band(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_Band(farg1) & bind(C, name="_wrap_FSUNLinSolGetID_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_Band(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_Band(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve_Band(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolLastFlag_Band(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_Band(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end 
function function swigc_FSUNLinSolFree_Band(farg1) & bind(C, name="_wrap_FSUNLinSolFree_Band") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FSUNLinSol_Band(y, a, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(y) farg2 = c_loc(a) farg3 = sunctx fresult = swigc_FSUNLinSol_Band(farg1, farg2, farg3) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolGetType_Band(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_Band(farg1) swig_result = fresult end function function FSUNLinSolGetID_Band(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetID_Band(farg1) swig_result = fresult end function function FSUNLinSolInitialize_Band(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_Band(farg1) swig_result = fresult end function function FSUNLinSolSetup_Band(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 
farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup_Band(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve_Band(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_Band(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolLastFlag_Band(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_Band(farg1) swig_result = fresult end function function FSUNLinSolSpace_Band(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_Band(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_Band(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_Band(farg1) swig_result = fresult end function end module 
StanHeaders/src/sunlinsol/band/fmod/fsunlinsol_band_mod.c0000644000176200001440000002272614645137106023324 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_band.h" SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_Band(N_Vector farg1, SUNMatrix farg2, void *farg3) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; SUNContext arg3 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (SUNContext)(farg3); result = (SUNLinearSolver)SUNLinSol_Band(arg1,arg2,arg3); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_Band(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_Band(arg1); fresult = (int)(result); return fresult; } 
SWIGEXPORT int _wrap_FSUNLinSolGetID_Band(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_Band(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_Band(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_Band(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_Band(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_Band(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_Band(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_Band(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_Band(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_Band(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_Band(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long 
*)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_Band(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_Band(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_Band(arg1); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/spbcgs/0000755000176200001440000000000014511135055016551 5ustar liggesusersStanHeaders/src/sunlinsol/spbcgs/sunlinsol_spbcgs.c0000644000176200001440000005357114645137106022326 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on sundials_spbcgs.c code, written by Peter Brown and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the SPBCGS implementation of * the SUNLINSOL package. 
* -----------------------------------------------------------------*/ #include #include #include #include #include "sundials_debug.h" #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * SPBCGS solver structure accessibility macros: * ----------------------------------------------------------------- */ #define SPBCGS_CONTENT(S) ( (SUNLinearSolverContent_SPBCGS)(S->content) ) #define PRETYPE(S) ( SPBCGS_CONTENT(S)->pretype ) #define LASTFLAG(S) ( SPBCGS_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new SPBCGS linear solver */ SUNLinearSolver SUNLinSol_SPBCGS(N_Vector y, int pretype, int maxl, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_SPBCGS content; /* check for legal pretype and maxl values; if illegal use defaults */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) pretype = SUN_PREC_NONE; if (maxl <= 0) maxl = SUNSPBCGS_MAXL_DEFAULT; /* check that the supplied N_Vector supports all requisite operations */ if ( (y->ops->nvclone == NULL) || (y->ops->nvdestroy == NULL) || (y->ops->nvlinearsum == NULL) || (y->ops->nvprod == NULL) || (y->ops->nvdiv == NULL) || (y->ops->nvscale == NULL) || (y->ops->nvdotprod == NULL) ) return(NULL); /* Create linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_SPBCGS; S->ops->getid = SUNLinSolGetID_SPBCGS; S->ops->setatimes = SUNLinSolSetATimes_SPBCGS; S->ops->setpreconditioner = SUNLinSolSetPreconditioner_SPBCGS; S->ops->setscalingvectors = SUNLinSolSetScalingVectors_SPBCGS; S->ops->setzeroguess = SUNLinSolSetZeroGuess_SPBCGS; 
S->ops->initialize = SUNLinSolInitialize_SPBCGS; S->ops->setup = SUNLinSolSetup_SPBCGS; S->ops->solve = SUNLinSolSolve_SPBCGS; S->ops->numiters = SUNLinSolNumIters_SPBCGS; S->ops->resnorm = SUNLinSolResNorm_SPBCGS; S->ops->resid = SUNLinSolResid_SPBCGS; S->ops->lastflag = SUNLinSolLastFlag_SPBCGS; S->ops->space = SUNLinSolSpace_SPBCGS; S->ops->free = SUNLinSolFree_SPBCGS; /* Create content */ content = NULL; content = (SUNLinearSolverContent_SPBCGS) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = 0; content->maxl = maxl; content->pretype = pretype; content->zeroguess = SUNFALSE; content->numiters = 0; content->resnorm = ZERO; content->r_star = NULL; content->r = NULL; content->p = NULL; content->q = NULL; content->u = NULL; content->Ap = NULL; content->vtemp = NULL; content->s1 = NULL; content->s2 = NULL; content->ATimes = NULL; content->ATData = NULL; content->Psetup = NULL; content->Psolve = NULL; content->PData = NULL; content->print_level = 0; content->info_file = stdout; /* Allocate content */ content->r_star = N_VClone(y); if (content->r_star == NULL) { SUNLinSolFree(S); return(NULL); } content->r = N_VClone(y); if (content->r == NULL) { SUNLinSolFree(S); return(NULL); } content->p = N_VClone(y); if (content->p == NULL) { SUNLinSolFree(S); return(NULL); } content->q = N_VClone(y); if (content->q == NULL) { SUNLinSolFree(S); return(NULL); } content->u = N_VClone(y); if (content->u == NULL) { SUNLinSolFree(S); return(NULL); } content->Ap = N_VClone(y); if (content->Ap == NULL) { SUNLinSolFree(S); return(NULL); } content->vtemp = N_VClone(y); if (content->vtemp == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* ---------------------------------------------------------------------------- * Function to set the type of preconditioning for SPBCGS to use */ int SUNLinSol_SPBCGSSetPrecType(SUNLinearSolver S, int pretype) { /* Check for legal 
pretype */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) { return(SUNLS_ILL_INPUT); } /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set pretype */ PRETYPE(S) = pretype; return(SUNLS_SUCCESS); } /* ---------------------------------------------------------------------------- * Function to set the maximum number of iterations for SPBCGS to use */ int SUNLinSol_SPBCGSSetMaxl(SUNLinearSolver S, int maxl) { /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Check for legal pretype */ if (maxl <= 0) maxl = SUNSPBCGS_MAXL_DEFAULT; /* Set pretype */ SPBCGS_CONTENT(S)->maxl = maxl; return(SUNLS_SUCCESS); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_SPBCGS(SUNLinearSolver S) { return(SUNLINEARSOLVER_ITERATIVE); } SUNLinearSolver_ID SUNLinSolGetID_SPBCGS(SUNLinearSolver S) { return(SUNLINEARSOLVER_SPBCGS); } int SUNLinSolInitialize_SPBCGS(SUNLinearSolver S) { /* ensure valid options */ if (S == NULL) return(SUNLS_MEM_NULL); if (SPBCGS_CONTENT(S)->maxl <= 0) SPBCGS_CONTENT(S)->maxl = SUNSPBCGS_MAXL_DEFAULT; if (SPBCGS_CONTENT(S)->ATimes == NULL) { LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } if ( (PRETYPE(S) != SUN_PREC_LEFT) && (PRETYPE(S) != SUN_PREC_RIGHT) && (PRETYPE(S) != SUN_PREC_BOTH) ) PRETYPE(S) = SUN_PREC_NONE; if ((PRETYPE(S) != SUN_PREC_NONE) && (SPBCGS_CONTENT(S)->Psolve == NULL)) { LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* no additional memory to allocate */ /* return with success */ LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetATimes_SPBCGS(SUNLinearSolver S, void* ATData, SUNATimesFn ATimes) { /* set function pointers to integrator-supplied ATimes routine and data, and return with success 
*/ if (S == NULL) return(SUNLS_MEM_NULL); SPBCGS_CONTENT(S)->ATimes = ATimes; SPBCGS_CONTENT(S)->ATData = ATData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetPreconditioner_SPBCGS(SUNLinearSolver S, void* PData, SUNPSetupFn Psetup, SUNPSolveFn Psolve) { /* set function pointers to integrator-supplied Psetup and PSolve routines and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPBCGS_CONTENT(S)->Psetup = Psetup; SPBCGS_CONTENT(S)->Psolve = Psolve; SPBCGS_CONTENT(S)->PData = PData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetScalingVectors_SPBCGS(SUNLinearSolver S, N_Vector s1, N_Vector s2) { /* set N_Vector pointers to integrator-supplied scaling vectors, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPBCGS_CONTENT(S)->s1 = s1; SPBCGS_CONTENT(S)->s2 = s2; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetZeroGuess_SPBCGS(SUNLinearSolver S, booleantype onoff) { /* set flag indicating a zero initial guess */ if (S == NULL) return(SUNLS_MEM_NULL); SPBCGS_CONTENT(S)->zeroguess = onoff; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetup_SPBCGS(SUNLinearSolver S, SUNMatrix A) { int ier; SUNPSetupFn Psetup; void* PData; /* Set shortcuts to SPBCGS memory structures */ if (S == NULL) return(SUNLS_MEM_NULL); Psetup = SPBCGS_CONTENT(S)->Psetup; PData = SPBCGS_CONTENT(S)->PData; /* no solver-specific setup is required, but if user-supplied Psetup routine exists, call that here */ if (Psetup != NULL) { ier = Psetup(PData); if (ier != 0) { LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSET_FAIL_UNREC : SUNLS_PSET_FAIL_REC; return(LASTFLAG(S)); } } /* return with success */ LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSolve_SPBCGS(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype delta) { /* local data and shortcut variables */ realtype alpha, beta, omega, omega_denom, beta_num, beta_denom, r_norm, rho; N_Vector r_star, r, p, q, u, Ap, vtemp; booleantype preOnLeft, preOnRight, scale_x, scale_b, converged; booleantype *zeroguess; int l, l_max, ier; void *A_data, *P_data; N_Vector sx, sb; SUNATimesFn atimes; SUNPSolveFn psolve; realtype *res_norm; int *nli; /* local variables for fused vector operations */ realtype cv[3]; N_Vector Xv[3]; /* Make local shorcuts to solver variables. */ if (S == NULL) return(SUNLS_MEM_NULL); l_max = SPBCGS_CONTENT(S)->maxl; r_star = SPBCGS_CONTENT(S)->r_star; r = SPBCGS_CONTENT(S)->r; p = SPBCGS_CONTENT(S)->p; q = SPBCGS_CONTENT(S)->q; u = SPBCGS_CONTENT(S)->u; Ap = SPBCGS_CONTENT(S)->Ap; vtemp = SPBCGS_CONTENT(S)->vtemp; sb = SPBCGS_CONTENT(S)->s1; sx = SPBCGS_CONTENT(S)->s2; A_data = SPBCGS_CONTENT(S)->ATData; P_data = SPBCGS_CONTENT(S)->PData; atimes = SPBCGS_CONTENT(S)->ATimes; psolve = SPBCGS_CONTENT(S)->Psolve; zeroguess = &(SPBCGS_CONTENT(S)->zeroguess); nli = &(SPBCGS_CONTENT(S)->numiters); res_norm = &(SPBCGS_CONTENT(S)->resnorm); /* Initialize counters and convergence flag */ *nli = 0; converged = SUNFALSE; /* set booleantype flags for internal solver options */ preOnLeft = ( (PRETYPE(S) == SUN_PREC_LEFT) || (PRETYPE(S) == SUN_PREC_BOTH) ); preOnRight = ( (PRETYPE(S) == SUN_PREC_RIGHT) || (PRETYPE(S) == SUN_PREC_BOTH) ); scale_x = (sx != NULL); scale_b = (sb != NULL); /* Check for unsupported use case */ if (preOnRight && !(*zeroguess)) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } #ifdef SUNDIALS_BUILD_WITH_MONITORING if (SPBCGS_CONTENT(S)->print_level && SPBCGS_CONTENT(S)->info_file) 
STAN_SUNDIALS_FPRINTF(SPBCGS_CONTENT(S)->info_file, "SUNLINSOL_SPBCGS:\n"); #endif /* Check if Atimes function has been set */ if (atimes == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } /* If preconditioning, check if psolve has been set */ if ((preOnLeft || preOnRight) && psolve == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* Set r_star to initial (unscaled) residual r_0 = b - A*x_0 */ if (*zeroguess) { N_VScale(ONE, b, r_star); } else { ier = atimes(A_data, x, r_star); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } N_VLinearSum(ONE, b, -ONE, r_star, r_star); } /* Apply left preconditioner and b-scaling to r_star = r_0 */ if (preOnLeft) { ier = psolve(P_data, r_star, r, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, r_star, r); if (scale_b) N_VProd(sb, r, r_star); else N_VScale(ONE, r, r_star); /* Initialize beta_denom to the dot product of r0 with r0 */ beta_denom = N_VDotProd(r_star, r_star); /* Set r_norm to L2 norm of r_star = sb P1_inv r_0, and return if small */ *res_norm = r_norm = rho = SUNRsqrt(beta_denom); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print the initial residual */ if (SPBCGS_CONTENT(S)->print_level && SPBCGS_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPBCGS_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) 0, *res_norm); } #endif if (r_norm <= delta) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* Copy r_star to r and p */ N_VScale(ONE, r_star, r); N_VScale(ONE, r_star, p); /* Set x = sx x if non-zero guess */ if (scale_x && !(*zeroguess)) N_VProd(sx, x, x); /* Begin main iteration loop */ for(l = 0; l < l_max; l++) { (*nli)++; /* Generate Ap = A-tilde p, where A-tilde = sb P1_inv A P2_inv sx_inv 
*/ /* Apply x-scaling: vtemp = sx_inv p */ if (scale_x) N_VDiv(p, sx, vtemp); else N_VScale(ONE, p, vtemp); /* Apply right preconditioner: vtemp = P2_inv sx_inv p */ if (preOnRight) { N_VScale(ONE, vtemp, Ap); ier = psolve(P_data, Ap, vtemp, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } /* Apply A: Ap = A P2_inv sx_inv p */ ier = atimes(A_data, vtemp, Ap ); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } /* Apply left preconditioner: vtemp = P1_inv A P2_inv sx_inv p */ if (preOnLeft) { ier = psolve(P_data, Ap, vtemp, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, Ap, vtemp); /* Apply b-scaling: Ap = sb P1_inv A P2_inv sx_inv p */ if (scale_b) N_VProd(sb, vtemp, Ap); else N_VScale(ONE, vtemp, Ap); /* Calculate alpha = / */ alpha = ((beta_denom / N_VDotProd(Ap, r_star))); /* Update q = r - alpha*Ap = r - alpha*(sb P1_inv A P2_inv sx_inv p) */ N_VLinearSum(ONE, r, -alpha, Ap, q); /* Generate u = A-tilde q */ /* Apply x-scaling: vtemp = sx_inv q */ if (scale_x) N_VDiv(q, sx, vtemp); else N_VScale(ONE, q, vtemp); /* Apply right preconditioner: vtemp = P2_inv sx_inv q */ if (preOnRight) { N_VScale(ONE, vtemp, u); ier = psolve(P_data, u, vtemp, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } /* Apply A: u = A P2_inv sx_inv u */ ier = atimes(A_data, vtemp, u ); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } /* Apply left preconditioner: vtemp = P1_inv A P2_inv sx_inv p */ if (preOnLeft) { ier = psolve(P_data, u, vtemp, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, u, vtemp); /* Apply b-scaling: u = sb P1_inv A P2_inv sx_inv u */ if (scale_b) N_VProd(sb, vtemp, u); else N_VScale(ONE, vtemp, u); /* Calculate omega = / */ omega_denom = N_VDotProd(u, u); if (omega_denom == ZERO) omega_denom = ONE; omega = (N_VDotProd(u, q) / omega_denom); /* Update x = x + alpha*p + omega*q */ if (l == 0 && *zeroguess) { N_VLinearSum(alpha, p, omega, q, x); } else { cv[0] = ONE; Xv[0] = x; cv[1] = alpha; Xv[1] = p; cv[2] = omega; Xv[2] = q; ier = N_VLinearCombination(3, cv, Xv, x); if (ier != SUNLS_SUCCESS) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_VECTOROP_ERR; return(SUNLS_VECTOROP_ERR); } } /* Update the residual r = q - omega*u */ N_VLinearSum(ONE, q, -omega, u, r); /* Set rho = norm(r) and check convergence */ *res_norm = rho = SUNRsqrt(N_VDotProd(r, r)); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print current iteration number and the residual */ if (SPBCGS_CONTENT(S)->print_level && SPBCGS_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPBCGS_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) *nli, *res_norm); } #endif if (rho <= delta) { converged = SUNTRUE; break; } /* Not yet converged, continue iteration */ /* Update beta = / * alpha / omega */ beta_num = N_VDotProd(r, r_star); beta = ((beta_num / beta_denom) * (alpha / omega)); /* Update p = r + beta*(p - omega*Ap) = beta*p - beta*omega*Ap + r */ cv[0] = beta; Xv[0] = p; cv[1] = -alpha*(beta_num / beta_denom); Xv[1] = Ap; cv[2] = ONE; Xv[2] = r; ier = N_VLinearCombination(3, cv, Xv, p); if (ier != SUNLS_SUCCESS) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_VECTOROP_ERR; return(SUNLS_VECTOROP_ERR); } /* udpate beta_denom 
for next iteration */ beta_denom = beta_num; } /* Main loop finished */ if ((converged == SUNTRUE) || (rho < r_norm)) { /* Apply the x-scaling and right preconditioner: x = P2_inv sx_inv x */ if (scale_x) N_VDiv(x, sx, x); if (preOnRight) { ier = psolve(P_data, x, vtemp, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } N_VScale(ONE, vtemp, x); } *zeroguess = SUNFALSE; if (converged == SUNTRUE) LASTFLAG(S) = SUNLS_SUCCESS; else LASTFLAG(S) = SUNLS_RES_REDUCED; return(LASTFLAG(S)); } else { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_CONV_FAIL; return(LASTFLAG(S)); } } int SUNLinSolNumIters_SPBCGS(SUNLinearSolver S) { /* return the stored 'numiters' value */ if (S == NULL) return(-1); return (SPBCGS_CONTENT(S)->numiters); } realtype SUNLinSolResNorm_SPBCGS(SUNLinearSolver S) { /* return the stored 'resnorm' value */ if (S == NULL) return(-ONE); return (SPBCGS_CONTENT(S)->resnorm); } N_Vector SUNLinSolResid_SPBCGS(SUNLinearSolver S) { /* return the stored 'r' vector */ return (SPBCGS_CONTENT(S)->r); } sunindextype SUNLinSolLastFlag_SPBCGS(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return (LASTFLAG(S)); } int SUNLinSolSpace_SPBCGS(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { sunindextype liw1, lrw1; if (SPBCGS_CONTENT(S)->vtemp->ops->nvspace) N_VSpace(SPBCGS_CONTENT(S)->vtemp, &lrw1, &liw1); else lrw1 = liw1 = 0; *lenrwLS = lrw1*9; *leniwLS = liw1*9; return(SUNLS_SUCCESS); } int SUNLinSolFree_SPBCGS(SUNLinearSolver S) { if (S == NULL) return(SUNLS_SUCCESS); if (S->content) { /* delete items from within the content structure */ if (SPBCGS_CONTENT(S)->r_star) { N_VDestroy(SPBCGS_CONTENT(S)->r_star); SPBCGS_CONTENT(S)->r_star = NULL; } if (SPBCGS_CONTENT(S)->r) { N_VDestroy(SPBCGS_CONTENT(S)->r); SPBCGS_CONTENT(S)->r = NULL; } if (SPBCGS_CONTENT(S)->p) { N_VDestroy(SPBCGS_CONTENT(S)->p); 
SPBCGS_CONTENT(S)->p = NULL; } if (SPBCGS_CONTENT(S)->q) { N_VDestroy(SPBCGS_CONTENT(S)->q); SPBCGS_CONTENT(S)->q = NULL; } if (SPBCGS_CONTENT(S)->u) { N_VDestroy(SPBCGS_CONTENT(S)->u); SPBCGS_CONTENT(S)->u = NULL; } if (SPBCGS_CONTENT(S)->Ap) { N_VDestroy(SPBCGS_CONTENT(S)->Ap); SPBCGS_CONTENT(S)->Ap = NULL; } if (SPBCGS_CONTENT(S)->vtemp) { N_VDestroy(SPBCGS_CONTENT(S)->vtemp); SPBCGS_CONTENT(S)->vtemp = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } int SUNLinSolSetInfoFile_SPBCGS(SUNLinearSolver S, FILE* info_file) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); SPBCGS_CONTENT(S)->info_file = info_file; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetInfoFile_SPBCGS: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } int SUNLinSolSetPrintLevel_SPBCGS(SUNLinearSolver S, int print_level) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); /* check for valid print level */ if (print_level < 0 || print_level > 1) return(SUNLS_ILL_INPUT); SPBCGS_CONTENT(S)->print_level = print_level; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetPrintLevel_SPBCGS: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } StanHeaders/src/sunlinsol/spbcgs/fmod/0000755000176200001440000000000014511135055017476 5ustar liggesusersStanHeaders/src/sunlinsol/spbcgs/fmod/fsunlinsol_spbcgs_mod.f900000644000176200001440000003666614645137106024442 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! 
Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunlinsol_spbcgs_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: SUNSPBCGS_MAXL_DEFAULT = 5_C_INT public :: FSUNLinSol_SPBCGS public :: FSUNLinSol_SPBCGSSetPrecType public :: FSUNLinSol_SPBCGSSetMaxl public :: FSUNLinSolGetType_SPBCGS public :: FSUNLinSolGetID_SPBCGS public :: FSUNLinSolInitialize_SPBCGS public :: FSUNLinSolSetATimes_SPBCGS public :: FSUNLinSolSetPreconditioner_SPBCGS public :: FSUNLinSolSetScalingVectors_SPBCGS public :: FSUNLinSolSetZeroGuess_SPBCGS public :: FSUNLinSolSetup_SPBCGS public :: FSUNLinSolSolve_SPBCGS public :: FSUNLinSolNumIters_SPBCGS public :: FSUNLinSolResNorm_SPBCGS public :: FSUNLinSolResid_SPBCGS public :: FSUNLinSolLastFlag_SPBCGS public :: FSUNLinSolSpace_SPBCGS public :: FSUNLinSolFree_SPBCGS public :: FSUNLinSolSetInfoFile_SPBCGS public :: FSUNLinSolSetPrintLevel_SPBCGS ! 
WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_SPBCGS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSol_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FSUNLinSol_SPBCGSSetPrecType(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPBCGSSetPrecType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_SPBCGSSetMaxl(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPBCGSSetMaxl") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetType_SPBCGS(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_SPBCGS(farg1) & bind(C, name="_wrap_FSUNLinSolGetID_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_SPBCGS(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetATimes_SPBCGS(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetATimes_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPreconditioner_SPBCGS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSolSetPreconditioner_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), 
value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetScalingVectors_SPBCGS(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetScalingVectors_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetZeroGuess_SPBCGS(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetZeroGuess_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_SPBCGS(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve_SPBCGS(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolNumIters_SPBCGS(farg1) & bind(C, name="_wrap_FSUNLinSolNumIters_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolResNorm_SPBCGS(farg1) & bind(C, name="_wrap_FSUNLinSolResNorm_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FSUNLinSolResid_SPBCGS(farg1) & bind(C, name="_wrap_FSUNLinSolResid_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNLinSolLastFlag_SPBCGS(farg1) & 
bind(C, name="_wrap_FSUNLinSolLastFlag_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_SPBCGS(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree_SPBCGS(farg1) & bind(C, name="_wrap_FSUNLinSolFree_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetInfoFile_SPBCGS(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetInfoFile_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPrintLevel_SPBCGS(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetPrintLevel_SPBCGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNLinSol_SPBCGS(y, pretype, maxl, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y integer(C_INT), intent(in) :: pretype integer(C_INT), intent(in) :: maxl type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = c_loc(y) farg2 = pretype farg3 = maxl farg4 = sunctx fresult = swigc_FSUNLinSol_SPBCGS(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FSUNLinSol_SPBCGSSetPrecType(s, pretype) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: pretype integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = pretype fresult = swigc_FSUNLinSol_SPBCGSSetPrecType(farg1, farg2) swig_result = fresult end function function FSUNLinSol_SPBCGSSetMaxl(s, maxl) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: maxl integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = maxl fresult = swigc_FSUNLinSol_SPBCGSSetMaxl(farg1, farg2) swig_result = fresult end function function FSUNLinSolGetType_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_SPBCGS(farg1) swig_result = fresult end function function FSUNLinSolGetID_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) 
fresult = swigc_FSUNLinSolGetID_SPBCGS(farg1) swig_result = fresult end function function FSUNLinSolInitialize_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_SPBCGS(farg1) swig_result = fresult end function function FSUNLinSolSetATimes_SPBCGS(s, a_data, atimes) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: a_data type(C_FUNPTR), intent(in), value :: atimes integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = c_loc(s) farg2 = a_data farg3 = atimes fresult = swigc_FSUNLinSolSetATimes_SPBCGS(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetPreconditioner_SPBCGS(s, p_data, pset, psol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: p_data type(C_FUNPTR), intent(in), value :: pset type(C_FUNPTR), intent(in), value :: psol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = c_loc(s) farg2 = p_data farg3 = pset farg4 = psol fresult = swigc_FSUNLinSolSetPreconditioner_SPBCGS(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNLinSolSetScalingVectors_SPBCGS(s, s1, s2) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(N_Vector), target, intent(inout) :: s1 type(N_Vector), target, intent(inout) :: s2 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(s1) farg3 = c_loc(s2) fresult = swigc_FSUNLinSolSetScalingVectors_SPBCGS(farg1, farg2, farg3) swig_result = 
fresult end function function FSUNLinSolSetZeroGuess_SPBCGS(s, onoff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: onoff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = onoff fresult = swigc_FSUNLinSolSetZeroGuess_SPBCGS(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetup_SPBCGS(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup_SPBCGS(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve_SPBCGS(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_SPBCGS(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolNumIters_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolNumIters_SPBCGS(farg1) swig_result = fresult end function function FSUNLinSolResNorm_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(SUNLinearSolver), target, intent(inout) 
:: s real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResNorm_SPBCGS(farg1) swig_result = fresult end function function FSUNLinSolResid_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResid_SPBCGS(farg1) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolLastFlag_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_SPBCGS(farg1) swig_result = fresult end function function FSUNLinSolSpace_SPBCGS(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_SPBCGS(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_SPBCGS(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_SPBCGS(farg1) swig_result = fresult end function function FSUNLinSolSetInfoFile_SPBCGS(s, info_file) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: info_file integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 
= info_file fresult = swigc_FSUNLinSolSetInfoFile_SPBCGS(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetPrintLevel_SPBCGS(s, print_level) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: print_level integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = print_level fresult = swigc_FSUNLinSolSetPrintLevel_SPBCGS(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sunlinsol/spbcgs/fmod/fsunlinsol_spbcgs_mod.c0000644000176200001440000003306414645137106024253 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_spbcgs.h" SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_SPBCGS(N_Vector farg1, int const *farg2, int const *farg3, void *farg4) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int arg3 ; SUNContext arg4 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (SUNContext)(farg4); result = (SUNLinearSolver)SUNLinSol_SPBCGS(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPBCGSSetPrecType(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = 
(int)SUNLinSol_SPBCGSSetPrecType(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPBCGSSetMaxl(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_SPBCGSSetMaxl(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_SPBCGS(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_SPBCGS(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetID_SPBCGS(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_SPBCGS(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_SPBCGS(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_SPBCGS(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetATimes_SPBCGS(SUNLinearSolver farg1, void *farg2, SUNATimesFn farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNATimesFn arg3 = (SUNATimesFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNATimesFn)(farg3); result = (int)SUNLinSolSetATimes_SPBCGS(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPreconditioner_SPBCGS(SUNLinearSolver farg1, void *farg2, SUNPSetupFn farg3, SUNPSolveFn farg4) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNPSetupFn arg3 = (SUNPSetupFn) 0 ; SUNPSolveFn arg4 = (SUNPSolveFn) 0 ; int result; arg1 = 
(SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNPSetupFn)(farg3); arg4 = (SUNPSolveFn)(farg4); result = (int)SUNLinSolSetPreconditioner_SPBCGS(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetScalingVectors_SPBCGS(SUNLinearSolver farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNLinSolSetScalingVectors_SPBCGS(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetZeroGuess_SPBCGS(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetZeroGuess_SPBCGS(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_SPBCGS(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_SPBCGS(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_SPBCGS(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_SPBCGS(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolNumIters_SPBCGS(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver 
arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolNumIters_SPBCGS(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FSUNLinSolResNorm_SPBCGS(SUNLinearSolver farg1) { double fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; realtype result; arg1 = (SUNLinearSolver)(farg1); result = (realtype)SUNLinSolResNorm_SPBCGS(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FSUNLinSolResid_SPBCGS(SUNLinearSolver farg1) { N_Vector fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector result; arg1 = (SUNLinearSolver)(farg1); result = (N_Vector)SUNLinSolResid_SPBCGS(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_SPBCGS(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_SPBCGS(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_SPBCGS(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_SPBCGS(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_SPBCGS(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_SPBCGS(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetInfoFile_SPBCGS(SUNLinearSolver farg1, void *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNLinSolSetInfoFile_SPBCGS(arg1,arg2); fresult = (int)(result); return fresult; } 
SWIGEXPORT int _wrap_FSUNLinSolSetPrintLevel_SPBCGS(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetPrintLevel_SPBCGS(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/magmadense/0000755000176200001440000000000014511135055017371 5ustar liggesusersStanHeaders/src/sunlinsol/magmadense/sunlinsol_magmadense.cpp0000644000176200001440000003134414645137106024320 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------*/ #include #include #include #include #include /* Interfaces to match 'realtype' with the correct MAGMA functions */ #if defined(SUNDIALS_DOUBLE_PRECISION) #define xgetrf magma_dgetrf_gpu #define xgetrf_batched magma_dgetrf_batched #define xgetrs magma_dgetrs_gpu #define xgetrs_batched magma_dgetrs_batched #define xset_pointer magma_dset_pointer #elif defined(SUNDIALS_SINGLE_PRECISION) #define xgetrf magma_sgetrf_gpu #define xgetrf_batched magma_sgetrf_batched #define xgetrs magma_sgetrs_gpu #define xgetrs_batched magma_sgetrs_batched #define xset_pointer magma_sset_pointer #else #error Incompatible realtype for MAGMA #endif #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * MAGMADENSE solver structure accessibility macros: * ----------------------------------------------------------------- */ #define MAGMADENSE_CONTENT(S) ( 
(SUNLinearSolverContent_MagmaDense)(S->content) ) #define MHELP(S) ( MAGMADENSE_CONTENT(S)->memhelp ) #define QUEUE(S) ( MAGMADENSE_CONTENT(S)->q ) #define PIVOTS(S) ( (sunindextype*)MAGMADENSE_CONTENT(S)->pivots->ptr ) #define PIVOTSARRAY(S) ( (sunindextype**)MAGMADENSE_CONTENT(S)->pivotsarr->ptr ) #define RHSARRAY(S) ( (realtype**)MAGMADENSE_CONTENT(S)->rhsarr->ptr ) #define INFOARRAY(S) ( (sunindextype*)MAGMADENSE_CONTENT(S)->infoarr->ptr ) #define LASTFLAG(S) ( MAGMADENSE_CONTENT(S)->last_flag ) #define ASYNCHRONOUS(S) ( MAGMADENSE_CONTENT(S)->async) /* * ---------------------------------------------------------------------------- * Implementation specific routines * ---------------------------------------------------------------------------- */ /* * Constructor functions */ SUNLinearSolver SUNLinSol_MagmaDense(N_Vector y, SUNMatrix Amat, SUNContext sunctx) { int retval = 0; SUNLinearSolver S; SUNLinearSolverContent_MagmaDense content; SUNMatrixContent_MagmaDense A; sunindextype M, nblocks; /* Check inputs */ if (y == NULL || Amat == NULL) return(NULL); if (y->ops == NULL || Amat->ops == NULL) return(NULL); if (y->ops->nvgetlength == NULL || y->ops->nvgetdevicearraypointer == NULL || Amat->ops->getid == NULL) return(NULL); /* Check compatibility with supplied SUNMatrix */ if (SUNMatGetID(Amat) != SUNMATRIX_MAGMADENSE) return(NULL); if (Amat->content == NULL) return(NULL); A = (SUNMatrixContent_MagmaDense) Amat->content; /* Check that the matrix is square */ if (A->M != A->N) return(NULL); M = A->M; nblocks = A->nblocks; /* Check that the matirx and vector dimensions agree */ if (M*nblocks != N_VGetLength(y)) return(NULL); /* Create the linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_MagmaDense; S->ops->getid = SUNLinSolGetID_MagmaDense; S->ops->initialize = SUNLinSolInitialize_MagmaDense; S->ops->setup = SUNLinSolSetup_MagmaDense; S->ops->solve = 
SUNLinSolSolve_MagmaDense; S->ops->lastflag = SUNLinSolLastFlag_MagmaDense; S->ops->space = SUNLinSolSpace_MagmaDense; S->ops->free = SUNLinSolFree_MagmaDense; /* Create content */ content = NULL; content = (SUNLinearSolverContent_MagmaDense) malloc(sizeof(*content)); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = 0; content->async = SUNTRUE; content->N = M; content->pivots = NULL; content->pivotsarr = NULL; content->infoarr = NULL; content->rhsarr = NULL; content->memhelp = A->memhelp; content->q = A->q; /* Allocate data */ /* The pivots need to be in host memory when calling the non-batched methods, but in device memory for the batched methods. */ retval = SUNMemoryHelper_Alloc(content->memhelp, &content->pivots, M * nblocks * sizeof(sunindextype), nblocks > 1 ? SUNMEMTYPE_DEVICE : SUNMEMTYPE_HOST, nullptr); if (retval) { SUNLinSolFree(S); return(NULL); } /* If we have multiple blocks, then we need to allocate some extra pointer arrays needed when calling MAGMA batched methods. */ if (nblocks > 1) { retval = SUNMemoryHelper_Alloc(content->memhelp, &content->pivotsarr, nblocks * sizeof(sunindextype*), SUNMEMTYPE_DEVICE, nullptr); if (retval) { SUNLinSolFree(S); return(NULL); } /* Set the pivots array on the device */ magma_iset_pointer((sunindextype**)content->pivotsarr->ptr, /* 2D output array */ (sunindextype*)content->pivots->ptr, /* 1D input array */ 1, /* leading dimension of input */ 0, /* row */ 0, /* column */ M, /* rows in a block */ nblocks, /* number of blocks */ content->q); /* We use pinned memory for the info array because we are going to check its values on the host and we need it to have fast transfers. 
*/ retval = SUNMemoryHelper_Alloc(content->memhelp, &content->infoarr, nblocks * sizeof(sunindextype), SUNMEMTYPE_PINNED, nullptr); if (retval) { SUNLinSolFree(S); return(NULL); } retval = SUNMemoryHelper_Alloc(content->memhelp, &content->rhsarr, nblocks * sizeof(realtype*), SUNMEMTYPE_DEVICE, nullptr); if (retval) { SUNLinSolFree(S); return(NULL); } } return(S); } /* * Set functions */ int SUNLinSol_MagmaDense_SetAsync(SUNLinearSolver S, booleantype onoff) { if (S == NULL) return SUNLS_MEM_NULL; ASYNCHRONOUS(S) = onoff; return SUNLS_SUCCESS; } /* * ----------------------------------------------------------------- * Implementation of generic SUNLinearSolver operations. * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_MagmaDense(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_MagmaDense(SUNLinearSolver S) { return(SUNLINEARSOLVER_MAGMADENSE); } int SUNLinSolInitialize_MagmaDense(SUNLinearSolver S) { /* All solver-specific memory has already been allocated */ if (S == NULL) return SUNLS_MEM_NULL; LASTFLAG(S) = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int SUNLinSolSetup_MagmaDense(SUNLinearSolver S, SUNMatrix A) { /* Check for valid inputs */ if (S == NULL) return SUNLS_MEM_NULL; if (A == NULL) { LASTFLAG(S) = SUNLS_MEM_NULL; return(SUNLS_MEM_NULL); } /* Ensure that A is a magma dense matrix */ if (SUNMatGetID(A) != SUNMATRIX_MAGMADENSE) { LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } sunindextype ier = 0; sunindextype M = SUNMatrix_MagmaDense_BlockRows(A); sunindextype nblocks = SUNMatrix_MagmaDense_NumBlocks(A); /* Call MAGMA to do LU factorization of A */ if (nblocks > 1) { #ifndef SUNDIALS_MAGMA_USE_GETRF_LOOP xgetrf_batched(M, /* number of rows per block */ M, /* number of columns per block */ SUNMatrix_MagmaDense_BlockData(A), M, /* leading dimension of each block */ PIVOTSARRAY(S), INFOARRAY(S), nblocks, QUEUE(S)); #else realtype** blocks = 
SUNMatrix_MagmaDense_BlockData(A); for (int k = 0; k < nblocks; k++) { xgetrf(M, /* number of rows */ M, /* number of columns */ blocks[k], M, /* leading dimension of A */ PIVOTSARRAY(S)[k], &INFOARRAY(S)[k]); } #endif if (!ASYNCHRONOUS(S)) { magma_queue_sync(QUEUE(S)); /* Check if there were any failures when factoring */ for (sunindextype k = 0; k < nblocks; k++) { if (INFOARRAY(S)[k] < 0) ier = INFOARRAY(S)[k]; if (INFOARRAY(S)[k] > 0) { ier = INFOARRAY(S)[k]; break; } } } } else { xgetrf(M, /* number of rows */ M, /* number of columns */ SUNMatrix_MagmaDense_Data(A), M, /* leading dimension of A */ PIVOTS(S), &ier); if (!ASYNCHRONOUS(S)) magma_queue_sync(QUEUE(S)); } LASTFLAG(S) = ier; if (ier > 0) return(SUNLS_LUFACT_FAIL); if (ier < 0) return(SUNLS_PACKAGE_FAIL_UNREC); return(SUNLS_SUCCESS); } int SUNLinSolSolve_MagmaDense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { /* Check for valid inputs */ if (S == NULL) return(SUNLS_MEM_NULL); if ( (A == NULL) || (x == NULL) || (b == NULL) ) { LASTFLAG(S) = SUNLS_MEM_NULL; return(SUNLS_MEM_NULL); } /* Ensure that A is a magma dense matrix */ if (SUNMatGetID(A) != SUNMATRIX_MAGMADENSE) { LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } int ier = 0; sunindextype M = SUNMatrix_MagmaDense_BlockRows(A); sunindextype nblocks = SUNMatrix_MagmaDense_NumBlocks(A); /* Copy b into x */ N_VScale(ONE, b, x); /* Access x data array */ realtype* xdata = N_VGetDeviceArrayPointer(x); if (xdata == NULL) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } /* Call MAGMA to solve the linear system */ if (nblocks > 1) { /* First, set pointers to RHS blocks */ xset_pointer(RHSARRAY(S), /* 2D output array */ xdata, /* 1D input array */ 1, /* leading dimension of input */ 0, /* rows */ 0, /* cols */ M, /* number of rows in block */ nblocks, QUEUE(S)); /* Now, solve the batch system */ xgetrs_batched(MagmaNoTrans, M, /* order of the matrix */ 1, /* number of right hand sides */ 
SUNMatrix_MagmaDense_BlockData(A), M, /* leading dimension of A */ PIVOTSARRAY(S), RHSARRAY(S), /* right hand side (input), solution (output) */ M, /* leading dimension of b */ nblocks, QUEUE(S)); } else { xgetrs(MagmaNoTrans, M, /* order of the matrix */ 1, /* number of right hand sides */ SUNMatrix_MagmaDense_Data(A), M, /* leading dimension of A */ PIVOTS(S), xdata, /* right hand side (input), solution (output) */ M, /* leading dimension of x */ &ier); } if(!ASYNCHRONOUS(S)) magma_queue_sync(QUEUE(S)); LASTFLAG(S) = ier; return((ier < 0) ? SUNLS_PACKAGE_FAIL_UNREC : SUNLS_SUCCESS); } sunindextype SUNLinSolLastFlag_MagmaDense(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return(LASTFLAG(S)); } int SUNLinSolSpace_MagmaDense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { *lenrwLS = 0; *leniwLS = 2 + MAGMADENSE_CONTENT(S)->N; return(SUNLS_SUCCESS); } int SUNLinSolFree_MagmaDense(SUNLinearSolver S) { /* return if S is already free */ if (S == NULL) return(SUNLS_SUCCESS); /* delete items from contents, then delete generic structure */ if (S->content) { if (MAGMADENSE_CONTENT(S)->pivots) SUNMemoryHelper_Dealloc(MHELP(S), MAGMADENSE_CONTENT(S)->pivots, nullptr); if (MAGMADENSE_CONTENT(S)->pivotsarr) SUNMemoryHelper_Dealloc(MHELP(S), MAGMADENSE_CONTENT(S)->pivotsarr, nullptr); if (MAGMADENSE_CONTENT(S)->infoarr) SUNMemoryHelper_Dealloc(MHELP(S), MAGMADENSE_CONTENT(S)->infoarr, nullptr); if (MAGMADENSE_CONTENT(S)->rhsarr) SUNMemoryHelper_Dealloc(MHELP(S), MAGMADENSE_CONTENT(S)->rhsarr, nullptr); } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/onemkldense/0000755000176200001440000000000014511135055017574 5ustar liggesusersStanHeaders/src/sunlinsol/onemkldense/sunlinsol_onemkldense.cpp0000644000176200001440000004514414645137106024731 0ustar liggesusers/* --------------------------------------------------------------------------- * Programmer(s): David 
J. Gardner @ LLNL * --------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * --------------------------------------------------------------------------- * This is the implementation file for the dense implementation of the * SUNLINEARSOLVER class using the Intel oneAPI Math Kernel Library (oneMKL). * ---------------------------------------------------------------------------*/ #include #include #include using namespace oneapi::mkl::lapack; // SUNDIALS public headers #include #include // SUNDIALS private headers #include "sundials_debug.h" // Check for a valid precision and index size #if defined(SUNDIALS_EXTENDED_PRECISION) #error "oneMLK unsupported precision" #endif #if defined(SUNDIALS_INT32_T) #error "oneMLK unsupported index size" #endif // Accessor macros // Content and last error flag #define LS_CONTENT(S) ((SUNLinearSolverContent_OneMklDense)(S->content)) #define LS_LASTFLAG(S) (LS_CONTENT(S)->last_flag ) // Pivots array length and memory #define LS_ROWS(S) (LS_CONTENT(S)->rows) #define LS_PIVOTS(S) (LS_CONTENT(S)->pivots) #define LS_PIVOTSp(S) ((sunindextype*) LS_CONTENT(S)->pivots->ptr) // Getrf scratch space size and memory #define LS_F_SCRATCH_SIZE(S) (LS_CONTENT(S)->f_scratch_size) #define LS_F_SCRATCH(S) (LS_CONTENT(S)->f_scratchpad) #define LS_F_SCRATCHp(S) ((realtype*) LS_CONTENT(S)->f_scratchpad->ptr) // Getrs scratch space size and memory #define LS_S_SCRATCH_SIZE(S) (LS_CONTENT(S)->s_scratch_size) #define LS_S_SCRATCH(S) (LS_CONTENT(S)->s_scratchpad) #define LS_S_SCRATCHp(S) ((realtype*) LS_CONTENT(S)->s_scratchpad->ptr) // Memory type, helper, and SYCL queue #define LS_MEM_TYPE(S) (LS_CONTENT(S)->mem_type) #define LS_MEM_HELPER(S) 
(LS_CONTENT(S)->mem_helper) #define LS_QUEUE(S) (LS_CONTENT(S)->queue) /* -------------------------------------------------------------------------- * Constructors * -------------------------------------------------------------------------- */ SUNLinearSolver SUNLinSol_OneMklDense(N_Vector y, SUNMatrix Amat, SUNContext sunctx) { int retval = 0; // Check inputs if (!y || !Amat) { SUNDIALS_DEBUG_ERROR("Illegal input, y or A is NULL\n"); return NULL; } if (!(y->ops) || !(Amat->ops)) { SUNDIALS_DEBUG_ERROR("Illegal input, y->ops or A->ops is NULL\n"); return NULL; } if ( !(y->ops->nvgetlength) || !(y->ops->nvgetdevicearraypointer) || !(Amat->ops->getid) ) { SUNDIALS_DEBUG_ERROR("Illegal input, y or A missing required operations\n"); return NULL; } // Check compatibility with supplied SUNMatrix if (SUNMatGetID(Amat) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Illegal input, SUNMatID != SUNMATRIX_ONEMKLDENSE\n"); return NULL; } if (!(Amat->content)) { SUNDIALS_DEBUG_ERROR("Illegal input, SUNMatID != SUNMATRIX_ONEMKLDENSE\n"); return NULL; } SUNMatrixContent_OneMklDense A = (SUNMatrixContent_OneMklDense) Amat->content; // Check that the matrix is square if (A->rows != A->cols) { SUNDIALS_DEBUG_ERROR("Illegal input, A is not square\n"); return NULL; } // Check that the matrix and vector dimensions agree if (A->cols != N_VGetLength(y)) { SUNDIALS_DEBUG_ERROR("Illegal input, number of columns in A != length of y\n"); return NULL; } // Create the linear solver SUNLinearSolver S = SUNLinSolNewEmpty(sunctx); if (!S) { SUNDIALS_DEBUG_ERROR("SUNLinSolNewEmpty returned NULL\n"); return NULL; } // Attach operations S->ops->gettype = SUNLinSolGetType_OneMklDense; S->ops->getid = SUNLinSolGetID_OneMklDense; S->ops->initialize = SUNLinSolInitialize_OneMklDense; S->ops->setup = SUNLinSolSetup_OneMklDense; S->ops->solve = SUNLinSolSolve_OneMklDense; S->ops->lastflag = SUNLinSolLastFlag_OneMklDense; S->ops->space = SUNLinSolSpace_OneMklDense; S->ops->free = 
SUNLinSolFree_OneMklDense; // Create content S->content = (SUNLinearSolverContent_OneMklDense) malloc(sizeof(_SUNLinearSolverContent_OneMklDense)); if (!(S->content)) { SUNDIALS_DEBUG_ERROR("Content allocation failed\n"); SUNLinSolFree(S); return NULL; } // Fill content LS_CONTENT(S)->last_flag = 0; LS_CONTENT(S)->rows = A->rows; LS_CONTENT(S)->pivots = NULL; LS_CONTENT(S)->f_scratch_size = 0; LS_CONTENT(S)->f_scratchpad = NULL; LS_CONTENT(S)->s_scratch_size = 0; LS_CONTENT(S)->s_scratchpad = NULL; LS_CONTENT(S)->mem_type = A->mem_type; LS_CONTENT(S)->mem_helper = A->mem_helper; LS_CONTENT(S)->queue = A->queue; // Allocate data retval = SUNMemoryHelper_Alloc(LS_MEM_HELPER(S), &(LS_PIVOTS(S)), A->rows * sizeof(sunindextype), LS_MEM_TYPE(S), A->queue); if (retval) { SUNDIALS_DEBUG_ERROR("Pivots allocation failed\n"); SUNLinSolFree(S); return NULL; } // Compute scratchpad size for factorization and solve ::sycl::queue* queue = A->queue; sunindextype M = SUNMatrix_OneMklDense_BlockRows(Amat); sunindextype N = SUNMatrix_OneMklDense_BlockColumns(Amat); sunindextype num_blocks = SUNMatrix_OneMklDense_NumBlocks(Amat); if (num_blocks > 1) { LS_F_SCRATCH_SIZE(S) = getrf_batch_scratchpad_size(*queue, // device queue M, // rows in A_i N, // columns in A_i M, // leading dimension M * N, // stride between A_i M, // stride in P_i num_blocks); // number of blocks #ifdef SUNDIALS_ONEMKL_USE_GETRS_BATCHED LS_S_SCRATCH_SIZE(S)= getrs_batch_scratchpad_size(*queue, // device queue oneapi::mkl::transpose::nontrans, M, // number of rows in A_i 1, // number of right-hand sides M, // leading dimensino of A_i M * N, // stride between A_i M, // stride between pivots M, // leading dimension of B_i M, // stride between B_i num_blocks); // number of blocks #else LS_S_SCRATCH_SIZE(S) = getrs_scratchpad_size(*queue, // device queue oneapi::mkl::transpose::nontrans, M, // number of rows in A 1, // number of right-hand sizes M, // leading dimension of A M); // leading dimension of B #endif } else { 
LS_F_SCRATCH_SIZE(S) = getrf_scratchpad_size(*queue, // device queue M, // rows in A_i N, // columns in A_i M); // leading dimension LS_S_SCRATCH_SIZE(S) = getrs_scratchpad_size(*queue, // device queue oneapi::mkl::transpose::nontrans, M, // number of rows in A 1, // number of right-hand sizes M, // leading dimension of A M); // leading dimension of B } // Allocate factorization scratchpad if necessary retval = SUNMemoryHelper_Alloc(LS_MEM_HELPER(S), &(LS_F_SCRATCH(S)), LS_F_SCRATCH_SIZE(S) * sizeof(realtype), LS_MEM_TYPE(S), queue); if (retval) { SUNDIALS_DEBUG_ERROR("Scratchpad allocation failed\n"); SUNLinSolFree(S); return NULL; } // Allocate solve scratchpad if necessary retval = SUNMemoryHelper_Alloc(LS_MEM_HELPER(S), &(LS_S_SCRATCH(S)), LS_S_SCRATCH_SIZE(S) * sizeof(realtype), LS_MEM_TYPE(S), queue); if (retval) { SUNDIALS_DEBUG_ERROR("Scratchpad allocation failed\n"); SUNLinSolFree(S); return NULL; } return S; } /* -------------------------------------------------------------------------- * Implementation of SUNLinearSolver operations * -------------------------------------------------------------------------- */ int SUNLinSolInitialize_OneMklDense(SUNLinearSolver S) { // All solver-specific memory has already been allocated if (!S) { SUNDIALS_DEBUG_ERROR("Linear solver is NULL\n"); return SUNLS_MEM_NULL; } LS_LASTFLAG(S) = SUNLS_SUCCESS; return SUNLS_SUCCESS; } int SUNLinSolSetup_OneMklDense(SUNLinearSolver S, SUNMatrix A) { // Check for valid inputs if (!S) { SUNDIALS_DEBUG_ERROR("Linear solver is NULL\n"); return SUNLS_MEM_NULL; } if (!A) { SUNDIALS_DEBUG_ERROR("Matrix is NULL\n"); LS_LASTFLAG(S) = SUNLS_MEM_NULL; return SUNLS_MEM_NULL; } // Ensure that A is a oneMKL dense matrix if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Matrix is not the oneMKL matrix\n"); LS_LASTFLAG(S) = SUNLS_ILL_INPUT; return SUNLS_ILL_INPUT; } // Access A matrix data array realtype* Adata = SUNMatrix_OneMklDense_Data(A); if (!Adata) { 
SUNDIALS_DEBUG_ERROR("Matrix data array is NULL\n"); LS_LASTFLAG(S) = SUNLS_MEM_FAIL; return SUNLS_MEM_FAIL; } // Access pivots data array sunindextype* pivots = LS_PIVOTSp(S); if (!pivots) { SUNDIALS_DEBUG_ERROR("Matrix data array is NULL\n"); LS_LASTFLAG(S) = SUNLS_MEM_FAIL; return SUNLS_MEM_FAIL; } // Call oneMKL to do LU factorization of A ::sycl::queue* queue = LS_QUEUE(S); sunindextype ier = 0; sunindextype M = SUNMatrix_OneMklDense_BlockRows(A); sunindextype N = SUNMatrix_OneMklDense_BlockColumns(A); sunindextype num_blocks = SUNMatrix_OneMklDense_NumBlocks(A); sunindextype scratch_size = LS_F_SCRATCH_SIZE(S); realtype* scratchpad = LS_F_SCRATCHp(S); if (num_blocks > 1) { try { getrf_batch(*queue, // device queue M, // number of block rows N, // number of block columns Adata, // matrix data M, // leading dimension of A M * N, // stride between A_i pivots, // array of pivots M, // stride between P_i num_blocks, // number of blocks scratchpad, // scratchpad memory scratch_size); // scratchpad size } catch(oneapi::mkl::lapack::exception const& e) { SUNDIALS_DEBUG_ERROR("An exception occured in getrf_batch\n"); if (e.info()) { // An illegal value was providied or the scratch pad is too small ier = -1; } else { // The diagonal element of some of U_i is zero ier = 1; } } } else { try { getrf(*queue, // device queue M, // number of rows N, // number of columns Adata, // matrix data M, // leading dimension of A pivots, // array of pivots scratchpad, // scratchpad memory scratch_size); // scratchpad size } catch(oneapi::mkl::lapack::exception const& e) { SUNDIALS_DEBUG_ERROR("An exception occured in getrf\n"); if (e.info()) { // An illegal value was providied or the scratch pad is too small ier = -1; } else { // The diagonal element of some of U_i is zero ier = 1; } } } if (ier > 0) { LS_LASTFLAG(S) = ier; return SUNLS_LUFACT_FAIL; } if (ier < 0) { LS_LASTFLAG(S) = ier; return SUNLS_PACKAGE_FAIL_UNREC; } LS_LASTFLAG(S) = SUNLS_SUCCESS; return SUNLS_SUCCESS; } int 
SUNLinSolSolve_OneMklDense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { // Check for valid inputs if (!S) { SUNDIALS_DEBUG_ERROR("Linear solver is NULL\n"); return SUNLS_MEM_NULL; } if (!A || !x || !b) { SUNDIALS_DEBUG_ERROR("A, x, or b is NULL\n"); LS_LASTFLAG(S) = SUNLS_MEM_NULL; return SUNLS_MEM_NULL; } // Ensure that A is a onemkl dense matrix if (SUNMatGetID(A) != SUNMATRIX_ONEMKLDENSE) { SUNDIALS_DEBUG_ERROR("Matrix is not the oneMKL matrix\n"); LS_LASTFLAG(S) = SUNLS_ILL_INPUT; return SUNLS_ILL_INPUT; } // Copy b into x N_VScale(RCONST(1.0), b, x); // Access x vector data array realtype* xdata = N_VGetDeviceArrayPointer(x); if (!xdata) { SUNDIALS_DEBUG_ERROR("Vector data array is NULL\n"); LS_LASTFLAG(S) = SUNLS_MEM_FAIL; return SUNLS_MEM_FAIL; } // Access A matrix data array realtype* Adata = SUNMatrix_OneMklDense_Data(A); if (!Adata) { SUNDIALS_DEBUG_ERROR("Matrix data array is NULL\n"); LS_LASTFLAG(S) = SUNLS_MEM_FAIL; return SUNLS_MEM_FAIL; } // Access pivots data array sunindextype* pivots = LS_PIVOTSp(S); if (!pivots) { SUNDIALS_DEBUG_ERROR("Matrix data array is NULL\n"); LS_LASTFLAG(S) = SUNLS_MEM_FAIL; return SUNLS_MEM_FAIL; } // Call oneMKL to solve the linear system sunindextype ier = 0; ::sycl::queue* queue = LS_QUEUE(S); sunindextype M = SUNMatrix_OneMklDense_BlockRows(A); sunindextype N = SUNMatrix_OneMklDense_BlockColumns(A); sunindextype num_blocks = SUNMatrix_OneMklDense_NumBlocks(A); sunindextype scratch_size = LS_S_SCRATCH_SIZE(S); realtype* scratchpad = LS_S_SCRATCHp(S); if (num_blocks > 1) { #ifdef SUNDIALS_ONEMKL_USE_GETRS_BATCHED try { getrs_batch(*queue, // device queue oneapi::mkl::transpose::nontrans, M, // number of rows 1, // number of right-hand sides Adata, // factorized matrix data M, // leading dimension of A_i M * N, // stride between A_i pivots, // array of pivots M, // stride between pivots xdata, // right-hand side data M, // leading dimension of B_i M, // stride between B_i num_blocks, // number 
of blocks scratchpad, // scratchpad memory scratch_size); // scratchpad size } catch(oneapi::mkl::lapack::exception const& e) { SUNDIALS_DEBUG_ERROR("An exception occured in getrs_batch\n"); ier = -1; } #else try { for (sunindextype i = 0; i < num_blocks; i++) { getrs(*queue, // device queue oneapi::mkl::transpose::nontrans, M, // number of rows 1, // number of right-hand sides Adata + i * M * N, // factorized matrix data M, // leading dimension of A pivots, // array of pivots xdata + i * M, // right-hand side data M, // leading dimension of B_i scratchpad, // scratchpad memory scratch_size); // scratchpad size } } catch(oneapi::mkl::lapack::exception const& e) { SUNDIALS_DEBUG_ERROR("An exception occured in getrs\n"); ier = -1; } #endif } else { try { getrs(*queue, // device queue oneapi::mkl::transpose::nontrans, M, // number of rows 1, // number of right-hand sides Adata, // factorized matrix data M, // leading dimension of A pivots, // array of pivots xdata, // right-hand side data M, // leading dimension of B_i scratchpad, // scratchpad memory scratch_size); // scratchpad size } catch(oneapi::mkl::lapack::exception const& e) { SUNDIALS_DEBUG_ERROR("An exception occured in getrs\n"); ier = -1; } } if (ier < 0) { LS_LASTFLAG(S) = ier; return SUNLS_PACKAGE_FAIL_UNREC; } LS_LASTFLAG(S) = SUNLS_SUCCESS; return SUNLS_SUCCESS; } sunindextype SUNLinSolLastFlag_OneMklDense(SUNLinearSolver S) { // return the stored 'last_flag' value if (!S) { SUNDIALS_DEBUG_ERROR("Linear solver is NULL\n"); return SUNLS_MEM_NULL; } return LS_LASTFLAG(S); } int SUNLinSolSpace_OneMklDense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { if (!S) { SUNDIALS_DEBUG_ERROR("Linear solver is NULL\n"); return SUNLS_MEM_NULL; } *lenrwLS = 0; *leniwLS = 2 + LS_CONTENT(S)->rows; LS_LASTFLAG(S) = SUNLS_SUCCESS; return SUNLS_SUCCESS; } int SUNLinSolFree_OneMklDense(SUNLinearSolver S) { // return if S is already free if (!S) return SUNLS_SUCCESS; // delete items from contents, then delete 
generic structure if (S->content) { // Pivots memory if (LS_PIVOTS(S)) { SUNMemoryHelper_Dealloc(LS_MEM_HELPER(S), LS_PIVOTS(S), LS_QUEUE(S)); } // Factorization scrach memory if (LS_F_SCRATCH(S)) { SUNMemoryHelper_Dealloc(LS_MEM_HELPER(S), LS_F_SCRATCH(S), LS_QUEUE(S)); } LS_F_SCRATCH_SIZE(S) = 0; // Solve scratch memory if (LS_S_SCRATCH(S)) { SUNMemoryHelper_Dealloc(LS_MEM_HELPER(S), LS_S_SCRATCH(S), LS_QUEUE(S)); } LS_S_SCRATCH_SIZE(S) = 0; } SUNLinSolFreeEmpty(S); S = NULL; return SUNLS_SUCCESS; } StanHeaders/src/sunlinsol/sptfqmr/0000755000176200001440000000000014511135055016764 5ustar liggesusersStanHeaders/src/sunlinsol/sptfqmr/sunlinsol_sptfqmr.c0000644000176200001440000006673514645137106022762 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on sundials_sptfqmr.c code, written by Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the SPTFQMR implementation of * the SUNLINSOL package. 
* -----------------------------------------------------------------*/ #include #include #include #include #include "sundials_debug.h" #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * SPTFQMR solver structure accessibility macros: * ----------------------------------------------------------------- */ #define SPTFQMR_CONTENT(S) ( (SUNLinearSolverContent_SPTFQMR)(S->content) ) #define LASTFLAG(S) ( SPTFQMR_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new SPTFQMR linear solver */ SUNLinearSolver SUNLinSol_SPTFQMR(N_Vector y, int pretype, int maxl, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_SPTFQMR content; /* check for legal pretype and maxl values; if illegal use defaults */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) pretype = SUN_PREC_NONE; if (maxl <= 0) maxl = SUNSPTFQMR_MAXL_DEFAULT; /* check that the supplied N_Vector supports all requisite operations */ if ( (y->ops->nvclone == NULL) || (y->ops->nvdestroy == NULL) || (y->ops->nvlinearsum == NULL) || (y->ops->nvconst == NULL) || (y->ops->nvprod == NULL) || (y->ops->nvdiv == NULL) || (y->ops->nvscale == NULL) || (y->ops->nvdotprod == NULL) ) return(NULL); /* Create linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_SPTFQMR; S->ops->getid = SUNLinSolGetID_SPTFQMR; S->ops->setatimes = SUNLinSolSetATimes_SPTFQMR; S->ops->setpreconditioner = SUNLinSolSetPreconditioner_SPTFQMR; S->ops->setscalingvectors = SUNLinSolSetScalingVectors_SPTFQMR; S->ops->setzeroguess = SUNLinSolSetZeroGuess_SPTFQMR; S->ops->initialize = 
SUNLinSolInitialize_SPTFQMR; S->ops->setup = SUNLinSolSetup_SPTFQMR; S->ops->solve = SUNLinSolSolve_SPTFQMR; S->ops->numiters = SUNLinSolNumIters_SPTFQMR; S->ops->resnorm = SUNLinSolResNorm_SPTFQMR; S->ops->resid = SUNLinSolResid_SPTFQMR; S->ops->lastflag = SUNLinSolLastFlag_SPTFQMR; S->ops->space = SUNLinSolSpace_SPTFQMR; S->ops->free = SUNLinSolFree_SPTFQMR; /* Create content */ content = NULL; content = (SUNLinearSolverContent_SPTFQMR) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = 0; content->maxl = maxl; content->pretype = pretype; content->zeroguess = SUNFALSE; content->numiters = 0; content->resnorm = ZERO; content->r_star = NULL; content->q = NULL; content->d = NULL; content->v = NULL; content->p = NULL; content->r = NULL; content->u = NULL; content->vtemp1 = NULL; content->vtemp2 = NULL; content->vtemp3 = NULL; content->s1 = NULL; content->s2 = NULL; content->ATimes = NULL; content->ATData = NULL; content->Psetup = NULL; content->Psolve = NULL; content->PData = NULL; content->print_level = 0; content->info_file = stdout; /* Allocate content */ content->r_star = N_VClone(y); if (content->r_star == NULL) { SUNLinSolFree(S); return(NULL); } content->q = N_VClone(y); if (content->q == NULL) { SUNLinSolFree(S); return(NULL); } content->d = N_VClone(y); if (content->d == NULL) { SUNLinSolFree(S); return(NULL); } content->v = N_VClone(y); if (content->v == NULL) { SUNLinSolFree(S); return(NULL); } content->p = N_VClone(y); if (content->p == NULL) { SUNLinSolFree(S); return(NULL); } content->r = N_VCloneVectorArray(2, y); if (content->r == NULL) { SUNLinSolFree(S); return(NULL); } content->u = N_VClone(y); if (content->u == NULL) { SUNLinSolFree(S); return(NULL); } content->vtemp1 = N_VClone(y); if (content->vtemp1 == NULL) { SUNLinSolFree(S); return(NULL); } content->vtemp2 = N_VClone(y); if (content->vtemp2 == NULL) { SUNLinSolFree(S); 
return(NULL); } content->vtemp3 = N_VClone(y); if (content->vtemp3 == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* ---------------------------------------------------------------------------- * Function to set the type of preconditioning for SPTFQMR to use */ int SUNLinSol_SPTFQMRSetPrecType(SUNLinearSolver S, int pretype) { /* Check for legal pretype */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) { return(SUNLS_ILL_INPUT); } /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set pretype */ SPTFQMR_CONTENT(S)->pretype = pretype; return(SUNLS_SUCCESS); } /* ---------------------------------------------------------------------------- * Function to set the maximum number of iterations for SPTFQMR to use */ int SUNLinSol_SPTFQMRSetMaxl(SUNLinearSolver S, int maxl) { /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Check for legal pretype */ if (maxl <= 0) maxl = SUNSPTFQMR_MAXL_DEFAULT; /* Set pretype */ SPTFQMR_CONTENT(S)->maxl = maxl; return(SUNLS_SUCCESS); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_SPTFQMR(SUNLinearSolver S) { return(SUNLINEARSOLVER_ITERATIVE); } SUNLinearSolver_ID SUNLinSolGetID_SPTFQMR(SUNLinearSolver S) { return(SUNLINEARSOLVER_SPTFQMR); } int SUNLinSolInitialize_SPTFQMR(SUNLinearSolver S) { SUNLinearSolverContent_SPTFQMR content; /* set shortcut to SPTFQMR memory structure */ if (S == NULL) return(SUNLS_MEM_NULL); content = SPTFQMR_CONTENT(S); /* ensure valid options */ if (content->maxl <= 0) content->maxl = SUNSPTFQMR_MAXL_DEFAULT; if (content->ATimes == NULL) { LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } if ( (content->pretype != SUN_PREC_LEFT) && (content->pretype != SUN_PREC_RIGHT) && 
(content->pretype != SUN_PREC_BOTH) ) content->pretype = SUN_PREC_NONE; if ((content->pretype != SUN_PREC_NONE) && (content->Psolve == NULL)) { LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* no additional memory to allocate */ /* return with success */ content->last_flag = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int SUNLinSolSetATimes_SPTFQMR(SUNLinearSolver S, void* ATData, SUNATimesFn ATimes) { /* set function pointers to integrator-supplied ATimes routine and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPTFQMR_CONTENT(S)->ATimes = ATimes; SPTFQMR_CONTENT(S)->ATData = ATData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetPreconditioner_SPTFQMR(SUNLinearSolver S, void* PData, SUNPSetupFn Psetup, SUNPSolveFn Psolve) { /* set function pointers to integrator-supplied Psetup and PSolve routines and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPTFQMR_CONTENT(S)->Psetup = Psetup; SPTFQMR_CONTENT(S)->Psolve = Psolve; SPTFQMR_CONTENT(S)->PData = PData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetScalingVectors_SPTFQMR(SUNLinearSolver S, N_Vector s1, N_Vector s2) { /* set N_Vector pointers to integrator-supplied scaling vectors, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPTFQMR_CONTENT(S)->s1 = s1; SPTFQMR_CONTENT(S)->s2 = s2; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetZeroGuess_SPTFQMR(SUNLinearSolver S, booleantype onoff) { /* set flag indicating a zero initial guess */ if (S == NULL) return(SUNLS_MEM_NULL); SPTFQMR_CONTENT(S)->zeroguess = onoff; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetup_SPTFQMR(SUNLinearSolver S, SUNMatrix A) { int ier; SUNPSetupFn Psetup; void* PData; /* Set shortcuts to SPTFQMR memory structures */ if (S == NULL) return(SUNLS_MEM_NULL); Psetup = SPTFQMR_CONTENT(S)->Psetup; PData = SPTFQMR_CONTENT(S)->PData; /* no solver-specific setup is required, but if 
user-supplied Psetup routine exists, call that here */ if (Psetup != NULL) { ier = Psetup(PData); if (ier != 0) { LASTFLAG(S) = (ier < 0) ? SUNLS_PSET_FAIL_UNREC : SUNLS_PSET_FAIL_REC; return(LASTFLAG(S)); } } /* return with success */ return(SUNLS_SUCCESS); } int SUNLinSolSolve_SPTFQMR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype delta) { /* local data and shortcut variables */ realtype alpha, tau, eta, beta, c, sigma, v_bar, omega; realtype rho[2]; realtype r_init_norm, r_curr_norm; realtype temp_val; booleantype preOnLeft, preOnRight, scale_x, scale_b, converged, b_ok; booleantype *zeroguess; int n, m, ier, l_max; void *A_data, *P_data; SUNATimesFn atimes; SUNPSolveFn psolve; realtype *res_norm; int *nli; N_Vector sx, sb, r_star, q, d, v, p, *r, u, vtemp1, vtemp2, vtemp3; /* local variables for fused vector operations */ realtype cv[3]; N_Vector Xv[3]; /* Make local shorcuts to solver variables. */ if (S == NULL) return(SUNLS_MEM_NULL); l_max = SPTFQMR_CONTENT(S)->maxl; r_star = SPTFQMR_CONTENT(S)->r_star; q = SPTFQMR_CONTENT(S)->q; d = SPTFQMR_CONTENT(S)->d; v = SPTFQMR_CONTENT(S)->v; p = SPTFQMR_CONTENT(S)->p; r = SPTFQMR_CONTENT(S)->r; u = SPTFQMR_CONTENT(S)->u; vtemp1 = SPTFQMR_CONTENT(S)->vtemp1; vtemp2 = SPTFQMR_CONTENT(S)->vtemp2; vtemp3 = SPTFQMR_CONTENT(S)->vtemp3; sb = SPTFQMR_CONTENT(S)->s1; sx = SPTFQMR_CONTENT(S)->s2; A_data = SPTFQMR_CONTENT(S)->ATData; P_data = SPTFQMR_CONTENT(S)->PData; atimes = SPTFQMR_CONTENT(S)->ATimes; psolve = SPTFQMR_CONTENT(S)->Psolve; zeroguess = &(SPTFQMR_CONTENT(S)->zeroguess); nli = &(SPTFQMR_CONTENT(S)->numiters); res_norm = &(SPTFQMR_CONTENT(S)->resnorm); /* Initialize counters and convergence flag */ temp_val = r_curr_norm = -ONE; *nli = 0; converged = SUNFALSE; b_ok = SUNFALSE; /* set booleantype flags for internal solver options */ preOnLeft = ( (SPTFQMR_CONTENT(S)->pretype == SUN_PREC_LEFT) || (SPTFQMR_CONTENT(S)->pretype == SUN_PREC_BOTH) ); preOnRight = ( (SPTFQMR_CONTENT(S)->pretype == 
SUN_PREC_RIGHT) || (SPTFQMR_CONTENT(S)->pretype == SUN_PREC_BOTH) ); scale_x = (sx != NULL); scale_b = (sb != NULL); /* Check for unsupported use case */ if (preOnRight && !(*zeroguess)) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_ILL_INPUT; return(SUNLS_ILL_INPUT); } #ifdef SUNDIALS_BUILD_WITH_MONITORING if (SPTFQMR_CONTENT(S)->print_level && SPTFQMR_CONTENT(S)->info_file) STAN_SUNDIALS_FPRINTF(SPTFQMR_CONTENT(S)->info_file, "SUNLINSOL_SPTFQMR:\n"); #endif /* Check if Atimes function has been set */ if (atimes == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } /* If preconditioning, check if psolve has been set */ if ((preOnLeft || preOnRight) && psolve == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* Set r_star to initial (unscaled) residual r_star = r_0 = b - A*x_0 */ /* NOTE: if x == 0 then just set residual to b and continue */ if (*zeroguess) { N_VScale(ONE, b, r_star); } else { ier = atimes(A_data, x, r_star); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } N_VLinearSum(ONE, b, -ONE, r_star, r_star); } /* Apply left preconditioner and b-scaling to r_star (or really just r_0) */ if (preOnLeft) { ier = psolve(P_data, r_star, vtemp1, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, r_star, vtemp1); if (scale_b) N_VProd(sb, vtemp1, r_star); else N_VScale(ONE, vtemp1, r_star); /* Initialize rho[0] */ /* NOTE: initialized here to reduce number of computations - avoid need to compute r_star^T*r_star twice, and avoid needlessly squaring values */ rho[0] = N_VDotProd(r_star, r_star); /* Compute norm of initial residual (r_0) to see if we really need to do anything */ *res_norm = r_init_norm = SUNRsqrt(rho[0]); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print initial residual */ if (SPTFQMR_CONTENT(S)->print_level && SPTFQMR_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPTFQMR_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) 0, *res_norm); } #endif if (r_init_norm <= delta) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* Set v = A*r_0 (preconditioned and scaled) */ if (scale_x) N_VDiv(r_star, sx, vtemp1); else N_VScale(ONE, r_star, vtemp1); if (preOnRight) { N_VScale(ONE, vtemp1, v); ier = psolve(P_data, v, vtemp1, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } ier = atimes(A_data, vtemp1, v); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } if (preOnLeft) { ier = psolve(P_data, v, vtemp1, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, v, vtemp1); if (scale_b) N_VProd(sb, vtemp1, v); else N_VScale(ONE, vtemp1, v); /* Initialize remaining variables */ N_VScale(ONE, r_star, r[0]); N_VScale(ONE, r_star, u); N_VScale(ONE, r_star, p); N_VConst(ZERO, d); /* Set x = sx x if non-zero guess */ if (scale_x && !(*zeroguess)) N_VProd(sx, x, x); tau = r_init_norm; v_bar = eta = ZERO; /* START outer loop */ for (n = 0; n < l_max; ++n) { /* Increment linear iteration counter */ (*nli)++; /* sigma = r_star^T*v */ sigma = N_VDotProd(r_star, v); /* alpha = rho[0]/sigma */ alpha = rho[0]/sigma; /* q = u-alpha*v */ N_VLinearSum(ONE, u, -alpha, v, q); /* r[1] = r[0]-alpha*A*(u+q) */ N_VLinearSum(ONE, u, ONE, q, r[1]); if (scale_x) N_VDiv(r[1], sx, r[1]); if (preOnRight) { N_VScale(ONE, r[1], vtemp1); ier = psolve(P_data, vtemp1, r[1], delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } ier = atimes(A_data, r[1], vtemp1); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } if (preOnLeft) { ier = psolve(P_data, vtemp1, r[1], delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, vtemp1, r[1]); if (scale_b) N_VProd(sb, r[1], vtemp1); else N_VScale(ONE, r[1], vtemp1); N_VLinearSum(ONE, r[0], -alpha, vtemp1, r[1]); /* START inner loop */ for (m = 0; m < 2; ++m) { /* d = [*]+(v_bar^2*eta/alpha)*d */ /* NOTES: * (1) [*] = u if m == 0, and q if m == 1 * (2) using temp_val reduces the number of required computations * if the inner loop is executed twice */ if (m == 0) { temp_val = SUNRsqrt(N_VDotProd(r[1], r[1])); omega = SUNRsqrt(SUNRsqrt(N_VDotProd(r[0], r[0]))*temp_val); N_VLinearSum(ONE, u, SUNSQR(v_bar)*eta/alpha, d, d); } else { omega = temp_val; N_VLinearSum(ONE, q, SUNSQR(v_bar)*eta/alpha, d, d); } /* v_bar = omega/tau */ v_bar = omega/tau; /* c = (1+v_bar^2)^(-1/2) */ c = ONE / SUNRsqrt(ONE+SUNSQR(v_bar)); /* tau = tau*v_bar*c */ tau = tau*v_bar*c; /* eta = c^2*alpha */ eta = SUNSQR(c)*alpha; /* x = x+eta*d */ if (n == 0 && m == 0 && *zeroguess) N_VScale(eta, d, x); else N_VLinearSum(ONE, x, eta, d, x); /* Check for convergence... 
*/ /* NOTE: just use approximation to norm of residual, if possible */ *res_norm = r_curr_norm = tau*SUNRsqrt(m+1); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print current iteration number and the residual */ if (SPTFQMR_CONTENT(S)->print_level && SPTFQMR_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPTFQMR_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) *nli, *res_norm); } #endif /* Exit inner loop if iteration has converged based upon approximation to norm of current residual */ if (r_curr_norm <= delta) { converged = SUNTRUE; break; } /* Decide if actual norm of residual vector should be computed */ /* NOTES: * (1) if r_curr_norm > delta, then check if actual residual norm * is OK (recall we first compute an approximation) * (2) if r_curr_norm >= r_init_norm and m == 1 and n == l_max, then * compute actual residual norm to see if the iteration can be * saved * (3) the scaled and preconditioned right-hand side of the given * linear system (denoted by b) is only computed once, and the * result is stored in vtemp3 so it can be reused - reduces the * number of psovles if using left preconditioning */ if ((r_curr_norm > delta) || (r_curr_norm >= r_init_norm && m == 1 && n == l_max)) { /* Compute norm of residual ||b-A*x||_2 (preconditioned and scaled) */ if (scale_x) N_VDiv(x, sx, vtemp1); else N_VScale(ONE, x, vtemp1); if (preOnRight) { ier = psolve(P_data, vtemp1, vtemp2, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_UNREC; return(LASTFLAG(S)); } N_VScale(ONE, vtemp2, vtemp1); } ier = atimes(A_data, vtemp1, vtemp2); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } if (preOnLeft) { ier = psolve(P_data, vtemp2, vtemp1, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, vtemp2, vtemp1); if (scale_b) N_VProd(sb, vtemp1, vtemp2); else N_VScale(ONE, vtemp1, vtemp2); /* Only precondition and scale b once (result saved for reuse) */ if (!b_ok) { b_ok = SUNTRUE; if (preOnLeft) { ier = psolve(P_data, b, vtemp3, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, b, vtemp3); if (scale_b) N_VProd(sb, vtemp3, vtemp3); } N_VLinearSum(ONE, vtemp3, -ONE, vtemp2, vtemp1); *res_norm = r_curr_norm = SUNRsqrt(N_VDotProd(vtemp1, vtemp1)); /* Exit inner loop if inequality condition is satisfied (meaning exit if we have converged) */ if (r_curr_norm <= delta) { converged = SUNTRUE; break; } } } /* END inner loop */ /* If converged, then exit outer loop as well */ if (converged == SUNTRUE) break; /* rho[1] = r_star^T*r_[1] */ rho[1] = N_VDotProd(r_star, r[1]); /* beta = rho[1]/rho[0] */ beta = rho[1]/rho[0]; /* u = r[1]+beta*q */ N_VLinearSum(ONE, r[1], beta, q, u); /* p = u+beta*(q+beta*p) = beta*beta*p + beta*q + u */ cv[0] = SUNSQR(beta); Xv[0] = p; cv[1] = beta; Xv[1] = q; cv[2] = ONE; Xv[2] = u; ier = N_VLinearCombination(3, cv, Xv, p); if (ier != SUNLS_SUCCESS) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_VECTOROP_ERR; return(SUNLS_VECTOROP_ERR); } /* v = A*p */ if (scale_x) N_VDiv(p, sx, vtemp1); else N_VScale(ONE, p, vtemp1); if (preOnRight) { N_VScale(ONE, vtemp1, v); ier = psolve(P_data, v, vtemp1, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } ier = atimes(A_data, vtemp1, v); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } if (preOnLeft) { ier = psolve(P_data, v, vtemp1, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else N_VScale(ONE, v, vtemp1); if (scale_b) N_VProd(sb, vtemp1, v); else N_VScale(ONE, vtemp1, v); /* Shift variable values */ /* NOTE: reduces storage requirements */ N_VScale(ONE, r[1], r[0]); rho[0] = rho[1]; } /* END outer loop */ /* Determine return value */ /* If iteration converged or residual was reduced, then return current iterate (x) */ if ((converged == SUNTRUE) || (r_curr_norm < r_init_norm)) { if (scale_x) N_VDiv(x, sx, x); if (preOnRight) { ier = psolve(P_data, x, vtemp1, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_UNREC; return(LASTFLAG(S)); } N_VScale(ONE, vtemp1, x); } *zeroguess = SUNFALSE; if (converged == SUNTRUE) LASTFLAG(S) = SUNLS_SUCCESS; else LASTFLAG(S) = SUNLS_RES_REDUCED; return(LASTFLAG(S)); } /* Otherwise, return error code */ else { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_CONV_FAIL; return(LASTFLAG(S)); } } int SUNLinSolNumIters_SPTFQMR(SUNLinearSolver S) { /* return the stored 'numiters' value */ if (S == NULL) return(-1); return (SPTFQMR_CONTENT(S)->numiters); } realtype SUNLinSolResNorm_SPTFQMR(SUNLinearSolver S) { /* return the stored 'resnorm' value */ if (S == NULL) return(-ONE); return (SPTFQMR_CONTENT(S)->resnorm); } N_Vector SUNLinSolResid_SPTFQMR(SUNLinearSolver S) { /* return the stored 'vtemp1' vector */ return (SPTFQMR_CONTENT(S)->vtemp1); } sunindextype SUNLinSolLastFlag_SPTFQMR(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return (LASTFLAG(S)); } int SUNLinSolSpace_SPTFQMR(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { sunindextype liw1, lrw1; if (SPTFQMR_CONTENT(S)->vtemp1->ops->nvspace) 
N_VSpace(SPTFQMR_CONTENT(S)->vtemp1, &lrw1, &liw1);
  else lrw1 = liw1 = 0;
  *lenrwLS = lrw1*11;
  *leniwLS = liw1*11;
  return(SUNLS_SUCCESS);
}

/* Free all solver memory: destroy each workspace vector held in the
   content structure (r is an array of 2 vectors), then the content,
   ops table, and the solver object itself.  Safe to call on a NULL or
   partially-constructed solver. */
int SUNLinSolFree_SPTFQMR(SUNLinearSolver S)
{
  if (S == NULL) return(SUNLS_SUCCESS);

  if (S->content) {
    /* delete items from within the content structure */
    if (SPTFQMR_CONTENT(S)->r_star) {
      N_VDestroy(SPTFQMR_CONTENT(S)->r_star);
      SPTFQMR_CONTENT(S)->r_star = NULL;
    }
    if (SPTFQMR_CONTENT(S)->q) {
      N_VDestroy(SPTFQMR_CONTENT(S)->q);
      SPTFQMR_CONTENT(S)->q = NULL;
    }
    if (SPTFQMR_CONTENT(S)->d) {
      N_VDestroy(SPTFQMR_CONTENT(S)->d);
      SPTFQMR_CONTENT(S)->d = NULL;
    }
    if (SPTFQMR_CONTENT(S)->v) {
      N_VDestroy(SPTFQMR_CONTENT(S)->v);
      SPTFQMR_CONTENT(S)->v = NULL;
    }
    if (SPTFQMR_CONTENT(S)->p) {
      N_VDestroy(SPTFQMR_CONTENT(S)->p);
      SPTFQMR_CONTENT(S)->p = NULL;
    }
    if (SPTFQMR_CONTENT(S)->r) {
      /* r holds the two residual vectors r[0], r[1] */
      N_VDestroyVectorArray(SPTFQMR_CONTENT(S)->r, 2);
      SPTFQMR_CONTENT(S)->r = NULL;
    }
    if (SPTFQMR_CONTENT(S)->u) {
      N_VDestroy(SPTFQMR_CONTENT(S)->u);
      SPTFQMR_CONTENT(S)->u = NULL;
    }
    if (SPTFQMR_CONTENT(S)->vtemp1) {
      N_VDestroy(SPTFQMR_CONTENT(S)->vtemp1);
      SPTFQMR_CONTENT(S)->vtemp1 = NULL;
    }
    if (SPTFQMR_CONTENT(S)->vtemp2) {
      N_VDestroy(SPTFQMR_CONTENT(S)->vtemp2);
      SPTFQMR_CONTENT(S)->vtemp2 = NULL;
    }
    if (SPTFQMR_CONTENT(S)->vtemp3) {
      N_VDestroy(SPTFQMR_CONTENT(S)->vtemp3);
      SPTFQMR_CONTENT(S)->vtemp3 = NULL;
    }
    free(S->content); S->content = NULL;
  }
  if (S->ops) { free(S->ops); S->ops = NULL; }
  free(S); S = NULL;
  return(SUNLS_SUCCESS);
}

/* Attach a FILE* for solver monitoring output; only functional when
   SUNDIALS was built with monitoring, otherwise returns SUNLS_ILL_INPUT. */
int SUNLinSolSetInfoFile_SPTFQMR(SUNLinearSolver S,
                                 FILE* info_file)
{
#ifdef SUNDIALS_BUILD_WITH_MONITORING
  /* check that the linear solver is non-null */
  if (S == NULL) return(SUNLS_MEM_NULL);

  SPTFQMR_CONTENT(S)->info_file = info_file;

  return(SUNLS_SUCCESS);
#else
  SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetInfoFile_SPTFQMR: SUNDIALS was not built with monitoring\n");
  return(SUNLS_ILL_INPUT);
#endif
}

/* Set the monitoring print level (0 or 1); only functional when SUNDIALS
   was built with monitoring. */
int SUNLinSolSetPrintLevel_SPTFQMR(SUNLinearSolver S,
                                   int print_level)
{
#ifdef SUNDIALS_BUILD_WITH_MONITORING
  /* check that the linear
solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); /* check for valid print level */ if (print_level < 0 || print_level > 1) return(SUNLS_ILL_INPUT); SPTFQMR_CONTENT(S)->print_level = print_level; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetPrintLevel_SPTFQMR: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } StanHeaders/src/sunlinsol/sptfqmr/fmod/0000755000176200001440000000000014511135055017711 5ustar liggesusersStanHeaders/src/sunlinsol/sptfqmr/fmod/fsunlinsol_sptfqmr_mod.f900000644000176200001440000003704214645137106025055 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunlinsol_sptfqmr_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: SUNSPTFQMR_MAXL_DEFAULT = 5_C_INT public :: FSUNLinSol_SPTFQMR public :: FSUNLinSol_SPTFQMRSetPrecType public :: FSUNLinSol_SPTFQMRSetMaxl public :: FSUNLinSolGetType_SPTFQMR public :: FSUNLinSolGetID_SPTFQMR public :: FSUNLinSolInitialize_SPTFQMR public :: FSUNLinSolSetATimes_SPTFQMR public :: FSUNLinSolSetPreconditioner_SPTFQMR public :: FSUNLinSolSetScalingVectors_SPTFQMR public :: FSUNLinSolSetZeroGuess_SPTFQMR public :: FSUNLinSolSetup_SPTFQMR public :: FSUNLinSolSolve_SPTFQMR public :: FSUNLinSolNumIters_SPTFQMR public :: FSUNLinSolResNorm_SPTFQMR public :: FSUNLinSolResid_SPTFQMR public :: FSUNLinSolLastFlag_SPTFQMR public :: FSUNLinSolSpace_SPTFQMR public :: FSUNLinSolFree_SPTFQMR public :: FSUNLinSolSetInfoFile_SPTFQMR public :: FSUNLinSolSetPrintLevel_SPTFQMR ! WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_SPTFQMR(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSol_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FSUNLinSol_SPTFQMRSetPrecType(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPTFQMRSetPrecType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_SPTFQMRSetMaxl(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPTFQMRSetMaxl") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetType_SPTFQMR(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_SPTFQMR(farg1) & bind(C, 
name="_wrap_FSUNLinSolGetID_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_SPTFQMR(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetATimes_SPTFQMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetATimes_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPreconditioner_SPTFQMR(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSolSetPreconditioner_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetScalingVectors_SPTFQMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetScalingVectors_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetZeroGuess_SPTFQMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetZeroGuess_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_SPTFQMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve_SPTFQMR(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_SPTFQMR") & result(fresult) use, intrinsic :: 
ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolNumIters_SPTFQMR(farg1) & bind(C, name="_wrap_FSUNLinSolNumIters_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolResNorm_SPTFQMR(farg1) & bind(C, name="_wrap_FSUNLinSolResNorm_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FSUNLinSolResid_SPTFQMR(farg1) & bind(C, name="_wrap_FSUNLinSolResid_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNLinSolLastFlag_SPTFQMR(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_SPTFQMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree_SPTFQMR(farg1) & bind(C, name="_wrap_FSUNLinSolFree_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetInfoFile_SPTFQMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetInfoFile_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPrintLevel_SPTFQMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetPrintLevel_SPTFQMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: 
farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FSUNLinSol_SPTFQMR(y, pretype, maxl, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y integer(C_INT), intent(in) :: pretype integer(C_INT), intent(in) :: maxl type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = c_loc(y) farg2 = pretype farg3 = maxl farg4 = sunctx fresult = swigc_FSUNLinSol_SPTFQMR(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FSUNLinSol_SPTFQMRSetPrecType(s, pretype) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: pretype integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = pretype fresult = swigc_FSUNLinSol_SPTFQMRSetPrecType(farg1, farg2) swig_result = fresult end function function FSUNLinSol_SPTFQMRSetMaxl(s, maxl) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: maxl integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = maxl fresult = swigc_FSUNLinSol_SPTFQMRSetMaxl(farg1, farg2) swig_result = fresult end function function FSUNLinSolGetType_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_SPTFQMR(farg1) swig_result = fresult end function function FSUNLinSolGetID_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: 
swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetID_SPTFQMR(farg1) swig_result = fresult end function function FSUNLinSolInitialize_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_SPTFQMR(farg1) swig_result = fresult end function function FSUNLinSolSetATimes_SPTFQMR(s, a_data, atimes) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: a_data type(C_FUNPTR), intent(in), value :: atimes integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = c_loc(s) farg2 = a_data farg3 = atimes fresult = swigc_FSUNLinSolSetATimes_SPTFQMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetPreconditioner_SPTFQMR(s, p_data, pset, psol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: p_data type(C_FUNPTR), intent(in), value :: pset type(C_FUNPTR), intent(in), value :: psol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = c_loc(s) farg2 = p_data farg3 = pset farg4 = psol fresult = swigc_FSUNLinSolSetPreconditioner_SPTFQMR(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNLinSolSetScalingVectors_SPTFQMR(s, s1, s2) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(N_Vector), target, intent(inout) :: s1 type(N_Vector), target, intent(inout) :: s2 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 
= c_loc(s) farg2 = c_loc(s1) farg3 = c_loc(s2) fresult = swigc_FSUNLinSolSetScalingVectors_SPTFQMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetZeroGuess_SPTFQMR(s, onoff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: onoff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = onoff fresult = swigc_FSUNLinSolSetZeroGuess_SPTFQMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetup_SPTFQMR(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup_SPTFQMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve_SPTFQMR(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_SPTFQMR(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolNumIters_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolNumIters_SPTFQMR(farg1) swig_result = fresult end function function 
FSUNLinSolResNorm_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResNorm_SPTFQMR(farg1) swig_result = fresult end function function FSUNLinSolResid_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResid_SPTFQMR(farg1) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolLastFlag_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_SPTFQMR(farg1) swig_result = fresult end function function FSUNLinSolSpace_SPTFQMR(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_SPTFQMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_SPTFQMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_SPTFQMR(farg1) swig_result = fresult end function function FSUNLinSolSetInfoFile_SPTFQMR(ls, info_file) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: 
swig_result type(SUNLinearSolver), target, intent(inout) :: ls type(C_PTR) :: info_file integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(ls) farg2 = info_file fresult = swigc_FSUNLinSolSetInfoFile_SPTFQMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetPrintLevel_SPTFQMR(ls, print_level) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: ls integer(C_INT), intent(in) :: print_level integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(ls) farg2 = print_level fresult = swigc_FSUNLinSolSetPrintLevel_SPTFQMR(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sunlinsol/sptfqmr/fmod/fsunlinsol_sptfqmr_mod.c0000644000176200001440000003313514645137106024700 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define 
SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_sptfqmr.h" SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_SPTFQMR(N_Vector farg1, int const *farg2, int const *farg3, void *farg4) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int arg3 ; SUNContext arg4 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (SUNContext)(farg4); result = (SUNLinearSolver)SUNLinSol_SPTFQMR(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPTFQMRSetPrecType(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = 
(int)SUNLinSol_SPTFQMRSetPrecType(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPTFQMRSetMaxl(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_SPTFQMRSetMaxl(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_SPTFQMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_SPTFQMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetID_SPTFQMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_SPTFQMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_SPTFQMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_SPTFQMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetATimes_SPTFQMR(SUNLinearSolver farg1, void *farg2, SUNATimesFn farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNATimesFn arg3 = (SUNATimesFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNATimesFn)(farg3); result = (int)SUNLinSolSetATimes_SPTFQMR(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPreconditioner_SPTFQMR(SUNLinearSolver farg1, void *farg2, SUNPSetupFn farg3, SUNPSolveFn farg4) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNPSetupFn arg3 = (SUNPSetupFn) 0 ; SUNPSolveFn arg4 = (SUNPSolveFn) 0 ; int 
result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNPSetupFn)(farg3); arg4 = (SUNPSolveFn)(farg4); result = (int)SUNLinSolSetPreconditioner_SPTFQMR(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetScalingVectors_SPTFQMR(SUNLinearSolver farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNLinSolSetScalingVectors_SPTFQMR(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetZeroGuess_SPTFQMR(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetZeroGuess_SPTFQMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_SPTFQMR(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_SPTFQMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_SPTFQMR(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_SPTFQMR(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolNumIters_SPTFQMR(SUNLinearSolver farg1) { int 
fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolNumIters_SPTFQMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FSUNLinSolResNorm_SPTFQMR(SUNLinearSolver farg1) { double fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; realtype result; arg1 = (SUNLinearSolver)(farg1); result = (realtype)SUNLinSolResNorm_SPTFQMR(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FSUNLinSolResid_SPTFQMR(SUNLinearSolver farg1) { N_Vector fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector result; arg1 = (SUNLinearSolver)(farg1); result = (N_Vector)SUNLinSolResid_SPTFQMR(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_SPTFQMR(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_SPTFQMR(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_SPTFQMR(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_SPTFQMR(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_SPTFQMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_SPTFQMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetInfoFile_SPTFQMR(SUNLinearSolver farg1, void *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNLinSolSetInfoFile_SPTFQMR(arg1,arg2); fresult 
= (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPrintLevel_SPTFQMR(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetPrintLevel_SPTFQMR(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/superlumt/0000755000176200001440000000000014511135055017330 5ustar liggesusersStanHeaders/src/sunlinsol/superlumt/sunlinsol_superlumt.c0000644000176200001440000002753414645137106023664 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * ----------------------------------------------------------------- * Based on codes _superlumt.c, written by * Carol S. Woodward @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the SuperLUMT implementation * of the SUNLINSOL package. 
* -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /* * ----------------------------------------------------------------- * SuperLUMT solver structure accessibility macros: * ----------------------------------------------------------------- */ #define SLUMT_CONTENT(S) ( (SUNLinearSolverContent_SuperLUMT)(S->content) ) #define LASTFLAG(S) ( SLUMT_CONTENT(S)->last_flag ) #define FIRSTFACTORIZE(S) ( SLUMT_CONTENT(S)->first_factorize ) #define SM_A(S) ( SLUMT_CONTENT(S)->A ) #define SM_AC(S) ( SLUMT_CONTENT(S)->AC ) #define SM_L(S) ( SLUMT_CONTENT(S)->L ) #define SM_U(S) ( SLUMT_CONTENT(S)->U ) #define SM_B(S) ( SLUMT_CONTENT(S)->B ) #define GSTAT(S) ( SLUMT_CONTENT(S)->Gstat ) #define PERMR(S) ( SLUMT_CONTENT(S)->perm_r ) #define PERMC(S) ( SLUMT_CONTENT(S)->perm_c ) #define SIZE(S) ( SLUMT_CONTENT(S)->N ) #define NUMTHREADS(S) ( SLUMT_CONTENT(S)->num_threads ) #define DIAGPIVOTTHRESH(S) ( SLUMT_CONTENT(S)->diag_pivot_thresh ) #define ORDERING(S) ( SLUMT_CONTENT(S)->ordering ) #define OPTIONS(S) ( SLUMT_CONTENT(S)->options ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new SuperLUMT linear solver */ SUNLinearSolver SUNLinSol_SuperLUMT(N_Vector y, SUNMatrix A, int num_threads, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_SuperLUMT content; sunindextype MatrixRows; /* Check compatibility with supplied SUNMatrix and N_Vector */ if (SUNMatGetID(A) != SUNMATRIX_SPARSE) return(NULL); if (SUNSparseMatrix_Rows(A) != SUNSparseMatrix_Columns(A)) return(NULL); if ( (N_VGetVectorID(y) != SUNDIALS_NVEC_SERIAL) && (N_VGetVectorID(y) != SUNDIALS_NVEC_OPENMP) && (N_VGetVectorID(y) != SUNDIALS_NVEC_PTHREADS) ) return(NULL); 
MatrixRows = SUNSparseMatrix_Rows(A); if (MatrixRows != N_VGetLength(y)) return(NULL); /* Create an empty linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_SuperLUMT; S->ops->getid = SUNLinSolGetID_SuperLUMT; S->ops->initialize = SUNLinSolInitialize_SuperLUMT; S->ops->setup = SUNLinSolSetup_SuperLUMT; S->ops->solve = SUNLinSolSolve_SuperLUMT; S->ops->lastflag = SUNLinSolLastFlag_SuperLUMT; S->ops->space = SUNLinSolSpace_SuperLUMT; S->ops->free = SUNLinSolFree_SuperLUMT; /* Create content */ content = NULL; content = (SUNLinearSolverContent_SuperLUMT) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->N = MatrixRows; content->last_flag = 0; content->num_threads = num_threads; content->diag_pivot_thresh = ONE; content->ordering = SUNSLUMT_ORDERING_DEFAULT; content->perm_r = NULL; content->perm_c = NULL; content->Gstat = NULL; content->A = NULL; content->AC = NULL; content->L = NULL; content->U = NULL; content->B = NULL; content->options = NULL; /* Allocate content */ content->perm_r = (sunindextype *) malloc(MatrixRows*sizeof(sunindextype)); if (content->perm_r == NULL) { SUNLinSolFree(S); return(NULL); } content->perm_c = (sunindextype *) malloc(MatrixRows*sizeof(sunindextype)); if (content->perm_c == NULL) { SUNLinSolFree(S); return(NULL); } content->Gstat = (Gstat_t *) malloc(sizeof(Gstat_t)); if (content->Gstat == NULL) { SUNLinSolFree(S); return(NULL); } content->A = (SuperMatrix *) malloc(sizeof(SuperMatrix)); if (content->A == NULL) { SUNLinSolFree(S); return(NULL); } content->A->Store = NULL; content->AC = (SuperMatrix *) malloc(sizeof(SuperMatrix)); if (content->AC == NULL) { SUNLinSolFree(S); return(NULL); } content->AC->Store = NULL; content->L = (SuperMatrix *) malloc(sizeof(SuperMatrix)); if (content->L == NULL) { SUNLinSolFree(S); return(NULL); } 
content->L->Store = NULL; content->U = (SuperMatrix *) malloc(sizeof(SuperMatrix)); if (content->U == NULL) { SUNLinSolFree(S); return(NULL); } content->U->Store = NULL; content->B = (SuperMatrix *) malloc(sizeof(SuperMatrix)); if (content->B == NULL) { SUNLinSolFree(S); return(NULL); } content->B->Store = NULL; xCreate_Dense_Matrix(content->B, MatrixRows, 1, NULL, MatrixRows, SLU_DN, SLU_D, SLU_GE); content->options = (superlumt_options_t *) malloc(sizeof(superlumt_options_t)); if (content->options == NULL) { SUNLinSolFree(S); return(NULL); } StatAlloc(MatrixRows, num_threads, sp_ienv(1), sp_ienv(2), content->Gstat); return(S); } /* ---------------------------------------------------------------------------- * Function to set the ordering type for a SuperLUMT linear solver */ int SUNLinSol_SuperLUMTSetOrdering(SUNLinearSolver S, int ordering_choice) { /* Check for legal ordering_choice */ if ((ordering_choice < 0) || (ordering_choice > 3)) return(SUNLS_ILL_INPUT); /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set ordering_choice */ ORDERING(S) = ordering_choice; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_SuperLUMT(SUNLinearSolver S) { return(SUNLINEARSOLVER_DIRECT); } SUNLinearSolver_ID SUNLinSolGetID_SuperLUMT(SUNLinearSolver S) { return(SUNLINEARSOLVER_SUPERLUMT); } int SUNLinSolInitialize_SuperLUMT(SUNLinearSolver S) { /* force a first factorization */ FIRSTFACTORIZE(S) = 1; /* Initialize statistics variables */ StatInit(SIZE(S), NUMTHREADS(S), GSTAT(S)); LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetup_SuperLUMT(SUNLinearSolver S, SUNMatrix A) { int_t retval; int panel_size, relax, lwork; double drop_tol; fact_t fact; trans_t trans; yes_no_t refact, usepr; void *work; /* 
Set option values for SuperLU_MT */ panel_size = sp_ienv(1); relax = sp_ienv(2); fact = EQUILIBRATE; trans = (SUNSparseMatrix_SparseType(A) == CSC_MAT) ? NOTRANS : TRANS; usepr = NO; drop_tol = ZERO; lwork = 0; work = NULL; /* free and reallocate sparse matrix */ if (SM_A(S)->Store) SUPERLU_FREE(SM_A(S)->Store); xCreate_CompCol_Matrix(SM_A(S), SUNSparseMatrix_Rows(A), SUNSparseMatrix_Columns(A), SUNSparseMatrix_NNZ(A), SUNSparseMatrix_Data(A), (int_t*) SUNSparseMatrix_IndexValues(A), (int_t*) SUNSparseMatrix_IndexPointers(A), SLU_NC, SLU_D, SLU_GE); /* On first decomposition, set up reusable pieces */ if (FIRSTFACTORIZE(S)) { /* Get column permutation vector perm_c[], according to ordering */ get_perm_c(ORDERING(S), SM_A(S), (int_t *) PERMC(S)); refact = NO; FIRSTFACTORIZE(S) = 0; } else { /* Re-initialize statistics variables */ StatInit(SIZE(S), NUMTHREADS(S), GSTAT(S)); Destroy_CompCol_Permuted(SM_AC(S)); refact = YES; } /* Initialize the option structure using the user-input parameters. Subsequent calls will re-initialize options. Apply perm_c to columns of original A to form AC */ pxgstrf_init(NUMTHREADS(S), fact, trans, refact, panel_size, relax, DIAGPIVOTTHRESH(S), usepr, drop_tol, (int_t *) PERMC(S), (int_t *) PERMR(S), work, lwork, SM_A(S), SM_AC(S), OPTIONS(S), GSTAT(S)); /* Compute the LU factorization of A. The following routine will create num_threads threads. */ pxgstrf(OPTIONS(S), SM_AC(S), (int_t *) PERMR(S), SM_L(S), SM_U(S), GSTAT(S), &retval); if (retval != 0) { LASTFLAG(S) = (retval < 0) ? 
SUNLS_PACKAGE_FAIL_UNREC : SUNLS_PACKAGE_FAIL_REC; return(LASTFLAG(S)); } LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSolve_SuperLUMT(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { int_t retval; realtype *xdata; DNformat *Bstore; trans_t trans; /* copy b into x */ N_VScale(ONE, b, x); /* access x data array */ xdata = N_VGetArrayPointer(x); if (xdata == NULL) { LASTFLAG(S) = SUNLS_MEM_FAIL; return(LASTFLAG(S)); } Bstore = (DNformat *) (SM_B(S)->Store); Bstore->nzval = xdata; /* Call SuperLUMT to solve the linear system using L and U */ trans = (SUNSparseMatrix_SparseType(A) == CSC_MAT) ? NOTRANS : TRANS; xgstrs(trans, SM_L(S), SM_U(S), (int_t *) PERMR(S), (int_t *) PERMC(S), SM_B(S), GSTAT(S), &retval); if (retval != 0) { LASTFLAG(S) = SUNLS_PACKAGE_FAIL_UNREC; return(LASTFLAG(S)); } LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } sunindextype SUNLinSolLastFlag_SuperLUMT(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return(LASTFLAG(S)); } int SUNLinSolSpace_SuperLUMT(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { /* since the SuperLU_MT structures are opaque objects, we omit those from these results */ *leniwLS = 5 + 2*SIZE(S); *lenrwLS = 1; return(SUNLS_SUCCESS); } int SUNLinSolFree_SuperLUMT(SUNLinearSolver S) { /* return with success if already freed */ if (S == NULL) return(SUNLS_SUCCESS); /* delete items from the contents structure (if it exists) */ if (S->content) { if (OPTIONS(S) && SM_AC(S)) { pxgstrf_finalize(OPTIONS(S), SM_AC(S)); } if (PERMR(S)) { free(PERMR(S)); PERMR(S) = NULL; } if (PERMC(S)) { free(PERMC(S)); PERMC(S) = NULL; } if (OPTIONS(S)) { free(OPTIONS(S)); OPTIONS(S) = NULL; } if (SM_L(S)) { Destroy_SuperNode_SCP(SM_L(S)); free(SM_L(S)); SM_L(S) = NULL; } if (SM_U(S)) { Destroy_CompCol_NCP(SM_U(S)); free(SM_U(S)); SM_U(S) = NULL; } if (GSTAT(S)) { StatFree(GSTAT(S)); free(GSTAT(S)); GSTAT(S) = NULL; } if (SM_B(S)) { 
Destroy_SuperMatrix_Store(SM_B(S)); free(SM_B(S)); SM_B(S) = NULL; } if (SM_A(S)) { if (SM_A(S)->Store) { SUPERLU_FREE(SM_A(S)->Store); SM_A(S)->Store = NULL; } free(SM_A(S)); SM_A(S) = NULL; } if (SM_AC(S)) { free(SM_AC(S)); SM_AC(S) = NULL; } free(S->content); S->content = NULL; } /* delete generic structures */ if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sunlinsol/spfgmr/0000755000176200001440000000000014511135055016566 5ustar liggesusersStanHeaders/src/sunlinsol/spfgmr/sunlinsol_spfgmr.c0000644000176200001440000006077614645137106022365 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on sundials_spfgmr.c code, written by Daniel R. Reynolds * and Hilari C. Tiedeman @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the SPFGMR implementation of * the SUNLINSOL package. 
* -----------------------------------------------------------------*/ #include #include #include #include #include "sundials_debug.h" #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * SPFGMR solver structure accessibility macros: * ----------------------------------------------------------------- */ #define SPFGMR_CONTENT(S) ( (SUNLinearSolverContent_SPFGMR)(S->content) ) #define LASTFLAG(S) ( SPFGMR_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new SPFGMR linear solver */ SUNLinearSolver SUNLinSol_SPFGMR(N_Vector y, int pretype, int maxl, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_SPFGMR content; /* set preconditioning flag (enabling any preconditioner implies right preconditioning, since SPFGMR does not support left preconditioning) */ pretype = ( (pretype == SUN_PREC_LEFT) || (pretype == SUN_PREC_RIGHT) || (pretype == SUN_PREC_BOTH) ) ? 
SUN_PREC_RIGHT : SUN_PREC_NONE; /* if maxl input is illegal, set to default */ if (maxl <= 0) maxl = SUNSPFGMR_MAXL_DEFAULT; /* check that the supplied N_Vector supports all requisite operations */ if ( (y->ops->nvclone == NULL) || (y->ops->nvdestroy == NULL) || (y->ops->nvlinearsum == NULL) || (y->ops->nvconst == NULL) || (y->ops->nvprod == NULL) || (y->ops->nvdiv == NULL) || (y->ops->nvscale == NULL) || (y->ops->nvdotprod == NULL) ) return(NULL); /* Create linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_SPFGMR; S->ops->getid = SUNLinSolGetID_SPFGMR; S->ops->setatimes = SUNLinSolSetATimes_SPFGMR; S->ops->setpreconditioner = SUNLinSolSetPreconditioner_SPFGMR; S->ops->setscalingvectors = SUNLinSolSetScalingVectors_SPFGMR; S->ops->setzeroguess = SUNLinSolSetZeroGuess_SPFGMR; S->ops->initialize = SUNLinSolInitialize_SPFGMR; S->ops->setup = SUNLinSolSetup_SPFGMR; S->ops->solve = SUNLinSolSolve_SPFGMR; S->ops->numiters = SUNLinSolNumIters_SPFGMR; S->ops->resnorm = SUNLinSolResNorm_SPFGMR; S->ops->resid = SUNLinSolResid_SPFGMR; S->ops->lastflag = SUNLinSolLastFlag_SPFGMR; S->ops->space = SUNLinSolSpace_SPFGMR; S->ops->free = SUNLinSolFree_SPFGMR; /* Create content */ content = NULL; content = (SUNLinearSolverContent_SPFGMR) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = 0; content->maxl = maxl; content->pretype = pretype; content->gstype = SUNSPFGMR_GSTYPE_DEFAULT; content->max_restarts = SUNSPFGMR_MAXRS_DEFAULT; content->zeroguess = SUNFALSE; content->numiters = 0; content->resnorm = ZERO; content->xcor = NULL; content->vtemp = NULL; content->s1 = NULL; content->s2 = NULL; content->ATimes = NULL; content->ATData = NULL; content->Psetup = NULL; content->Psolve = NULL; content->PData = NULL; content->V = NULL; content->Z = NULL; content->Hes = NULL; 
content->givens = NULL; content->yg = NULL; content->cv = NULL; content->Xv = NULL; content->print_level = 0; content->info_file = stdout; /* Allocate content */ content->xcor = N_VClone(y); if (content->xcor == NULL) { SUNLinSolFree(S); return(NULL); } content->vtemp = N_VClone(y); if (content->vtemp == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* ---------------------------------------------------------------------------- * Function to toggle preconditioning on/off -- turns on if pretype is any * one of SUN_PREC_LEFT, SUN_PREC_RIGHT or SUN_PREC_BOTH; otherwise turns off */ int SUNLinSol_SPFGMRSetPrecType(SUNLinearSolver S, int pretype) { /* Check for legal pretype */ pretype = ( (pretype == SUN_PREC_LEFT) || (pretype == SUN_PREC_RIGHT) || (pretype == SUN_PREC_BOTH) ) ? SUN_PREC_RIGHT : SUN_PREC_NONE; /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set pretype */ SPFGMR_CONTENT(S)->pretype = pretype; return(SUNLS_SUCCESS); } /* ---------------------------------------------------------------------------- * Function to set the type of Gram-Schmidt orthogonalization for SPFGMR to use */ int SUNLinSol_SPFGMRSetGSType(SUNLinearSolver S, int gstype) { /* Check for legal gstype */ if ((gstype != SUN_MODIFIED_GS) && (gstype != SUN_CLASSICAL_GS)) { return(SUNLS_ILL_INPUT); } /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set pretype */ SPFGMR_CONTENT(S)->gstype = gstype; return(SUNLS_SUCCESS); } /* ---------------------------------------------------------------------------- * Function to set the maximum number of FGMRES restarts to allow */ int SUNLinSol_SPFGMRSetMaxRestarts(SUNLinearSolver S, int maxrs) { /* Illegal maxrs implies use of default value */ if (maxrs < 0) maxrs = SUNSPFGMR_MAXRS_DEFAULT; /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set max_restarts */ SPFGMR_CONTENT(S)->max_restarts = maxrs; return(SUNLS_SUCCESS); } /* * 
----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_SPFGMR(SUNLinearSolver S) { return(SUNLINEARSOLVER_ITERATIVE); } SUNLinearSolver_ID SUNLinSolGetID_SPFGMR(SUNLinearSolver S) { return(SUNLINEARSOLVER_SPFGMR); } int SUNLinSolInitialize_SPFGMR(SUNLinearSolver S) { int k; SUNLinearSolverContent_SPFGMR content; /* set shortcut to SPFGMR memory structure */ if (S == NULL) return(SUNLS_MEM_NULL); content = SPFGMR_CONTENT(S); /* ensure valid options */ if (content->max_restarts < 0) content->max_restarts = SUNSPFGMR_MAXRS_DEFAULT; if (content->ATimes == NULL) { LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } if ( (content->pretype != SUN_PREC_LEFT) && (content->pretype != SUN_PREC_RIGHT) && (content->pretype != SUN_PREC_BOTH) ) content->pretype = SUN_PREC_NONE; if ((content->pretype != SUN_PREC_NONE) && (content->Psolve == NULL)) { LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* allocate solver-specific memory (where the size depends on the choice of maxl) here */ /* Krylov subspace vectors */ if (content->V == NULL) { content->V = N_VCloneVectorArray(content->maxl+1, content->vtemp); if (content->V == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* Preconditioned basis vectors */ if (content->Z == NULL) { content->Z = N_VCloneVectorArray(content->maxl+1, content->vtemp); if (content->Z == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* Hessenberg matrix Hes */ if (content->Hes == NULL) { content->Hes = (realtype **) malloc((content->maxl+1)*sizeof(realtype *)); if (content->Hes == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } for (k=0; k<=content->maxl; k++) { content->Hes[k] = NULL; content->Hes[k] = (realtype *) malloc(content->maxl*sizeof(realtype)); if (content->Hes[k] == NULL) { content->last_flag = 
SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } } /* Givens rotation components */ if (content->givens == NULL) { content->givens = (realtype *) malloc(2*content->maxl*sizeof(realtype)); if (content->givens == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* y and g vectors */ if (content->yg == NULL) { content->yg = (realtype *) malloc((content->maxl+1)*sizeof(realtype)); if (content->yg == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* cv vector for fused vector ops */ if (content->cv == NULL) { content->cv = (realtype *) malloc((content->maxl+1)*sizeof(realtype)); if (content->cv == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* Xv vector for fused vector ops */ if (content->Xv == NULL) { content->Xv = (N_Vector *) malloc((content->maxl+1)*sizeof(N_Vector)); if (content->Xv == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* return with success */ content->last_flag = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int SUNLinSolSetATimes_SPFGMR(SUNLinearSolver S, void* ATData, SUNATimesFn ATimes) { /* set function pointers to integrator-supplied ATimes routine and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPFGMR_CONTENT(S)->ATimes = ATimes; SPFGMR_CONTENT(S)->ATData = ATData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetPreconditioner_SPFGMR(SUNLinearSolver S, void* PData, SUNPSetupFn Psetup, SUNPSolveFn Psolve) { /* set function pointers to integrator-supplied Psetup and PSolve routines and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPFGMR_CONTENT(S)->Psetup = Psetup; SPFGMR_CONTENT(S)->Psolve = Psolve; SPFGMR_CONTENT(S)->PData = PData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetScalingVectors_SPFGMR(SUNLinearSolver S, N_Vector s1, N_Vector s2) { /* set N_Vector pointers to integrator-supplied scaling vectors, and return with success */ if (S == NULL) 
return(SUNLS_MEM_NULL); SPFGMR_CONTENT(S)->s1 = s1; SPFGMR_CONTENT(S)->s2 = s2; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetZeroGuess_SPFGMR(SUNLinearSolver S, booleantype onoff) { /* set flag indicating a zero initial guess */ if (S == NULL) return(SUNLS_MEM_NULL); SPFGMR_CONTENT(S)->zeroguess = onoff; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetup_SPFGMR(SUNLinearSolver S, SUNMatrix A) { int ier; SUNPSetupFn Psetup; void* PData; /* Set shortcuts to SPFGMR memory structures */ if (S == NULL) return(SUNLS_MEM_NULL); Psetup = SPFGMR_CONTENT(S)->Psetup; PData = SPFGMR_CONTENT(S)->PData; /* no solver-specific setup is required, but if user-supplied Psetup routine exists, call that here */ if (Psetup != NULL) { ier = Psetup(PData); if (ier != 0) { LASTFLAG(S) = (ier < 0) ? SUNLS_PSET_FAIL_UNREC : SUNLS_PSET_FAIL_REC; return(LASTFLAG(S)); } } /* return with success */ return(SUNLS_SUCCESS); } int SUNLinSolSolve_SPFGMR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype delta) { /* local data and shortcut variables */ N_Vector *V, *Z, xcor, vtemp, s1, s2; realtype **Hes, *givens, *yg, *res_norm; realtype beta, rotation_product, r_norm, s_product, rho; booleantype preOnRight, scale1, scale2, converged; booleantype *zeroguess; int i, j, k, l, l_max, krydim, ier, ntries, max_restarts, gstype; int *nli; void *A_data, *P_data; SUNATimesFn atimes; SUNPSolveFn psolve; /* local shortcuts for fused vector operations */ realtype* cv; N_Vector* Xv; /* Initialize some variables */ krydim = 0; /* Make local shorcuts to solver variables. 
*/ if (S == NULL) return(SUNLS_MEM_NULL); l_max = SPFGMR_CONTENT(S)->maxl; max_restarts = SPFGMR_CONTENT(S)->max_restarts; gstype = SPFGMR_CONTENT(S)->gstype; V = SPFGMR_CONTENT(S)->V; Z = SPFGMR_CONTENT(S)->Z; Hes = SPFGMR_CONTENT(S)->Hes; givens = SPFGMR_CONTENT(S)->givens; xcor = SPFGMR_CONTENT(S)->xcor; yg = SPFGMR_CONTENT(S)->yg; vtemp = SPFGMR_CONTENT(S)->vtemp; s1 = SPFGMR_CONTENT(S)->s1; s2 = SPFGMR_CONTENT(S)->s2; A_data = SPFGMR_CONTENT(S)->ATData; P_data = SPFGMR_CONTENT(S)->PData; atimes = SPFGMR_CONTENT(S)->ATimes; psolve = SPFGMR_CONTENT(S)->Psolve; zeroguess = &(SPFGMR_CONTENT(S)->zeroguess); nli = &(SPFGMR_CONTENT(S)->numiters); res_norm = &(SPFGMR_CONTENT(S)->resnorm); cv = SPFGMR_CONTENT(S)->cv; Xv = SPFGMR_CONTENT(S)->Xv; /* Initialize counters and convergence flag */ *nli = 0; converged = SUNFALSE; /* Set booleantype flags for internal solver options */ preOnRight = ( (SPFGMR_CONTENT(S)->pretype == SUN_PREC_LEFT) || (SPFGMR_CONTENT(S)->pretype == SUN_PREC_RIGHT) || (SPFGMR_CONTENT(S)->pretype == SUN_PREC_BOTH) ); scale1 = (s1 != NULL); scale2 = (s2 != NULL); #ifdef SUNDIALS_BUILD_WITH_MONITORING if (SPFGMR_CONTENT(S)->print_level && SPFGMR_CONTENT(S)->info_file) STAN_SUNDIALS_FPRINTF(SPFGMR_CONTENT(S)->info_file, "SUNLINSOL_SPFGMR:\n"); #endif /* Check if Atimes function has been set */ if (atimes == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } /* If preconditioning, check if psolve has been set */ if (preOnRight && psolve == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* Set vtemp and V[0] to initial (unscaled) residual r_0 = b - A*x_0 */ if (*zeroguess) { N_VScale(ONE, b, vtemp); } else { ier = atimes(A_data, x, vtemp); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } N_VLinearSum(ONE, b, -ONE, vtemp, vtemp); } /* Apply left scaling to vtemp = r_0 to fill V[0]. 
*/ if (scale1) { N_VProd(s1, vtemp, V[0]); } else { N_VScale(ONE, vtemp, V[0]); } /* Set r_norm = beta to L2 norm of V[0] = s1 r_0, and return if small */ *res_norm = r_norm = beta = SUNRsqrt(N_VDotProd(V[0], V[0])); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print initial residual */ if (SPFGMR_CONTENT(S)->print_level && SPFGMR_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPFGMR_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) 0, *res_norm); } #endif if (r_norm <= delta) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* Initialize rho to avoid compiler warning message */ rho = beta; /* Set xcor = 0. */ N_VConst(ZERO, xcor); /* Begin outer iterations: up to (max_restarts + 1) attempts. */ for (ntries=0; ntries<=max_restarts; ntries++) { /* Initialize the Hessenberg matrix Hes and Givens rotation product. Normalize the initial vector V[0]. */ for (i=0; i<=l_max; i++) for (j=0; jprint_level && SPFGMR_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPFGMR_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) *nli, *res_norm); } #endif if (rho <= delta) { converged = SUNTRUE; break; } /* Normalize V[l+1] with norm value from the Gram-Schmidt routine. */ N_VScale(ONE/Hes[l+1][l], V[l+1], V[l+1]); } /* Inner loop is done. Compute the new correction vector xcor. */ /* Construct g, then solve for y. */ yg[0] = r_norm; for (i=1; i<=krydim; i++) yg[i]=ZERO; if (SUNQRsol(krydim, Hes, givens, yg) != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_QRSOL_FAIL; return(LASTFLAG(S)); } /* Add correction vector Z_l y to xcor. */ cv[0] = ONE; Xv[0] = xcor; for (k=0; k0; i--) { yg[i] = s_product*givens[2*i-2]; s_product *= givens[2*i-1]; } yg[0] = s_product; /* Scale r_norm and yg. */ r_norm *= s_product; for (i=0; i<=krydim; i++) yg[i] *= r_norm; r_norm = SUNRabs(r_norm); /* Multiply yg by V_(krydim+1) to get last residual vector; restart. 
*/ for (k=0; k<=krydim; k++) { cv[k] = yg[k]; Xv[k] = V[k]; } ier = N_VLinearCombination(krydim+1, cv, Xv, V[0]); if (ier != SUNLS_SUCCESS) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_VECTOROP_ERR; return(SUNLS_VECTOROP_ERR); } } /* Failed to converge, even after allowed restarts. If the residual norm was reduced below its initial value, compute and return x anyway. Otherwise return failure flag. */ if (rho < beta) { if (*zeroguess) N_VScale(ONE, xcor, x); else N_VLinearSum(ONE, x, ONE, xcor, x); *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_RES_REDUCED; return(LASTFLAG(S)); } *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_CONV_FAIL; return(LASTFLAG(S)); } int SUNLinSolNumIters_SPFGMR(SUNLinearSolver S) { /* return the stored 'numiters' value */ if (S == NULL) return(-1); return (SPFGMR_CONTENT(S)->numiters); } realtype SUNLinSolResNorm_SPFGMR(SUNLinearSolver S) { /* return the stored 'resnorm' value */ if (S == NULL) return(-ONE); return (SPFGMR_CONTENT(S)->resnorm); } N_Vector SUNLinSolResid_SPFGMR(SUNLinearSolver S) { /* return the stored 'vtemp' vector */ return (SPFGMR_CONTENT(S)->vtemp); } sunindextype SUNLinSolLastFlag_SPFGMR(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return (LASTFLAG(S)); } int SUNLinSolSpace_SPFGMR(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { int maxl; sunindextype liw1, lrw1; maxl = SPFGMR_CONTENT(S)->maxl; if (SPFGMR_CONTENT(S)->vtemp->ops->nvspace) N_VSpace(SPFGMR_CONTENT(S)->vtemp, &lrw1, &liw1); else lrw1 = liw1 = 0; *lenrwLS = lrw1*(2*maxl + 4) + maxl*(maxl + 5) + 2; *leniwLS = liw1*(2*maxl + 4); return(SUNLS_SUCCESS); } int SUNLinSolFree_SPFGMR(SUNLinearSolver S) { int k; if (S == NULL) return(SUNLS_SUCCESS); if (S->content) { /* delete items from within the content structure */ if (SPFGMR_CONTENT(S)->xcor) { N_VDestroy(SPFGMR_CONTENT(S)->xcor); SPFGMR_CONTENT(S)->xcor = NULL; } if (SPFGMR_CONTENT(S)->vtemp) { N_VDestroy(SPFGMR_CONTENT(S)->vtemp); SPFGMR_CONTENT(S)->vtemp = 
NULL; } if (SPFGMR_CONTENT(S)->V) { N_VDestroyVectorArray(SPFGMR_CONTENT(S)->V, SPFGMR_CONTENT(S)->maxl+1); SPFGMR_CONTENT(S)->V = NULL; } if (SPFGMR_CONTENT(S)->Z) { N_VDestroyVectorArray(SPFGMR_CONTENT(S)->Z, SPFGMR_CONTENT(S)->maxl+1); SPFGMR_CONTENT(S)->Z = NULL; } if (SPFGMR_CONTENT(S)->Hes) { for (k=0; k<=SPFGMR_CONTENT(S)->maxl; k++) if (SPFGMR_CONTENT(S)->Hes[k]) { free(SPFGMR_CONTENT(S)->Hes[k]); SPFGMR_CONTENT(S)->Hes[k] = NULL; } free(SPFGMR_CONTENT(S)->Hes); SPFGMR_CONTENT(S)->Hes = NULL; } if (SPFGMR_CONTENT(S)->givens) { free(SPFGMR_CONTENT(S)->givens); SPFGMR_CONTENT(S)->givens = NULL; } if (SPFGMR_CONTENT(S)->yg) { free(SPFGMR_CONTENT(S)->yg); SPFGMR_CONTENT(S)->yg = NULL; } if (SPFGMR_CONTENT(S)->cv) { free(SPFGMR_CONTENT(S)->cv); SPFGMR_CONTENT(S)->cv = NULL; } if (SPFGMR_CONTENT(S)->Xv) { free(SPFGMR_CONTENT(S)->Xv); SPFGMR_CONTENT(S)->Xv = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } int SUNLinSolSetInfoFile_SPFGMR(SUNLinearSolver S, FILE* info_file) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); SPFGMR_CONTENT(S)->info_file = info_file; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetInfoFile_SPFGMR: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } int SUNLinSolSetPrintLevel_SPFGMR(SUNLinearSolver S, int print_level) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); /* check for valid print level */ if (print_level < 0 || print_level > 1) return(SUNLS_ILL_INPUT); SPFGMR_CONTENT(S)->print_level = print_level; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetPrintLevel_SPFGMR: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } 
StanHeaders/src/sunlinsol/spfgmr/fmod/0000755000176200001440000000000014511135055017513 5ustar liggesusersStanHeaders/src/sunlinsol/spfgmr/fmod/fsunlinsol_spfgmr_mod.c0000644000176200001440000003363414645137106024310 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_spfgmr.h" SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_SPFGMR(N_Vector farg1, int const *farg2, int const *farg3, void *farg4) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int arg3 ; SUNContext arg4 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (SUNContext)(farg4); result = (SUNLinearSolver)SUNLinSol_SPFGMR(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPFGMRSetPrecType(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = 
(int)SUNLinSol_SPFGMRSetPrecType(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPFGMRSetGSType(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_SPFGMRSetGSType(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPFGMRSetMaxRestarts(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_SPFGMRSetMaxRestarts(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_SPFGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_SPFGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetID_SPFGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_SPFGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_SPFGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_SPFGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetATimes_SPFGMR(SUNLinearSolver farg1, void *farg2, SUNATimesFn farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNATimesFn arg3 = (SUNATimesFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNATimesFn)(farg3); result = (int)SUNLinSolSetATimes_SPFGMR(arg1,arg2,arg3); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPreconditioner_SPFGMR(SUNLinearSolver farg1, void *farg2, SUNPSetupFn farg3, SUNPSolveFn farg4) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNPSetupFn arg3 = (SUNPSetupFn) 0 ; SUNPSolveFn arg4 = (SUNPSolveFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNPSetupFn)(farg3); arg4 = (SUNPSolveFn)(farg4); result = (int)SUNLinSolSetPreconditioner_SPFGMR(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetScalingVectors_SPFGMR(SUNLinearSolver farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNLinSolSetScalingVectors_SPFGMR(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetZeroGuess_SPFGMR(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetZeroGuess_SPFGMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_SPFGMR(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_SPFGMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_SPFGMR(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 
= (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_SPFGMR(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolNumIters_SPFGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolNumIters_SPFGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FSUNLinSolResNorm_SPFGMR(SUNLinearSolver farg1) { double fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; realtype result; arg1 = (SUNLinearSolver)(farg1); result = (realtype)SUNLinSolResNorm_SPFGMR(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FSUNLinSolResid_SPFGMR(SUNLinearSolver farg1) { N_Vector fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector result; arg1 = (SUNLinearSolver)(farg1); result = (N_Vector)SUNLinSolResid_SPFGMR(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_SPFGMR(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_SPFGMR(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_SPFGMR(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_SPFGMR(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_SPFGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_SPFGMR(arg1); fresult = (int)(result); return fresult; } 
SWIGEXPORT int _wrap_FSUNLinSolSetInfoFile_SPFGMR(SUNLinearSolver farg1, void *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNLinSolSetInfoFile_SPFGMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPrintLevel_SPFGMR(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetPrintLevel_SPFGMR(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/spfgmr/fmod/fsunlinsol_spfgmr_mod.f900000644000176200001440000004036714645137106024465 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunlinsol_spfgmr_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: SUNSPFGMR_MAXL_DEFAULT = 5_C_INT integer(C_INT), parameter, public :: SUNSPFGMR_MAXRS_DEFAULT = 0_C_INT public :: FSUNLinSol_SPFGMR public :: FSUNLinSol_SPFGMRSetPrecType public :: FSUNLinSol_SPFGMRSetGSType public :: FSUNLinSol_SPFGMRSetMaxRestarts public :: FSUNLinSolGetType_SPFGMR public :: FSUNLinSolGetID_SPFGMR public :: FSUNLinSolInitialize_SPFGMR public :: FSUNLinSolSetATimes_SPFGMR public :: FSUNLinSolSetPreconditioner_SPFGMR public :: FSUNLinSolSetScalingVectors_SPFGMR public :: FSUNLinSolSetZeroGuess_SPFGMR public :: FSUNLinSolSetup_SPFGMR public :: FSUNLinSolSolve_SPFGMR public :: FSUNLinSolNumIters_SPFGMR public :: FSUNLinSolResNorm_SPFGMR public :: FSUNLinSolResid_SPFGMR public :: FSUNLinSolLastFlag_SPFGMR public :: FSUNLinSolSpace_SPFGMR public :: FSUNLinSolFree_SPFGMR public :: FSUNLinSolSetInfoFile_SPFGMR public :: FSUNLinSolSetPrintLevel_SPFGMR ! WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_SPFGMR(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSol_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FSUNLinSol_SPFGMRSetPrecType(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPFGMRSetPrecType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_SPFGMRSetGSType(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPFGMRSetGSType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_SPFGMRSetMaxRestarts(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPFGMRSetMaxRestarts") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: 
farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetType_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolGetID_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetATimes_SPFGMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetATimes_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPreconditioner_SPFGMR(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSolSetPreconditioner_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetScalingVectors_SPFGMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetScalingVectors_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetZeroGuess_SPFGMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetZeroGuess_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_SPFGMR(farg1, farg2) & bind(C, 
name="_wrap_FSUNLinSolSetup_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve_SPFGMR(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolNumIters_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolNumIters_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolResNorm_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolResNorm_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FSUNLinSolResid_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolResid_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNLinSolLastFlag_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_SPFGMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree_SPFGMR(farg1) & bind(C, name="_wrap_FSUNLinSolFree_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetInfoFile_SPFGMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetInfoFile_SPFGMR") & 
result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPrintLevel_SPFGMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetPrintLevel_SPFGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FSUNLinSol_SPFGMR(y, pretype, maxl, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y integer(C_INT), intent(in) :: pretype integer(C_INT), intent(in) :: maxl type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = c_loc(y) farg2 = pretype farg3 = maxl farg4 = sunctx fresult = swigc_FSUNLinSol_SPFGMR(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FSUNLinSol_SPFGMRSetPrecType(s, pretype) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: pretype integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = pretype fresult = swigc_FSUNLinSol_SPFGMRSetPrecType(farg1, farg2) swig_result = fresult end function function FSUNLinSol_SPFGMRSetGSType(s, gstype) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: gstype integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = gstype fresult = swigc_FSUNLinSol_SPFGMRSetGSType(farg1, farg2) swig_result = fresult end function function FSUNLinSol_SPFGMRSetMaxRestarts(s, maxrs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: 
swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: maxrs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = maxrs fresult = swigc_FSUNLinSol_SPFGMRSetMaxRestarts(farg1, farg2) swig_result = fresult end function function FSUNLinSolGetType_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_SPFGMR(farg1) swig_result = fresult end function function FSUNLinSolGetID_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetID_SPFGMR(farg1) swig_result = fresult end function function FSUNLinSolInitialize_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_SPFGMR(farg1) swig_result = fresult end function function FSUNLinSolSetATimes_SPFGMR(s, a_data, atimes) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: a_data type(C_FUNPTR), intent(in), value :: atimes integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = c_loc(s) farg2 = a_data farg3 = atimes fresult = swigc_FSUNLinSolSetATimes_SPFGMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetPreconditioner_SPFGMR(s, p_data, pset, psol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: p_data 
type(C_FUNPTR), intent(in), value :: pset type(C_FUNPTR), intent(in), value :: psol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = c_loc(s) farg2 = p_data farg3 = pset farg4 = psol fresult = swigc_FSUNLinSolSetPreconditioner_SPFGMR(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNLinSolSetScalingVectors_SPFGMR(s, s1, s2) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(N_Vector), target, intent(inout) :: s1 type(N_Vector), target, intent(inout) :: s2 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(s1) farg3 = c_loc(s2) fresult = swigc_FSUNLinSolSetScalingVectors_SPFGMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetZeroGuess_SPFGMR(s, onoff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: onoff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = onoff fresult = swigc_FSUNLinSolSetZeroGuess_SPFGMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetup_SPFGMR(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup_SPFGMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve_SPFGMR(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x 
type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_SPFGMR(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolNumIters_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolNumIters_SPFGMR(farg1) swig_result = fresult end function function FSUNLinSolResNorm_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResNorm_SPFGMR(farg1) swig_result = fresult end function function FSUNLinSolResid_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResid_SPFGMR(farg1) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolLastFlag_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_SPFGMR(farg1) swig_result = fresult end function function FSUNLinSolSpace_SPFGMR(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, 
intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_SPFGMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_SPFGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_SPFGMR(farg1) swig_result = fresult end function function FSUNLinSolSetInfoFile_SPFGMR(ls, info_file) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: ls type(C_PTR) :: info_file integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(ls) farg2 = info_file fresult = swigc_FSUNLinSolSetInfoFile_SPFGMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetPrintLevel_SPFGMR(ls, print_level) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: ls integer(C_INT), intent(in) :: print_level integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(ls) farg2 = print_level fresult = swigc_FSUNLinSolSetPrintLevel_SPFGMR(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sunlinsol/spgmr/0000755000176200001440000000000014511135055016420 5ustar liggesusersStanHeaders/src/sunlinsol/spgmr/fmod/0000755000176200001440000000000014511135055017345 5ustar liggesusersStanHeaders/src/sunlinsol/spgmr/fmod/fsunlinsol_spgmr_mod.c0000644000176200001440000003356114645137106023773 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). 
* Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_linearsolver.h" #include "sunlinsol/sunlinsol_spgmr.h" SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSol_SPGMR(N_Vector farg1, int const *farg2, int const *farg3, void *farg4) { SUNLinearSolver fresult ; N_Vector arg1 = (N_Vector) 0 ; int arg2 ; int arg3 ; SUNContext arg4 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (N_Vector)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (SUNContext)(farg4); result = (SUNLinearSolver)SUNLinSol_SPGMR(arg1,arg2,arg3,arg4); fresult = result; return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPGMRSetPrecType(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = 
(int)SUNLinSol_SPGMRSetPrecType(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPGMRSetGSType(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_SPGMRSetGSType(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSol_SPGMRSetMaxRestarts(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSol_SPGMRSetMaxRestarts(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetType_SPGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType_SPGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetID_SPGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID_SPGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize_SPGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize_SPGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetATimes_SPGMR(SUNLinearSolver farg1, void *farg2, SUNATimesFn farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNATimesFn arg3 = (SUNATimesFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNATimesFn)(farg3); result = (int)SUNLinSolSetATimes_SPGMR(arg1,arg2,arg3); fresult = (int)(result); return 
fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPreconditioner_SPGMR(SUNLinearSolver farg1, void *farg2, SUNPSetupFn farg3, SUNPSolveFn farg4) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNPSetupFn arg3 = (SUNPSetupFn) 0 ; SUNPSolveFn arg4 = (SUNPSolveFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNPSetupFn)(farg3); arg4 = (SUNPSolveFn)(farg4); result = (int)SUNLinSolSetPreconditioner_SPGMR(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetScalingVectors_SPGMR(SUNLinearSolver farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNLinSolSetScalingVectors_SPGMR(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetZeroGuess_SPGMR(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetZeroGuess_SPGMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup_SPGMR(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup_SPGMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve_SPGMR(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); 
arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve_SPGMR(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolNumIters_SPGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolNumIters_SPGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FSUNLinSolResNorm_SPGMR(SUNLinearSolver farg1) { double fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; realtype result; arg1 = (SUNLinearSolver)(farg1); result = (realtype)SUNLinSolResNorm_SPGMR(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FSUNLinSolResid_SPGMR(SUNLinearSolver farg1) { N_Vector fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector result; arg1 = (SUNLinearSolver)(farg1); result = (N_Vector)SUNLinSolResid_SPGMR(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag_SPGMR(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag_SPGMR(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace_SPGMR(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace_SPGMR(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree_SPGMR(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolFree_SPGMR(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FSUNLinSolSetInfoFile_SPGMR(SUNLinearSolver farg1, void *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNLinSolSetInfoFile_SPGMR(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPrintLevel_SPGMR(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetPrintLevel_SPGMR(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sunlinsol/spgmr/fmod/fsunlinsol_spgmr_mod.f900000644000176200001440000004021014645137106024134 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsunlinsol_spgmr_mod use, intrinsic :: ISO_C_BINDING use fsundials_linearsolver_mod use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: SUNSPGMR_MAXL_DEFAULT = 5_C_INT integer(C_INT), parameter, public :: SUNSPGMR_MAXRS_DEFAULT = 0_C_INT public :: FSUNLinSol_SPGMR public :: FSUNLinSol_SPGMRSetPrecType public :: FSUNLinSol_SPGMRSetGSType public :: FSUNLinSol_SPGMRSetMaxRestarts public :: FSUNLinSolGetType_SPGMR public :: FSUNLinSolGetID_SPGMR public :: FSUNLinSolInitialize_SPGMR public :: FSUNLinSolSetATimes_SPGMR public :: FSUNLinSolSetPreconditioner_SPGMR public :: FSUNLinSolSetScalingVectors_SPGMR public :: FSUNLinSolSetZeroGuess_SPGMR public :: FSUNLinSolSetup_SPGMR public :: FSUNLinSolSolve_SPGMR public :: FSUNLinSolNumIters_SPGMR public :: FSUNLinSolResNorm_SPGMR public :: FSUNLinSolResid_SPGMR public :: FSUNLinSolLastFlag_SPGMR public :: FSUNLinSolSpace_SPGMR public :: FSUNLinSolFree_SPGMR public :: FSUNLinSolSetInfoFile_SPGMR public :: FSUNLinSolSetPrintLevel_SPGMR ! WRAPPER DECLARATIONS interface function swigc_FSUNLinSol_SPGMR(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSol_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR) :: fresult end function function swigc_FSUNLinSol_SPGMRSetPrecType(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPGMRSetPrecType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_SPGMRSetGSType(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPGMRSetGSType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSol_SPGMRSetMaxRestarts(farg1, farg2) & bind(C, name="_wrap_FSUNLinSol_SPGMRSetMaxRestarts") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) 
:: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetType_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolGetType_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolGetID_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetATimes_SPGMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetATimes_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPreconditioner_SPGMR(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSolSetPreconditioner_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetScalingVectors_SPGMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetScalingVectors_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetZeroGuess_SPGMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetZeroGuess_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup_SPGMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup_SPGMR") & result(fresult) use, 
intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve_SPGMR(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolNumIters_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolNumIters_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolResNorm_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolResNorm_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FSUNLinSolResid_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolResid_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNLinSolLastFlag_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace_SPGMR(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree_SPGMR(farg1) & bind(C, name="_wrap_FSUNLinSolFree_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetInfoFile_SPGMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetInfoFile_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 
type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPrintLevel_SPGMR(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetPrintLevel_SPGMR") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FSUNLinSol_SPGMR(y, pretype, maxl, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(N_Vector), target, intent(inout) :: y integer(C_INT), intent(in) :: pretype integer(C_INT), intent(in) :: maxl type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = c_loc(y) farg2 = pretype farg3 = maxl farg4 = sunctx fresult = swigc_FSUNLinSol_SPGMR(farg1, farg2, farg3, farg4) call c_f_pointer(fresult, swig_result) end function function FSUNLinSol_SPGMRSetPrecType(s, pretype) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: pretype integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = pretype fresult = swigc_FSUNLinSol_SPGMRSetPrecType(farg1, farg2) swig_result = fresult end function function FSUNLinSol_SPGMRSetGSType(s, gstype) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: gstype integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = gstype fresult = swigc_FSUNLinSol_SPGMRSetGSType(farg1, farg2) swig_result = fresult end function function FSUNLinSol_SPGMRSetMaxRestarts(s, maxrs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: 
maxrs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = maxrs fresult = swigc_FSUNLinSol_SPGMRSetMaxRestarts(farg1, farg2) swig_result = fresult end function function FSUNLinSolGetType_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType_SPGMR(farg1) swig_result = fresult end function function FSUNLinSolGetID_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetID_SPGMR(farg1) swig_result = fresult end function function FSUNLinSolInitialize_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize_SPGMR(farg1) swig_result = fresult end function function FSUNLinSolSetATimes_SPGMR(s, a_data, atimes) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: a_data type(C_FUNPTR), intent(in), value :: atimes integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = c_loc(s) farg2 = a_data farg3 = atimes fresult = swigc_FSUNLinSolSetATimes_SPGMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetPreconditioner_SPGMR(s, p_data, pset, psol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: p_data type(C_FUNPTR), intent(in), value :: pset type(C_FUNPTR), intent(in), value :: psol integer(C_INT) :: 
fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = c_loc(s) farg2 = p_data farg3 = pset farg4 = psol fresult = swigc_FSUNLinSolSetPreconditioner_SPGMR(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNLinSolSetScalingVectors_SPGMR(s, s1, s2) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(N_Vector), target, intent(inout) :: s1 type(N_Vector), target, intent(inout) :: s2 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(s1) farg3 = c_loc(s2) fresult = swigc_FSUNLinSolSetScalingVectors_SPGMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetZeroGuess_SPGMR(s, onff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: onff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = onff fresult = swigc_FSUNLinSolSetZeroGuess_SPGMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetup_SPGMR(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup_SPGMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve_SPGMR(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: 
farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve_SPGMR(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolNumIters_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolNumIters_SPGMR(farg1) swig_result = fresult end function function FSUNLinSolResNorm_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResNorm_SPGMR(farg1) swig_result = fresult end function function FSUNLinSolResid_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResid_SPGMR(farg1) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolLastFlag_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag_SPGMR(farg1) swig_result = fresult end function function FSUNLinSolSpace_SPGMR(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) 
farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace_SPGMR(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree_SPGMR(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree_SPGMR(farg1) swig_result = fresult end function function FSUNLinSolSetInfoFile_SPGMR(ls, info_file) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: ls type(C_PTR) :: info_file integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(ls) farg2 = info_file fresult = swigc_FSUNLinSolSetInfoFile_SPGMR(farg1, farg2) swig_result = fresult end function function FSUNLinSolSetPrintLevel_SPGMR(ls, print_level) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: ls integer(C_INT), intent(in) :: print_level integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(ls) farg2 = print_level fresult = swigc_FSUNLinSolSetPrintLevel_SPGMR(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sunlinsol/spgmr/sunlinsol_spgmr.c0000644000176200001440000006325014645137106022037 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on sundials_spgmr.c code, written by Scott D. Cohen, * Alan C. Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the SPGMR implementation of * the SUNLINSOL package. * -----------------------------------------------------------------*/ #include #include #include #include #include "sundials_debug.h" #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * SPGMR solver structure accessibility macros: * ----------------------------------------------------------------- */ #define SPGMR_CONTENT(S) ( (SUNLinearSolverContent_SPGMR)(S->content) ) #define LASTFLAG(S) ( SPGMR_CONTENT(S)->last_flag ) /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Function to create a new SPGMR linear solver */ SUNLinearSolver SUNLinSol_SPGMR(N_Vector y, int pretype, int maxl, SUNContext sunctx) { SUNLinearSolver S; SUNLinearSolverContent_SPGMR content; /* check for legal pretype and maxl values; if illegal use defaults */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) pretype = SUN_PREC_NONE; if (maxl <= 0) maxl = SUNSPGMR_MAXL_DEFAULT; /* check that the supplied N_Vector supports all requisite operations */ if ( (y->ops->nvclone == NULL) || (y->ops->nvdestroy == NULL) || (y->ops->nvlinearsum == NULL) || (y->ops->nvconst == NULL) || (y->ops->nvprod == NULL) || (y->ops->nvdiv == NULL) || (y->ops->nvscale == NULL) || (y->ops->nvdotprod == NULL) ) return(NULL); /* Create linear solver */ S = NULL; S = SUNLinSolNewEmpty(sunctx); if (S == NULL) return(NULL); /* Attach operations */ S->ops->gettype = SUNLinSolGetType_SPGMR; S->ops->getid = SUNLinSolGetID_SPGMR; S->ops->setatimes = SUNLinSolSetATimes_SPGMR; 
S->ops->setpreconditioner = SUNLinSolSetPreconditioner_SPGMR; S->ops->setscalingvectors = SUNLinSolSetScalingVectors_SPGMR; S->ops->setzeroguess = SUNLinSolSetZeroGuess_SPGMR; S->ops->initialize = SUNLinSolInitialize_SPGMR; S->ops->setup = SUNLinSolSetup_SPGMR; S->ops->solve = SUNLinSolSolve_SPGMR; S->ops->numiters = SUNLinSolNumIters_SPGMR; S->ops->resnorm = SUNLinSolResNorm_SPGMR; S->ops->resid = SUNLinSolResid_SPGMR; S->ops->lastflag = SUNLinSolLastFlag_SPGMR; S->ops->space = SUNLinSolSpace_SPGMR; S->ops->free = SUNLinSolFree_SPGMR; /* Create content */ content = NULL; content = (SUNLinearSolverContent_SPGMR) malloc(sizeof *content); if (content == NULL) { SUNLinSolFree(S); return(NULL); } /* Attach content */ S->content = content; /* Fill content */ content->last_flag = 0; content->maxl = maxl; content->pretype = pretype; content->gstype = SUNSPGMR_GSTYPE_DEFAULT; content->max_restarts = SUNSPGMR_MAXRS_DEFAULT; content->zeroguess = SUNFALSE; content->numiters = 0; content->resnorm = ZERO; content->xcor = NULL; content->vtemp = NULL; content->s1 = NULL; content->s2 = NULL; content->ATimes = NULL; content->ATData = NULL; content->Psetup = NULL; content->Psolve = NULL; content->PData = NULL; content->V = NULL; content->Hes = NULL; content->givens = NULL; content->yg = NULL; content->cv = NULL; content->Xv = NULL; content->print_level = 0; content->info_file = stdout; /* Allocate content */ content->xcor = N_VClone(y); if (content->xcor == NULL) { SUNLinSolFree(S); return(NULL); } content->vtemp = N_VClone(y); if (content->vtemp == NULL) { SUNLinSolFree(S); return(NULL); } return(S); } /* ---------------------------------------------------------------------------- * Function to set the type of preconditioning for SPGMR to use */ int SUNLinSol_SPGMRSetPrecType(SUNLinearSolver S, int pretype) { /* Check for legal pretype */ if ((pretype != SUN_PREC_NONE) && (pretype != SUN_PREC_LEFT) && (pretype != SUN_PREC_RIGHT) && (pretype != SUN_PREC_BOTH)) { 
return(SUNLS_ILL_INPUT); } /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set pretype */ SPGMR_CONTENT(S)->pretype = pretype; return(SUNLS_SUCCESS); } /* ---------------------------------------------------------------------------- * Function to set the type of Gram-Schmidt orthogonalization for SPGMR to use */ int SUNLinSol_SPGMRSetGSType(SUNLinearSolver S, int gstype) { /* Check for legal gstype */ if ((gstype != SUN_MODIFIED_GS) && (gstype != SUN_CLASSICAL_GS)) { return(SUNLS_ILL_INPUT); } /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set pretype */ SPGMR_CONTENT(S)->gstype = gstype; return(SUNLS_SUCCESS); } /* ---------------------------------------------------------------------------- * Function to set the maximum number of GMRES restarts to allow */ int SUNLinSol_SPGMRSetMaxRestarts(SUNLinearSolver S, int maxrs) { /* Illegal maxrs implies use of default value */ if (maxrs < 0) maxrs = SUNSPGMR_MAXRS_DEFAULT; /* Check for non-NULL SUNLinearSolver */ if (S == NULL) return(SUNLS_MEM_NULL); /* Set max_restarts */ SPGMR_CONTENT(S)->max_restarts = maxrs; return(SUNLS_SUCCESS); } /* * ----------------------------------------------------------------- * implementation of linear solver operations * ----------------------------------------------------------------- */ SUNLinearSolver_Type SUNLinSolGetType_SPGMR(SUNLinearSolver S) { return(SUNLINEARSOLVER_ITERATIVE); } SUNLinearSolver_ID SUNLinSolGetID_SPGMR(SUNLinearSolver S) { return(SUNLINEARSOLVER_SPGMR); } int SUNLinSolInitialize_SPGMR(SUNLinearSolver S) { int k; SUNLinearSolverContent_SPGMR content; /* set shortcut to SPGMR memory structure */ if (S == NULL) return(SUNLS_MEM_NULL); content = SPGMR_CONTENT(S); /* ensure valid options */ if (content->max_restarts < 0) content->max_restarts = SUNSPGMR_MAXRS_DEFAULT; if (content->ATimes == NULL) { LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } if ( (content->pretype != SUN_PREC_LEFT) && 
(content->pretype != SUN_PREC_RIGHT) && (content->pretype != SUN_PREC_BOTH) ) content->pretype = SUN_PREC_NONE; if ((content->pretype != SUN_PREC_NONE) && (content->Psolve == NULL)) { LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* allocate solver-specific memory (where the size depends on the choice of maxl) here */ /* Krylov subspace vectors */ if (content->V == NULL) { content->V = N_VCloneVectorArray(content->maxl+1, content->vtemp); if (content->V == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* Hessenberg matrix Hes */ if (content->Hes == NULL) { content->Hes = (realtype **) malloc((content->maxl+1)*sizeof(realtype *)); if (content->Hes == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } for (k=0; k<=content->maxl; k++) { content->Hes[k] = NULL; content->Hes[k] = (realtype *) malloc(content->maxl*sizeof(realtype)); if (content->Hes[k] == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } } /* Givens rotation components */ if (content->givens == NULL) { content->givens = (realtype *) malloc(2*content->maxl*sizeof(realtype)); if (content->givens == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* y and g vectors */ if (content->yg == NULL) { content->yg = (realtype *) malloc((content->maxl+1)*sizeof(realtype)); if (content->yg == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* cv vector for fused vector ops */ if (content->cv == NULL) { content->cv = (realtype *) malloc((content->maxl+1)*sizeof(realtype)); if (content->cv == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* Xv vector for fused vector ops */ if (content->Xv == NULL) { content->Xv = (N_Vector *) malloc((content->maxl+1)*sizeof(N_Vector)); if (content->Xv == NULL) { content->last_flag = SUNLS_MEM_FAIL; return(SUNLS_MEM_FAIL); } } /* return with success */ content->last_flag = SUNLS_SUCCESS; return(SUNLS_SUCCESS); } int 
SUNLinSolSetATimes_SPGMR(SUNLinearSolver S, void* ATData, SUNATimesFn ATimes) { /* set function pointers to integrator-supplied ATimes routine and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPGMR_CONTENT(S)->ATimes = ATimes; SPGMR_CONTENT(S)->ATData = ATData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetPreconditioner_SPGMR(SUNLinearSolver S, void* PData, SUNPSetupFn Psetup, SUNPSolveFn Psolve) { /* set function pointers to integrator-supplied Psetup and PSolve routines and data, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPGMR_CONTENT(S)->Psetup = Psetup; SPGMR_CONTENT(S)->Psolve = Psolve; SPGMR_CONTENT(S)->PData = PData; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetScalingVectors_SPGMR(SUNLinearSolver S, N_Vector s1, N_Vector s2) { /* set N_Vector pointers to integrator-supplied scaling vectors, and return with success */ if (S == NULL) return(SUNLS_MEM_NULL); SPGMR_CONTENT(S)->s1 = s1; SPGMR_CONTENT(S)->s2 = s2; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetZeroGuess_SPGMR(SUNLinearSolver S, booleantype onff) { /* set flag indicating a zero initial guess */ if (S == NULL) return(SUNLS_MEM_NULL); SPGMR_CONTENT(S)->zeroguess = onff; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } int SUNLinSolSetup_SPGMR(SUNLinearSolver S, SUNMatrix A) { int ier; SUNPSetupFn Psetup; void* PData; /* Set shortcuts to SPGMR memory structures */ if (S == NULL) return(SUNLS_MEM_NULL); Psetup = SPGMR_CONTENT(S)->Psetup; PData = SPGMR_CONTENT(S)->PData; /* no solver-specific setup is required, but if user-supplied Psetup routine exists, call that here */ if (Psetup != NULL) { ier = Psetup(PData); if (ier != 0) { LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSET_FAIL_UNREC : SUNLS_PSET_FAIL_REC; return(LASTFLAG(S)); } } /* return with success */ return(SUNLS_SUCCESS); } int SUNLinSolSolve_SPGMR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype delta) { /* local data and shortcut variables */ N_Vector *V, xcor, vtemp, s1, s2; realtype **Hes, *givens, *yg, *res_norm; realtype beta, rotation_product, r_norm, s_product, rho; booleantype preOnLeft, preOnRight, scale2, scale1, converged; booleantype *zeroguess; int i, j, k, l, l_plus_1, l_max, krydim, ier, ntries, max_restarts, gstype; int *nli; void *A_data, *P_data; SUNATimesFn atimes; SUNPSolveFn psolve; /* local shortcuts for fused vector operations */ realtype* cv; N_Vector* Xv; /* Initialize some variables */ l_plus_1 = 0; krydim = 0; /* Make local shorcuts to solver variables. */ if (S == NULL) return(SUNLS_MEM_NULL); l_max = SPGMR_CONTENT(S)->maxl; max_restarts = SPGMR_CONTENT(S)->max_restarts; gstype = SPGMR_CONTENT(S)->gstype; V = SPGMR_CONTENT(S)->V; Hes = SPGMR_CONTENT(S)->Hes; givens = SPGMR_CONTENT(S)->givens; xcor = SPGMR_CONTENT(S)->xcor; yg = SPGMR_CONTENT(S)->yg; vtemp = SPGMR_CONTENT(S)->vtemp; s1 = SPGMR_CONTENT(S)->s1; s2 = SPGMR_CONTENT(S)->s2; A_data = SPGMR_CONTENT(S)->ATData; P_data = SPGMR_CONTENT(S)->PData; atimes = SPGMR_CONTENT(S)->ATimes; psolve = SPGMR_CONTENT(S)->Psolve; zeroguess = &(SPGMR_CONTENT(S)->zeroguess); nli = &(SPGMR_CONTENT(S)->numiters); res_norm = &(SPGMR_CONTENT(S)->resnorm); cv = SPGMR_CONTENT(S)->cv; Xv = SPGMR_CONTENT(S)->Xv; /* Initialize counters and convergence flag */ *nli = 0; converged = SUNFALSE; /* Set booleantype flags for internal solver options */ preOnLeft = ( (SPGMR_CONTENT(S)->pretype == SUN_PREC_LEFT) || (SPGMR_CONTENT(S)->pretype == SUN_PREC_BOTH) ); preOnRight = ( (SPGMR_CONTENT(S)->pretype == SUN_PREC_RIGHT) || (SPGMR_CONTENT(S)->pretype == SUN_PREC_BOTH) ); scale1 = (s1 != NULL); scale2 = (s2 != NULL); #ifdef SUNDIALS_BUILD_WITH_MONITORING if (SPGMR_CONTENT(S)->print_level && 
SPGMR_CONTENT(S)->info_file) STAN_SUNDIALS_FPRINTF(SPGMR_CONTENT(S)->info_file, "SUNLINSOL_SPGMR:\n"); #endif /* Check if Atimes function has been set */ if (atimes == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_ATIMES_NULL; return(LASTFLAG(S)); } /* If preconditioning, check if psolve has been set */ if ((preOnLeft || preOnRight) && psolve == NULL) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_PSOLVE_NULL; return(LASTFLAG(S)); } /* Set vtemp and V[0] to initial (unscaled) residual r_0 = b - A*x_0 */ if (*zeroguess) { N_VScale(ONE, b, vtemp); } else { ier = atimes(A_data, x, vtemp); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_ATIMES_FAIL_UNREC : SUNLS_ATIMES_FAIL_REC; return(LASTFLAG(S)); } N_VLinearSum(ONE, b, -ONE, vtemp, vtemp); } N_VScale(ONE, vtemp, V[0]); /* Apply left preconditioner and left scaling to V[0] = r_0 */ if (preOnLeft) { ier = psolve(P_data, V[0], vtemp, delta, SUN_PREC_LEFT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else { N_VScale(ONE, V[0], vtemp); } if (scale1) { N_VProd(s1, vtemp, V[0]); } else { N_VScale(ONE, vtemp, V[0]); } /* Set r_norm = beta to L2 norm of V[0] = s1 P1_inv r_0, and return if small */ *res_norm = r_norm = beta = SUNRsqrt(N_VDotProd(V[0], V[0])); #ifdef SUNDIALS_BUILD_WITH_MONITORING /* print initial residual */ if (SPGMR_CONTENT(S)->print_level && SPGMR_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPGMR_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) 0, *res_norm); } #endif if (r_norm <= delta) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_SUCCESS; return(LASTFLAG(S)); } /* Initialize rho to avoid compiler warning message */ rho = beta; /* Set xcor = 0 */ N_VConst(ZERO, xcor); /* Begin outer iterations: up to (max_restarts + 1) attempts */ for (ntries=0; ntries<=max_restarts; ntries++) { /* Initialize the Hessenberg matrix Hes and Givens rotation product. 
Normalize the initial vector V[0] */ for (i=0; i<=l_max; i++) for (j=0; jprint_level && SPGMR_CONTENT(S)->info_file) { STAN_SUNDIALS_FPRINTF(SPGMR_CONTENT(S)->info_file, SUNLS_MSG_RESIDUAL, (long int) *nli, *res_norm); } #endif if (rho <= delta) { converged = SUNTRUE; break; } /* Normalize V[l+1] with norm value from the Gram-Schmidt routine */ N_VScale(ONE/Hes[l_plus_1][l], V[l_plus_1], V[l_plus_1]); } /* Inner loop is done. Compute the new correction vector xcor */ /* Construct g, then solve for y */ yg[0] = r_norm; for (i=1; i<=krydim; i++) yg[i]=ZERO; if (SUNQRsol(krydim, Hes, givens, yg) != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_QRSOL_FAIL; return(LASTFLAG(S)); } /* Add correction vector V_l y to xcor */ cv[0] = ONE; Xv[0] = xcor; for (k=0; k0; i--) { yg[i] = s_product*givens[2*i-2]; s_product *= givens[2*i-1]; } yg[0] = s_product; /* Scale r_norm and yg */ r_norm *= s_product; for (i=0; i<=krydim; i++) yg[i] *= r_norm; r_norm = SUNRabs(r_norm); /* Multiply yg by V_(krydim+1) to get last residual vector; restart */ for (k=0; k<=krydim; k++) { cv[k] = yg[k]; Xv[k] = V[k]; } ier = N_VLinearCombination(krydim+1, cv, Xv, V[0]); if (ier != SUNLS_SUCCESS) { *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_VECTOROP_ERR; return(SUNLS_VECTOROP_ERR); } } /* Failed to converge, even after allowed restarts. If the residual norm was reduced below its initial value, compute and return x anyway. Otherwise return failure flag. */ if (rho < beta) { /* Apply right scaling and right precond.: vtemp = P2_inv s2_inv xcor */ if (scale2) N_VDiv(xcor, s2, xcor); if (preOnRight) { ier = psolve(P_data, xcor, vtemp, delta, SUN_PREC_RIGHT); if (ier != 0) { *zeroguess = SUNFALSE; LASTFLAG(S) = (ier < 0) ? 
SUNLS_PSOLVE_FAIL_UNREC : SUNLS_PSOLVE_FAIL_REC; return(LASTFLAG(S)); } } else { N_VScale(ONE, xcor, vtemp); } /* Add vtemp to initial x to get final solution x, and return */ if (*zeroguess) N_VScale(ONE, vtemp, x); else N_VLinearSum(ONE, x, ONE, vtemp, x); *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_RES_REDUCED; return(LASTFLAG(S)); } *zeroguess = SUNFALSE; LASTFLAG(S) = SUNLS_CONV_FAIL; return(LASTFLAG(S)); } int SUNLinSolNumIters_SPGMR(SUNLinearSolver S) { /* return the stored 'numiters' value */ if (S == NULL) return(-1); return (SPGMR_CONTENT(S)->numiters); } realtype SUNLinSolResNorm_SPGMR(SUNLinearSolver S) { /* return the stored 'resnorm' value */ if (S == NULL) return(-ONE); return (SPGMR_CONTENT(S)->resnorm); } N_Vector SUNLinSolResid_SPGMR(SUNLinearSolver S) { /* return the stored 'vtemp' vector */ return (SPGMR_CONTENT(S)->vtemp); } sunindextype SUNLinSolLastFlag_SPGMR(SUNLinearSolver S) { /* return the stored 'last_flag' value */ if (S == NULL) return(-1); return (LASTFLAG(S)); } int SUNLinSolSpace_SPGMR(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { int maxl; sunindextype liw1, lrw1; maxl = SPGMR_CONTENT(S)->maxl; if (SPGMR_CONTENT(S)->vtemp->ops->nvspace) N_VSpace(SPGMR_CONTENT(S)->vtemp, &lrw1, &liw1); else lrw1 = liw1 = 0; *lenrwLS = lrw1*(maxl + 5) + maxl*(maxl + 5) + 2; *leniwLS = liw1*(maxl + 5); return(SUNLS_SUCCESS); } int SUNLinSolFree_SPGMR(SUNLinearSolver S) { int k; if (S == NULL) return(SUNLS_SUCCESS); if (S->content) { /* delete items from within the content structure */ if (SPGMR_CONTENT(S)->xcor) { N_VDestroy(SPGMR_CONTENT(S)->xcor); SPGMR_CONTENT(S)->xcor = NULL; } if (SPGMR_CONTENT(S)->vtemp) { N_VDestroy(SPGMR_CONTENT(S)->vtemp); SPGMR_CONTENT(S)->vtemp = NULL; } if (SPGMR_CONTENT(S)->V) { N_VDestroyVectorArray(SPGMR_CONTENT(S)->V, SPGMR_CONTENT(S)->maxl+1); SPGMR_CONTENT(S)->V = NULL; } if (SPGMR_CONTENT(S)->Hes) { for (k=0; k<=SPGMR_CONTENT(S)->maxl; k++) if (SPGMR_CONTENT(S)->Hes[k]) { free(SPGMR_CONTENT(S)->Hes[k]); 
SPGMR_CONTENT(S)->Hes[k] = NULL; } free(SPGMR_CONTENT(S)->Hes); SPGMR_CONTENT(S)->Hes = NULL; } if (SPGMR_CONTENT(S)->givens) { free(SPGMR_CONTENT(S)->givens); SPGMR_CONTENT(S)->givens = NULL; } if (SPGMR_CONTENT(S)->yg) { free(SPGMR_CONTENT(S)->yg); SPGMR_CONTENT(S)->yg = NULL; } if (SPGMR_CONTENT(S)->cv) { free(SPGMR_CONTENT(S)->cv); SPGMR_CONTENT(S)->cv = NULL; } if (SPGMR_CONTENT(S)->Xv) { free(SPGMR_CONTENT(S)->Xv); SPGMR_CONTENT(S)->Xv = NULL; } free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } int SUNLinSolSetInfoFile_SPGMR(SUNLinearSolver S, FILE* info_file) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); SPGMR_CONTENT(S)->info_file = info_file; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetInfoFile_SPGMR: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } int SUNLinSolSetPrintLevel_SPGMR(SUNLinearSolver S, int print_level) { #ifdef SUNDIALS_BUILD_WITH_MONITORING /* check that the linear solver is non-null */ if (S == NULL) return(SUNLS_MEM_NULL); /* check for valid print level */ if (print_level < 0 || print_level > 1) return(SUNLS_ILL_INPUT); SPGMR_CONTENT(S)->print_level = print_level; return(SUNLS_SUCCESS); #else SUNDIALS_DEBUG_PRINT("ERROR in SUNLinSolSetPrintLevel_SPGMR: SUNDIALS was not built with monitoring\n"); return(SUNLS_ILL_INPUT); #endif } StanHeaders/src/kinsol/0000755000176200001440000000000014645137104014546 5ustar liggesusersStanHeaders/src/kinsol/kinsol_ls_impl.h0000644000176200001440000001555314645137106017750 0ustar liggesusers/*----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * David J. 
Gardner, Radu Serban and Aaron Collier @ LLNL *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Implementation header file for KINSOL's linear solver interface. *-----------------------------------------------------------------*/ #ifndef _KINLS_IMPL_H #define _KINLS_IMPL_H #include #include "kinsol_impl.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*------------------------------------------------------------------ keys for KINPrintInfo (do not use 1 -> conflict with PRNT_RETVAL) ------------------------------------------------------------------*/ #define PRNT_NLI 101 #define PRNT_EPS 102 /*------------------------------------------------------------------ Types : struct KINLsMemRec, struct *KINLsMem The type KINLsMem is a pointer to a KINLsMemRec, which is a structure containing fields that must be accessible by LS module routines. ------------------------------------------------------------------*/ typedef struct KINLsMemRec { /* Linear solver type information */ booleantype iterative; /* is the solver iterative? */ booleantype matrixbased; /* is a matrix structure used? */ /* Jacobian construction & storage */ booleantype jacDQ; /* SUNTRUE if using internal DQ Jacobian approx. 
*/ KINLsJacFn jac; /* Jacobian routine to be called */ void *J_data; /* J_data is passed to jac */ /* Linear solver, matrix and vector objects/pointers */ SUNLinearSolver LS; /* generic iterative linear solver object */ SUNMatrix J; /* problem Jacobian */ /* Solver tolerance adjustment factor (if needed, see kinLsSolve) */ realtype tol_fac; /* Statistics and associated parameters */ long int nje; /* no. of calls to jac */ long int nfeDQ; /* no. of calls to F due to DQ Jacobian or J*v approximations */ long int npe; /* npe = total number of precond calls */ long int nli; /* nli = total number of linear iterations */ long int nps; /* nps = total number of psolve calls */ long int ncfl; /* ncfl = total number of convergence failures */ long int njtimes; /* njtimes = total number of calls to jtimes */ booleantype new_uu; /* flag indicating if the iterate has been updated - the Jacobian must be updated or reevaluated (meant to be used by a user-supplied jtimes function */ int last_flag; /* last error return flag */ /* Preconditioner computation (a) user-provided: - pdata == user_data - pfree == NULL (the user dealocates memory) (b) internal preconditioner module - pdata == kin_mem - pfree == set by the prec. 
module and called in kinLsFree */ KINLsPrecSetupFn pset; KINLsPrecSolveFn psolve; int (*pfree)(KINMem kin_mem); void *pdata; /* Jacobian times vector compuation (a) jtimes function provided by the user: - jt_data == user_data - jtimesDQ == SUNFALSE (b) internal jtimes - jt_data == kin_mem - jtimesDQ == SUNTRUE */ booleantype jtimesDQ; KINLsJacTimesVecFn jtimes; KINSysFn jt_func; void *jt_data; } *KINLsMem; /*------------------------------------------------------------------ Prototypes of internal functions ------------------------------------------------------------------*/ /* Interface routines called by system SUNLinearSolvers */ int kinLsATimes(void *kinmem, N_Vector v, N_Vector z); int kinLsPSetup(void *kinmem); int kinLsPSolve(void *kinmem, N_Vector r, N_Vector z, realtype tol, int lr); /* Difference quotient approximation for Jacobian times vector */ int kinLsDQJtimes(N_Vector v, N_Vector Jv, N_Vector u, booleantype *new_u, void *data); /* Difference-quotient Jacobian approximation routines */ int kinLsDQJac(N_Vector u, N_Vector fu, SUNMatrix Jac, void *data, N_Vector tmp1, N_Vector tmp2); int kinLsDenseDQJac(N_Vector u, N_Vector fu, SUNMatrix Jac, KINMem kin_mem, N_Vector tmp1, N_Vector tmp2); int kinLsBandDQJac(N_Vector u, N_Vector fu, SUNMatrix Jac, KINMem kin_mem, N_Vector tmp1, N_Vector tmp2); /* Generic linit/lsetup/lsolve/lfree interface routines for KINSOL to call */ int kinLsInitialize(KINMem kin_mem); int kinLsSetup(KINMem kin_mem); int kinLsSolve(KINMem kin_mem, N_Vector x, N_Vector b, realtype *sJpnorm, realtype *sFdotJp); int kinLsFree(KINMem kin_mem); /* Auxilliary functions */ int kinLsInitializeCounters(KINLsMem kinls_mem); int kinLs_AccessLMem(void* kinmem, const char *fname, KINMem* kin_mem, KINLsMem *kinls_mem); /*------------------------------------------------------------------ Error messages ------------------------------------------------------------------*/ #define MSG_LS_KINMEM_NULL "KINSOL memory is NULL." 
#define MSG_LS_MEM_FAIL "A memory request failed." #define MSG_LS_BAD_NVECTOR "A required vector operation is not implemented." #define MSG_LS_LMEM_NULL "Linear solver memory is NULL." #define MSG_LS_NEG_MAXRS "maxrs < 0 illegal." #define MSG_LS_BAD_SIZES "Illegal bandwidth parameter(s). Must have 0 <= ml, mu <= N-1." #define MSG_LS_JACFUNC_FAILED "The Jacobian routine failed in an unrecoverable manner." #define MSG_LS_PSET_FAILED "The preconditioner setup routine failed in an unrecoverable manner." #define MSG_LS_PSOLVE_FAILED "The preconditioner solve routine failed in an unrecoverable manner." #define MSG_LS_JTIMES_FAILED "The Jacobian x vector routine failed in an unrecoverable manner." #define MSG_LS_MATZERO_FAILED "The SUNMatZero routine failed in an unrecoverable manner." /*------------------------------------------------------------------ Info messages ------------------------------------------------------------------*/ #define INFO_NLI "nli_inc = %d" #if defined(SUNDIALS_EXTENDED_PRECISION) #define INFO_EPS "residual norm = %12.3Lg eps = %12.3Lg" #elif defined(SUNDIALS_DOUBLE_PRECISION) #define INFO_EPS "residual norm = %12.3lg eps = %12.3lg" #else #define INFO_EPS "residual norm = %12.3g eps = %12.3g" #endif #ifdef __cplusplus } #endif #endif StanHeaders/src/kinsol/kinsol_impl.h0000644000176200001440000006353614645137106017256 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Allan Taylor, Alan Hindmarsh, Radu Serban, and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * KINSOL solver module header file (private version) * ----------------------------------------------------------------- */ #ifndef _KINSOL_IMPL_H #define _KINSOL_IMPL_H #include #include #include "sundials_context_impl.h" #include "sundials_iterative_impl.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ================================================================= * M A I N S O L V E R M E M O R Y B L O C K * ================================================================= */ /* KINSOL default constants */ #define PRINTFL_DEFAULT 0 #define MXITER_DEFAULT 200 #define MXNBCF_DEFAULT 10 #define MSBSET_DEFAULT 10 #define MSBSET_SUB_DEFAULT 5 #define OMEGA_MIN RCONST(0.00001) #define OMEGA_MAX RCONST(0.9) /* * ----------------------------------------------------------------- * Types : struct KINMemRec and struct *KINMem * ----------------------------------------------------------------- * A variable declaration of type struct *KINMem denotes a * pointer to a data structure of type struct KINMemRec. The * KINMemRec structure contains numerous fields that must be * accessible by KINSOL solver module routines. 
* ----------------------------------------------------------------- */ typedef struct KINMemRec { SUNContext kin_sunctx; realtype kin_uround; /* machine epsilon (or unit roundoff error) (defined in sundials_types.h) */ /* problem specification data */ KINSysFn kin_func; /* nonlinear system function implementation */ void *kin_user_data; /* work space available to func routine */ realtype kin_fnormtol; /* stopping tolerance on L2-norm of function value */ realtype kin_scsteptol; /* scaled step length tolerance */ int kin_globalstrategy; /* choices are KIN_NONE, KIN_LINESEARCH KIN_PICARD and KIN_FP */ int kin_printfl; /* level of verbosity of output */ long int kin_mxiter; /* maximum number of nonlinear iterations */ long int kin_msbset; /* maximum number of nonlinear iterations that may be performed between calls to the linear solver setup routine (lsetup) */ long int kin_msbset_sub; /* subinterval length for residual monitoring */ long int kin_mxnbcf; /* maximum number of beta condition failures */ int kin_etaflag; /* choices are KIN_ETACONSTANT, KIN_ETACHOICE1 and KIN_ETACHOICE2 */ booleantype kin_noMinEps; /* flag controlling whether or not the value of eps is bounded below */ booleantype kin_constraintsSet; /* flag indicating if constraints are being used */ booleantype kin_jacCurrent; /* flag indicating if the Jacobian info. 
used by the linear solver is current */ booleantype kin_callForcingTerm; /* flag set if using either KIN_ETACHOICE1 or KIN_ETACHOICE2 */ booleantype kin_noResMon; /* flag indicating if the nonlinear residual monitoring scheme should be used */ booleantype kin_retry_nni; /* flag indicating if nonlinear iteration should be retried (set by residual monitoring algorithm) */ booleantype kin_update_fnorm_sub; /* flag indicating if the fnorm associated with the subinterval needs to be updated (set by residual monitoring algorithm) */ realtype kin_mxnewtstep; /* maximum allowable scaled step length */ realtype kin_mxnstepin; /* input (or preset) value for mxnewtstep */ realtype kin_sqrt_relfunc; /* relative error bound for func(u) */ realtype kin_stepl; /* scaled length of current step */ realtype kin_stepmul; /* step scaling factor */ realtype kin_eps; /* current value of eps */ realtype kin_eta; /* current value of eta */ realtype kin_eta_gamma; /* gamma value used in eta calculation (choice #2) */ realtype kin_eta_alpha; /* alpha value used in eta calculation (choice #2) */ booleantype kin_noInitSetup; /* flag controlling whether or not the KINSol routine makes an initial call to the linear solver setup routine (lsetup) */ realtype kin_sthrsh; /* threshold value for calling the linear solver setup routine */ /* counters */ long int kin_nni; /* number of nonlinear iterations */ long int kin_nfe; /* number of calls made to func routine */ long int kin_nnilset; /* value of nni counter when the linear solver setup was last called */ long int kin_nnilset_sub; /* value of nni counter when the linear solver setup was last called (subinterval) */ long int kin_nbcf; /* number of times the beta-condition could not be met in KINLineSearch */ long int kin_nbktrk; /* number of backtracks performed by KINLineSearch */ long int kin_ncscmx; /* number of consecutive steps of size mxnewtstep taken */ /* vectors */ N_Vector kin_uu; /* solution vector/current iterate (initially contains 
initial guess, but holds approximate solution upon completion if no errors occurred) */ N_Vector kin_unew; /* next iterate (unew = uu+pp) */ N_Vector kin_fval; /* vector containing result of nonlinear system function evaluated at a given iterate (fval = func(uu)) */ N_Vector kin_gval; /* vector containing result of the fixed point function evaluated at a given iterate; used in KIN_PICARD strategy only. (gval = uu - L^{-1}fval(uu)) */ N_Vector kin_uscale; /* iterate scaling vector */ N_Vector kin_fscale; /* fval scaling vector */ N_Vector kin_pp; /* incremental change vector (pp = unew-uu) */ N_Vector kin_constraints; /* constraints vector */ N_Vector kin_vtemp1; /* scratch vector #1 */ N_Vector kin_vtemp2; /* scratch vector #2 */ N_Vector kin_vtemp3; /* scratch vector #3 */ /* fixed point and Picard options */ booleantype kin_ret_newest; /* return the newest FP iteration */ booleantype kin_damping; /* flag to apply damping in FP/Picard */ realtype kin_beta; /* damping parameter for FP/Picard */ /* space requirements for AA, Broyden and NLEN */ N_Vector kin_fold_aa; /* vector needed for AA, Broyden, and NLEN */ N_Vector kin_gold_aa; /* vector needed for AA, Broyden, and NLEN */ N_Vector *kin_df_aa; /* vector array needed for AA, Broyden, and NLEN */ N_Vector *kin_dg_aa; /* vector array needed for AA, Broyden and NLEN */ N_Vector *kin_q_aa; /* vector array needed for AA */ realtype kin_beta_aa; /* beta damping parameter for AA */ realtype *kin_gamma_aa; /* array of size maa used in AA */ realtype *kin_R_aa; /* array of size maa*maa used in AA */ realtype *kin_T_aa; /* array of size maa*maa used in AA with ICWY MGS */ long int *kin_ipt_map; /* array of size maa*maa/2 used in AA */ long int kin_m_aa; /* parameter for AA, Broyden or NLEN */ long int kin_delay_aa; /* number of iterations to delay AA */ int kin_orth_aa; /* parameter for AA determining orthogonalization routine 0 - Modified Gram Schmidt (standard) 1 - ICWY Modified Gram Schmidt (Bjorck) 2 - CGS2 
(Hernandez) 3 - Delayed CGS2 (Hernandez) */ SUNQRAddFn kin_qr_func; /* QRAdd function for AA orthogonalization */ SUNQRData kin_qr_data; /* Additional parameters required for QRAdd routine set for AA */ booleantype kin_damping_aa; /* flag to apply damping in AA */ realtype *kin_cv; /* scalar array for fused vector operations */ N_Vector *kin_Xv; /* vector array for fused vector operations */ /* space requirements for vector storage */ sunindextype kin_lrw1; /* number of realtype-sized memory blocks needed for a single N_Vector */ sunindextype kin_liw1; /* number of int-sized memory blocks needed for a single N_Vecotr */ long int kin_lrw; /* total number of realtype-sized memory blocks needed for all KINSOL work vectors */ long int kin_liw; /* total number of int-sized memory blocks needed for all KINSOL work vectors */ /* linear solver data */ /* function prototypes (pointers) */ int (*kin_linit)(struct KINMemRec *kin_mem); int (*kin_lsetup)(struct KINMemRec *kin_mem); int (*kin_lsolve)(struct KINMemRec *kin_mem, N_Vector xx, N_Vector bb, realtype *sJpnorm, realtype *sFdotJp); int (*kin_lfree)(struct KINMemRec *kin_mem); booleantype kin_inexact_ls; /* flag set by the linear solver module (in linit) indicating whether this is an iterative linear solver (SUNTRUE), or a direct linear solver (SUNFALSE) */ void *kin_lmem; /* pointer to linear solver memory block */ realtype kin_fnorm; /* value of L2-norm of fscale*fval */ realtype kin_f1norm; /* f1norm = 0.5*(fnorm)^2 */ realtype kin_sFdotJp; /* value of scaled F(u) vector (fscale*fval) dotted with scaled J(u)*pp vector (set by lsolve) */ realtype kin_sJpnorm; /* value of L2-norm of fscale*(J(u)*pp) (set by lsolve) */ realtype kin_fnorm_sub; /* value of L2-norm of fscale*fval (subinterval) */ booleantype kin_eval_omega; /* flag indicating that omega must be evaluated. */ realtype kin_omega; /* constant value for real scalar used in test to determine if reduction of norm of nonlinear residual is sufficient. 
Unless a valid constant value is specified by the user, omega is estimated from omega_min and omega_max at each iteration. */ realtype kin_omega_min; /* lower bound on omega */ realtype kin_omega_max; /* upper bound on omega */ /* * ----------------------------------------------------------------- * Note: The KINLineSearch subroutine scales the values of the * variables sFdotJp and sJpnorm by a factor rl (lambda) that is * chosen by the line search algorithm such that the sclaed Newton * step satisfies the following conditions: * * F(u_k+1) <= F(u_k) + alpha*(F(u_k)^T * J(u_k))*p*rl * * F(u_k+1) >= F(u_k) + beta*(F(u_k)^T * J(u_k))*p*rl * * where alpha = 1.0e-4, beta = 0.9, u_k+1 = u_k + rl*p, * 0 < rl <= 1, J denotes the system Jacobian, and F represents * the nonliner system function. * ----------------------------------------------------------------- */ booleantype kin_MallocDone; /* flag indicating if KINMalloc has been called yet */ /* message files */ /*------------------------------------------- Error handler function and error ouput file -------------------------------------------*/ KINErrHandlerFn kin_ehfun; /* Error messages are handled by ehfun */ void *kin_eh_data; /* dats pointer passed to ehfun */ FILE *kin_errfp; /* KINSOL error messages are sent to errfp */ KINInfoHandlerFn kin_ihfun; /* Info messages are handled by ihfun */ void *kin_ih_data; /* dats pointer passed to ihfun */ FILE *kin_infofp; /* where KINSol info messages are sent */ /*--------- Debugging ---------*/ FILE *kin_debugfp; /* debugging output file */ } *KINMem; /* * ================================================================= * I N T E R F A C E T O L I N E A R S O L V E R * ================================================================= */ /* * ----------------------------------------------------------------- * Function : int (*kin_linit)(KINMem kin_mem) * ----------------------------------------------------------------- * kin_linit initializes solver-specific data structures 
(including * variables used as counters or for storing statistical information), * but system memory allocation should be done by the subroutine * that actually initializes the environment for liner solver * package. If the linear system is to be preconditioned, then the * variable setupNonNull (type booleantype) should be set to SUNTRUE * (predefined constant) and the kin_lsetup routine should be * appropriately defined. * * kinmem pointer to an internal memory block allocated during * prior calls to KINCreate and KINMalloc * * If the necessary variables have been successfully initialized, * then the kin_linit function should return 0 (zero). Otherwise, * the subroutine should indicate a failure has occurred by * returning a non-zero integer value. * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * Function : int (*kin_lsetup)(KINMem kin_mem) * ----------------------------------------------------------------- * kin_lsetup interfaces with the user-supplied pset subroutine (the * preconditioner setup routine), and updates relevant variable * values (see KINSpgmrSetup/KINSpbcgSetup). Simply stated, the * kin_lsetup routine prepares the linear solver for a subsequent * call to the user-supplied kin_lsolve function. * * kinmem pointer to an internal memory block allocated during * prior calls to KINCreate and KINMalloc * * If successful, the kin_lsetup routine should return 0 (zero). * Otherwise it should return a non-zero value. 
* ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * Function : int (*kin_lsolve)(KINMem kin_mem, N_Vector xx, * N_Vector bb, realtype *sJpnorm, realtype *sFdotJp) * ----------------------------------------------------------------- * kin_lsolve interfaces with the subroutine implementing the * numerical method to be used to solve the linear system J*xx = bb, * and must increment the relevant counter variable values in * addition to computing certain values used by the global strategy * and forcing term routines (see KINInexactNewton, KINLineSearch, * KINForcingTerm, and KINSpgmrSolve/KINSpbcgSolve). * * kinmem pointer to an internal memory block allocated during * prior calls to KINCreate and KINMalloc * * xx vector (type N_Vector) set to initial guess by kin_lsolve * routine prior to calling the linear solver, but which upon * return contains an approximate solution of the linear * system J*xx = bb, where J denotes the system Jacobian * * bb vector (type N_Vector) set to -func(u) (negative of the * value of the system function evaluated at the current * iterate) by KINLinSolDrv before kin_lsolve is called * * sJpnorm holds the value of the L2-norm (Euclidean norm) of * fscale*(J(u)*pp) upon return * * sFdotJp holds the value of the scaled F(u) (fscale*F) dotted * with the scaled J(u)*pp vector upon return * * If successful, the kin_lsolve routine should return 0 (zero). * Otherwise it should return a positive value if a re-evaluation * of the lsetup function could recover, or a negative value if * no such recovery is possible. 
* ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * Function : int (*kin_lfree)(KINMem kin_mem) * ----------------------------------------------------------------- * kin_lfree is called by KINFree and should free (deallocate) all * system memory resources allocated for the linear solver module * (see KINSpgmrFree/KINSpbcgFree). It should return 0 upon * success, nonzero on failure. * * kinmem pointer to an internal memory block allocated during * prior calls to KINCreate and KINMalloc * ----------------------------------------------------------------- */ /* * ================================================================= * K I N S O L I N T E R N A L F U N C T I O N S * ================================================================= */ /* High level error handler */ void KINProcessError(KINMem kin_mem, int error_code, const char *module, const char *fname, const char *msgfmt, ...); /* Prototype of internal errHandler function */ void KINErrHandler(int error_code, const char *module, const char *function, char *msg, void *user_data); /* High level info handler */ void KINPrintInfo(KINMem kin_mem, int info_code, const char *module, const char *fname, const char *msgfmt, ...); /* Prototype of internal infoHandler function */ void KINInfoHandler(const char *module, const char *function, char *msg, void *user_data); /* * ================================================================= * K I N S O L E R R O R M E S S A G E S * ================================================================= */ #define MSG_MEM_FAIL "A memory request failed." #define MSG_NO_MEM "kinsol_mem = NULL illegal." #define MSG_NULL_SUNCTX "sunctx = NULL illegal." #define MSG_BAD_NVECTOR "A required vector operation is not implemented." #define MSG_FUNC_NULL "func = NULL illegal." #define MSG_NO_MALLOC "Attempt to call before KINMalloc illegal." #define MSG_BAD_PRINTFL "Illegal value for printfl." 
#define MSG_BAD_MXITER "Illegal value for mxiter." #define MSG_BAD_MSBSET "Illegal msbset < 0." #define MSG_BAD_MSBSETSUB "Illegal msbsetsub < 0." #define MSG_BAD_ETACHOICE "Illegal value for etachoice." #define MSG_BAD_ETACONST "eta out of range." #define MSG_BAD_GAMMA "gamma out of range." #define MSG_BAD_ALPHA "alpha out of range." #define MSG_BAD_MXNEWTSTEP "Illegal mxnewtstep < 0." #define MSG_BAD_RELFUNC "relfunc < 0 illegal." #define MSG_BAD_FNORMTOL "fnormtol < 0 illegal." #define MSG_BAD_SCSTEPTOL "scsteptol < 0 illegal." #define MSG_BAD_MXNBCF "mxbcf < 0 illegal." #define MSG_BAD_CONSTRAINTS "Illegal values in constraints vector." #define MSG_BAD_OMEGA "scalars < 0 illegal." #define MSG_BAD_MAA "maa < 0 illegal." #define MSG_BAD_ORTHAA "Illegal value for orthaa." #define MSG_ZERO_MAA "maa = 0 illegal." #define MSG_LSOLV_NO_MEM "The linear solver memory pointer is NULL." #define MSG_UU_NULL "uu = NULL illegal." #define MSG_BAD_GLSTRAT "Illegal value for global strategy." #define MSG_BAD_USCALE "uscale = NULL illegal." #define MSG_USCALE_NONPOSITIVE "uscale has nonpositive elements." #define MSG_BAD_FSCALE "fscale = NULL illegal." #define MSG_FSCALE_NONPOSITIVE "fscale has nonpositive elements." #define MSG_CONSTRAINTS_NOTOK "Constraints not allowed with fixed point or Picard iterations" #define MSG_INITIAL_CNSTRNT "Initial guess does NOT meet constraints." #define MSG_LINIT_FAIL "The linear solver's init routine failed." #define MSG_SYSFUNC_FAILED "The system function failed in an unrecoverable manner." #define MSG_SYSFUNC_FIRST "The system function failed at the first call." #define MSG_LSETUP_FAILED "The linear solver's setup function failed in an unrecoverable manner." #define MSG_LSOLVE_FAILED "The linear solver's solve function failed in an unrecoverable manner." #define MSG_LINSOLV_NO_RECOVERY "The linear solver's solve function failed recoverably, but the Jacobian data is already current." 
#define MSG_LINESEARCH_NONCONV "The line search algorithm was unable to find an iterate sufficiently distinct from the current iterate." #define MSG_LINESEARCH_BCFAIL "The line search algorithm was unable to satisfy the beta-condition for nbcfails iterations." #define MSG_MAXITER_REACHED "The maximum number of iterations was reached before convergence." #define MSG_MXNEWT_5X_EXCEEDED "Five consecutive steps have been taken that satisfy a scaled step length test." #define MSG_SYSFUNC_REPTD "Unable to correct repeated recoverable system function errors." #define MSG_NOL_FAIL "Unable to find user's Linear Jacobian, which is required for the KIN_PICARD Strategy" /* * ================================================================= * K I N S O L I N F O M E S S A G E S * ================================================================= */ #define INFO_IVAR "%s = %d" #define INFO_LIVAR "%s = %ld" #define INFO_RETVAL "Return value: %d" #define INFO_ADJ "no. of lambda adjustments = %ld" #if defined(SUNDIALS_EXTENDED_PRECISION) #define INFO_RVAR "%s = %26.16Lg" #define INFO_NNI "nni = %4ld nfe = %6ld fnorm = %26.16Lg" #define INFO_TOL "scsteptol = %12.3Lg fnormtol = %12.3Lg" #define INFO_FMAX "scaled f norm (for stopping) = %12.3Lg" #define INFO_PNORM "pnorm = %12.4Le" #define INFO_PNORM1 "(ivio=1) pnorm = %12.4Le" #define INFO_FNORM "fnorm(L2) = %20.8Le" #define INFO_LAM "min_lam = %11.4Le f1norm = %11.4Le pnorm = %11.4Le" #define INFO_ALPHA "fnorm = %15.8Le f1norm = %15.8Le alpha_cond = %15.8Le lam = %15.8Le" #define INFO_BETA "f1norm = %15.8Le beta_cond = %15.8Le lam = %15.8Le" #define INFO_ALPHABETA "f1norm = %15.8Le alpha_cond = %15.8Le beta_cond = %15.8Le lam = %15.8Le" #elif defined(SUNDIALS_DOUBLE_PRECISION) #define INFO_RVAR "%s = %26.16lg" #define INFO_NNI "nni = %4ld nfe = %6ld fnorm = %26.16lg" #define INFO_TOL "scsteptol = %12.3lg fnormtol = %12.3lg" #define INFO_FMAX "scaled f norm (for stopping) = %12.3lg" #define INFO_PNORM "pnorm = %12.4le" #define 
INFO_PNORM1 "(ivio=1) pnorm = %12.4le" #define INFO_FNORM "fnorm(L2) = %20.8le" #define INFO_LAM "min_lam = %11.4le f1norm = %11.4le pnorm = %11.4le" #define INFO_ALPHA "fnorm = %15.8le f1norm = %15.8le alpha_cond = %15.8le lam = %15.8le" #define INFO_BETA "f1norm = %15.8le beta_cond = %15.8le lam = %15.8le" #define INFO_ALPHABETA "f1norm = %15.8le alpha_cond = %15.8le beta_cond = %15.8le lam = %15.8le" #else #define INFO_RVAR "%s = %26.16g" #define INFO_NNI "nni = %4ld nfe = %6ld fnorm = %26.16g" #define INFO_TOL "scsteptol = %12.3g fnormtol = %12.3g" #define INFO_FMAX "scaled f norm (for stopping) = %12.3g" #define INFO_PNORM "pnorm = %12.4e" #define INFO_PNORM1 "(ivio=1) pnorm = %12.4e" #define INFO_FNORM "fnorm(L2) = %20.8e" #define INFO_LAM "min_lam = %11.4e f1norm = %11.4e pnorm = %11.4e" #define INFO_ALPHA "fnorm = %15.8e f1norm = %15.8e alpha_cond = %15.8e lam = %15.8e" #define INFO_BETA "f1norm = %15.8e beta_cond = %15.8e lam = %15.8e" #define INFO_ALPHABETA "f1norm = %15.8e alpha_cond = %15.8e beta_cond = %15.8e lam = %15.8e" #endif #ifdef __cplusplus } #endif #endif StanHeaders/src/kinsol/kinsol_bbdpre_impl.h0000644000176200001440000000517714645137106020571 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * Allan Taylor, Alan Hindmarsh, Radu Serban, and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * KINBBDPRE module header file (private version) * -----------------------------------------------------------------*/ #ifndef _KINBBDPRE_IMPL_H #define _KINBBDPRE_IMPL_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*------------------------------------------------------------------ Definition of KBBDData ------------------------------------------------------------------*/ typedef struct KBBDPrecDataRec { /* passed by user to KINBBDPrecAlloc, used by pset/psolve functions */ sunindextype mudq, mldq, mukeep, mlkeep; realtype rel_uu; /* relative error for the Jacobian DQ routine */ KINBBDLocalFn gloc; KINBBDCommFn gcomm; /* set by KINBBDPrecSetup and used by KINBBDPrecSetup and KINBBDPrecSolve functions */ sunindextype n_local; SUNMatrix PP; SUNLinearSolver LS; N_Vector rlocal; N_Vector zlocal; N_Vector tempv1; N_Vector tempv2; N_Vector tempv3; /* available for optional output */ long int rpwsize; long int ipwsize; long int nge; /* pointer to KINSol memory */ void *kin_mem; } *KBBDPrecData; /* *----------------------------------------------------------------- * KINBBDPRE error messages *----------------------------------------------------------------- */ #define MSGBBD_MEM_NULL "KINSOL Memory is NULL." #define MSGBBD_LMEM_NULL "Linear solver memory is NULL. One of the SPILS linear solvers must be attached." #define MSGBBD_MEM_FAIL "A memory request failed." #define MSGBBD_BAD_NVECTOR "A required vector operation is not implemented." #define MSGBBD_SUNMAT_FAIL "An error arose from a SUNBandMatrix routine." #define MSGBBD_SUNLS_FAIL "An error arose from a SUNBandLinearSolver routine." #define MSGBBD_PMEM_NULL "BBD peconditioner memory is NULL. IDABBDPrecInit must be called." #define MSGBBD_FUNC_FAILED "The gloc or gcomm routine failed in an unrecoverable manner." 
#ifdef __cplusplus } #endif #endif StanHeaders/src/kinsol/kinsol.c0000644000176200001440000027203314645137106016222 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): Allan Taylor, Alan Hindmarsh, Radu Serban, Carol Woodward, * John Loffeld, Aaron Collier, and Shelby Lockhart @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation file for the main KINSol solver. * It is independent of the KINSol linear solver in use. * ----------------------------------------------------------------------------- * * EXPORTED FUNCTIONS * ------------------ * Creation and allocation functions * KINCreate * KINInit * Main solver function * KINSol * Deallocation function * KINFree * * PRIVATE FUNCTIONS * ----------------- * KINCheckNvector * Memory allocation/deallocation * KINAllocVectors * KINFreeVectors * Initial setup * KINSolInit * Step functions * KINLinSolDrv * KINFullNewton * KINLineSearch * KINConstraint * KINFP * KINPicardAA * Stopping tests * KINStop * KINForcingTerm * Norm functions * KINScFNorm * KINScSNorm * KINSOL Verbose output functions * KINPrintInfo * KINInfoHandler * KINSOL Error Handling functions * KINProcessError * KINErrHandler * ---------------------------------------------------------------------------*/ /* * ================================================================= * IMPORTED HEADER FILES * ================================================================= */ #include #include #include #include #include #include "kinsol_impl.h" #include /* * 
================================================================= * KINSOL PRIVATE CONSTANTS * ================================================================= */ #define HALF RCONST(0.5) #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) #define TWO RCONST(2.0) #define THREE RCONST(3.0) #define FIVE RCONST(5.0) #define TWELVE RCONST(12.0) #define POINT1 RCONST(0.1) #define POINT01 RCONST(0.01) #define POINT99 RCONST(0.99) #define THOUSAND RCONST(1000.0) #define ONETHIRD RCONST(0.3333333333333333) #define TWOTHIRDS RCONST(0.6666666666666667) #define POINT9 RCONST(0.9) #define POINT0001 RCONST(0.0001) /* * ================================================================= * KINSOL ROUTINE-SPECIFIC CONSTANTS * ================================================================= */ /* * Control constants for lower-level functions used by KINSol * ---------------------------------------------------------- * * KINStop return value requesting more iterations * RETRY_ITERATION * CONTINUE_ITERATIONS * * KINFullNewton, KINLineSearch, KINFP, and KINPicardAA return values: * KIN_SUCCESS * KIN_SYSFUNC_FAIL * STEP_TOO_SMALL * * KINConstraint return values: * KIN_SUCCESS * CONSTR_VIOLATED */ #define RETRY_ITERATION -998 #define CONTINUE_ITERATIONS -999 #define STEP_TOO_SMALL -997 #define CONSTR_VIOLATED -996 /* * Algorithmic constants * --------------------- * * MAX_RECVR max. no. 
of attempts to correct a recoverable func error */ #define MAX_RECVR 5 /* * Keys for KINPrintInfo * --------------------- */ #define PRNT_RETVAL 1 #define PRNT_NNI 2 #define PRNT_TOL 3 #define PRNT_FMAX 4 #define PRNT_PNORM 5 #define PRNT_PNORM1 6 #define PRNT_FNORM 7 #define PRNT_LAM 8 #define PRNT_ALPHA 9 #define PRNT_BETA 10 #define PRNT_ALPHABETA 11 #define PRNT_ADJ 12 /*=================================================================*/ /* Shortcuts */ /*=================================================================*/ #define KIN_PROFILER kin_mem->kin_sunctx->profiler /* * ================================================================= * PRIVATE FUNCTION PROTOTYPES * ================================================================= */ static booleantype KINCheckNvector(N_Vector tmpl); static booleantype KINAllocVectors(KINMem kin_mem, N_Vector tmpl); static int KINSolInit(KINMem kin_mem); static int KINConstraint(KINMem kin_mem ); static void KINForcingTerm(KINMem kin_mem, realtype fnormp); static void KINFreeVectors(KINMem kin_mem); static int KINFullNewton(KINMem kin_mem, realtype *fnormp, realtype *f1normp, booleantype *maxStepTaken); static int KINLineSearch(KINMem kin_mem, realtype *fnormp, realtype *f1normp, booleantype *maxStepTaken); static int KINPicardAA(KINMem kin_mem); static int KINFP(KINMem kin_mem); static int KINLinSolDrv(KINMem kinmem); static int KINPicardFcnEval(KINMem kin_mem, N_Vector gval, N_Vector uval, N_Vector fval1); static realtype KINScFNorm(KINMem kin_mem, N_Vector v, N_Vector scale); static realtype KINScSNorm(KINMem kin_mem, N_Vector v, N_Vector u); static int KINStop(KINMem kin_mem, booleantype maxStepTaken, int sflag); static int AndersonAcc(KINMem kin_mem, N_Vector gval, N_Vector fv, N_Vector x, N_Vector x_old, long int iter, realtype *R, realtype *gamma); /* * ================================================================= * EXPORTED FUNCTIONS IMPLEMENTATION * 
================================================================= */ /* * ----------------------------------------------------------------- * Creation and allocation functions * ----------------------------------------------------------------- */ /* * Function : KINCreate * * KINCreate creates an internal memory block for a problem to * be solved by KINSOL. If successful, KINCreate returns a pointer * to the problem memory. This pointer should be passed to * KINInit. If an initialization error occurs, KINCreate prints * an error message to standard error and returns NULL. */ void *KINCreate(SUNContext sunctx) { KINMem kin_mem; realtype uround; /* Test inputs */ if (sunctx == NULL) { KINProcessError(NULL, 0, "KIN", "KINCreate", MSG_NULL_SUNCTX); return(NULL); } kin_mem = NULL; kin_mem = (KINMem) malloc(sizeof(struct KINMemRec)); if (kin_mem == NULL) { KINProcessError(kin_mem, 0, "KINSOL", "KINCreate", MSG_MEM_FAIL); return(NULL); } /* Zero out kin_mem */ memset(kin_mem, 0, sizeof(struct KINMemRec)); kin_mem->kin_sunctx = sunctx; /* set uround (unit roundoff) */ kin_mem->kin_uround = uround = UNIT_ROUNDOFF; /* set default values for solver optional inputs */ kin_mem->kin_func = NULL; kin_mem->kin_user_data = NULL; kin_mem->kin_uu = NULL; kin_mem->kin_unew = NULL; kin_mem->kin_fval = NULL; kin_mem->kin_gval = NULL; kin_mem->kin_uscale = NULL; kin_mem->kin_fscale = NULL; kin_mem->kin_pp = NULL; kin_mem->kin_constraints = NULL; kin_mem->kin_vtemp1 = NULL; kin_mem->kin_vtemp2 = NULL; kin_mem->kin_vtemp3 = NULL; kin_mem->kin_fold_aa = NULL; kin_mem->kin_gold_aa = NULL; kin_mem->kin_df_aa = NULL; kin_mem->kin_dg_aa = NULL; kin_mem->kin_q_aa = NULL; kin_mem->kin_T_aa = NULL; kin_mem->kin_gamma_aa = NULL; kin_mem->kin_R_aa = NULL; kin_mem->kin_ipt_map = NULL; kin_mem->kin_cv = NULL; kin_mem->kin_Xv = NULL; kin_mem->kin_lmem = NULL; kin_mem->kin_beta = ONE; kin_mem->kin_damping = SUNFALSE; kin_mem->kin_m_aa = 0; kin_mem->kin_delay_aa = 0; kin_mem->kin_orth_aa = KIN_ORTH_MGS; 
kin_mem->kin_qr_func = NULL; kin_mem->kin_qr_data = NULL; kin_mem->kin_beta_aa = ONE; kin_mem->kin_damping_aa = SUNFALSE; kin_mem->kin_constraintsSet = SUNFALSE; kin_mem->kin_ehfun = KINErrHandler; kin_mem->kin_eh_data = kin_mem; kin_mem->kin_errfp = stderr; kin_mem->kin_ihfun = KINInfoHandler; kin_mem->kin_ih_data = kin_mem; kin_mem->kin_infofp = stdout; kin_mem->kin_printfl = PRINTFL_DEFAULT; kin_mem->kin_ret_newest = SUNFALSE; kin_mem->kin_mxiter = MXITER_DEFAULT; kin_mem->kin_noInitSetup = SUNFALSE; kin_mem->kin_msbset = MSBSET_DEFAULT; kin_mem->kin_noResMon = SUNFALSE; kin_mem->kin_msbset_sub = MSBSET_SUB_DEFAULT; kin_mem->kin_update_fnorm_sub = SUNFALSE; kin_mem->kin_mxnbcf = MXNBCF_DEFAULT; kin_mem->kin_sthrsh = TWO; kin_mem->kin_noMinEps = SUNFALSE; kin_mem->kin_mxnstepin = ZERO; kin_mem->kin_sqrt_relfunc = SUNRsqrt(uround); kin_mem->kin_scsteptol = SUNRpowerR(uround,TWOTHIRDS); kin_mem->kin_fnormtol = SUNRpowerR(uround,ONETHIRD); kin_mem->kin_etaflag = KIN_ETACHOICE1; kin_mem->kin_eta = POINT1; /* default for KIN_ETACONSTANT */ kin_mem->kin_eta_alpha = TWO; /* default for KIN_ETACHOICE2 */ kin_mem->kin_eta_gamma = POINT9; /* default for KIN_ETACHOICE2 */ kin_mem->kin_MallocDone = SUNFALSE; kin_mem->kin_eval_omega = SUNTRUE; kin_mem->kin_omega = ZERO; /* default to using min/max */ kin_mem->kin_omega_min = OMEGA_MIN; kin_mem->kin_omega_max = OMEGA_MAX; #ifdef SUNDIALS_DEBUG kin_mem->kin_debugfp = stdout; #else kin_mem->kin_debugfp = NULL; #endif /* initialize lrw and liw */ kin_mem->kin_lrw = 17; kin_mem->kin_liw = 22; /* NOTE: needed since KINInit could be called after KINSetConstraints */ kin_mem->kin_lrw1 = 0; kin_mem->kin_liw1 = 0; return((void *) kin_mem); } /* * Function : KINInit * * KINInit allocates memory for a problem or execution of KINSol. * If memory is successfully allocated, KIN_SUCCESS is returned. * Otherwise, an error message is printed and an error flag * returned. 
*/ int KINInit(void *kinmem, KINSysFn func, N_Vector tmpl) { sunindextype liw1, lrw1; KINMem kin_mem; booleantype allocOK, nvectorOK, dotprodSB; /* check kinmem */ if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINInit", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; SUNDIALS_MARK_FUNCTION_BEGIN(KIN_PROFILER); if (func == NULL) { KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINInit", MSG_FUNC_NULL); SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_ILL_INPUT); } /* check if all required vector operations are implemented */ nvectorOK = KINCheckNvector(tmpl); if (!nvectorOK) { KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINInit", MSG_BAD_NVECTOR); SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_ILL_INPUT); } /* set space requirements for one N_Vector */ if (tmpl->ops->nvspace != NULL) { N_VSpace(tmpl, &lrw1, &liw1); kin_mem->kin_lrw1 = lrw1; kin_mem->kin_liw1 = liw1; } else { kin_mem->kin_lrw1 = 0; kin_mem->kin_liw1 = 0; } /* allocate necessary vectors */ allocOK = KINAllocVectors(kin_mem, tmpl); if (!allocOK) { KINProcessError(kin_mem, KIN_MEM_FAIL, "KINSOL", "KINInit", MSG_MEM_FAIL); free(kin_mem); kin_mem = NULL; SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_MEM_FAIL); } /* copy the input parameter into KINSol state */ kin_mem->kin_func = func; /* set the linear solver addresses to NULL */ kin_mem->kin_linit = NULL; kin_mem->kin_lsetup = NULL; kin_mem->kin_lsolve = NULL; kin_mem->kin_lfree = NULL; kin_mem->kin_lmem = NULL; /* initialize the QRData and set the QRAdd function if Anderson Acceleration is being used */ if (kin_mem->kin_m_aa != 0) { dotprodSB = SUNFALSE; if ((kin_mem->kin_vtemp2->ops->nvdotprodlocal || kin_mem->kin_vtemp2->ops->nvdotprodmultilocal) && kin_mem->kin_vtemp2->ops->nvdotprodmultiallreduce) dotprodSB = SUNTRUE; if (kin_mem->kin_orth_aa == KIN_ORTH_MGS) { kin_mem->kin_qr_func = (SUNQRAddFn) SUNQRAdd_MGS; kin_mem->kin_qr_data->vtemp = kin_mem->kin_vtemp2; } else if 
(kin_mem->kin_orth_aa == KIN_ORTH_ICWY) { if (dotprodSB) kin_mem->kin_qr_func = (SUNQRAddFn) SUNQRAdd_ICWY_SB; else kin_mem->kin_qr_func = (SUNQRAddFn) SUNQRAdd_ICWY; kin_mem->kin_qr_data->vtemp = kin_mem->kin_vtemp2; kin_mem->kin_qr_data->vtemp2 = kin_mem->kin_vtemp3; kin_mem->kin_qr_data->temp_array = kin_mem->kin_T_aa; } else if (kin_mem->kin_orth_aa == KIN_ORTH_CGS2) { kin_mem->kin_qr_func = (SUNQRAddFn) SUNQRAdd_CGS2; kin_mem->kin_qr_data->vtemp = kin_mem->kin_vtemp2; kin_mem->kin_qr_data->vtemp2 = kin_mem->kin_vtemp3; kin_mem->kin_qr_data->temp_array = kin_mem->kin_cv; } else if (kin_mem->kin_orth_aa == KIN_ORTH_DCGS2) { if (dotprodSB) kin_mem->kin_qr_func = (SUNQRAddFn) SUNQRAdd_DCGS2_SB; else kin_mem->kin_qr_func = (SUNQRAddFn) SUNQRAdd_DCGS2; kin_mem->kin_qr_data->vtemp = kin_mem->kin_vtemp2; kin_mem->kin_qr_data->vtemp2 = kin_mem->kin_vtemp3; kin_mem->kin_qr_data->temp_array = kin_mem->kin_cv; } } /* problem memory has been successfully allocated */ kin_mem->kin_MallocDone = SUNTRUE; SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Main solver function * ----------------------------------------------------------------- */ /* * Function : KINSol * * KINSol (main KINSOL driver routine) manages the computational * process of computing an approximate solution of the nonlinear * system F(uu) = 0. 
The KINSol routine calls the following * subroutines: * * KINSolInit checks if initial guess satisfies user-supplied * constraints and initializes linear solver * * KINLinSolDrv interfaces with linear solver to find a * solution of the system J(uu)*x = b (calculate * Newton step) * * KINFullNewton/KINLineSearch implement the global strategy * * KINForcingTerm computes the forcing term (eta) * * KINStop determines if an approximate solution has been found */ int KINSol(void *kinmem, N_Vector u, int strategy_in, N_Vector u_scale, N_Vector f_scale) { realtype fnormp, f1normp, epsmin; KINMem kin_mem; int ret, sflag; booleantype maxStepTaken; /* intialize to avoid compiler warning messages */ maxStepTaken = SUNFALSE; f1normp = fnormp = -ONE; /* initialize epsmin to avoid compiler warning message */ epsmin = ZERO; /* check for kinmem non-NULL */ if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSol", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; SUNDIALS_MARK_FUNCTION_BEGIN(KIN_PROFILER); if(kin_mem->kin_MallocDone == SUNFALSE) { KINProcessError(NULL, KIN_NO_MALLOC, "KINSOL", "KINSol", MSG_NO_MALLOC); SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_NO_MALLOC); } /* load input arguments */ kin_mem->kin_uu = u; kin_mem->kin_uscale = u_scale; kin_mem->kin_fscale = f_scale; kin_mem->kin_globalstrategy = strategy_in; /* CSW: Call fixed point solver if requested. Note that this should probably be forked off to a FPSOL solver instead of kinsol in the future. 
*/ if ( kin_mem->kin_globalstrategy == KIN_FP ) { if (kin_mem->kin_uu == NULL) { KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSol", MSG_UU_NULL); SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_ILL_INPUT); } if (kin_mem->kin_constraintsSet != SUNFALSE) { KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSol", MSG_CONSTRAINTS_NOTOK); SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_ILL_INPUT); } if (kin_mem->kin_printfl > 0) KINPrintInfo(kin_mem, PRNT_TOL, "KINSOL", "KINSol", INFO_TOL, kin_mem->kin_scsteptol, kin_mem->kin_fnormtol); kin_mem->kin_nfe = kin_mem->kin_nnilset = kin_mem->kin_nnilset_sub = kin_mem->kin_nni = kin_mem->kin_nbcf = kin_mem->kin_nbktrk = 0; ret = KINFP(kin_mem); switch(ret) { case KIN_SYSFUNC_FAIL: KINProcessError(kin_mem, KIN_SYSFUNC_FAIL, "KINSOL", "KINSol", MSG_SYSFUNC_FAILED); break; case KIN_MAXITER_REACHED: KINProcessError(kin_mem, KIN_MAXITER_REACHED, "KINSOL", "KINSol", MSG_MAXITER_REACHED); break; } SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(ret); } /* initialize solver */ ret = KINSolInit(kin_mem); if (ret != KIN_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(ret); } kin_mem->kin_ncscmx = 0; /* Note: The following logic allows the choice of whether or not to force a call to the linear solver setup upon a given call to KINSol */ if (kin_mem->kin_noInitSetup) kin_mem->kin_sthrsh = ONE; else kin_mem->kin_sthrsh = TWO; /* if eps is to be bounded from below, set the bound */ if (kin_mem->kin_inexact_ls && !(kin_mem->kin_noMinEps)) epsmin = POINT01 * kin_mem->kin_fnormtol; /* if omega is zero at this point, make sure it will be evaluated at each iteration based on the provided min/max bounds and the current function norm. */ if (kin_mem->kin_omega == ZERO) kin_mem->kin_eval_omega = SUNTRUE; else kin_mem->kin_eval_omega = SUNFALSE; /* CSW: Call fixed point solver for Picard method if requested. Note that this should probably be forked off to a part of an FPSOL solver instead of kinsol in the future. 
*/ if ( kin_mem->kin_globalstrategy == KIN_PICARD ) { if (kin_mem->kin_gval == NULL) { kin_mem->kin_gval = N_VClone(kin_mem->kin_unew); if (kin_mem->kin_gval == NULL) { KINProcessError(kin_mem, KIN_MEM_FAIL, "KINSOL", "KINSol", MSG_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(KIN_MEM_FAIL); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } ret = KINPicardAA(kin_mem); SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(ret); } for(;;){ kin_mem->kin_retry_nni = SUNFALSE; kin_mem->kin_nni++; /* calculate the epsilon (stopping criteria for iterative linear solver) for this iteration based on eta from the routine KINForcingTerm */ if (kin_mem->kin_inexact_ls) { kin_mem->kin_eps = (kin_mem->kin_eta + kin_mem->kin_uround) * kin_mem->kin_fnorm; if(!(kin_mem->kin_noMinEps)) kin_mem->kin_eps = SUNMAX(epsmin, kin_mem->kin_eps); } repeat_nni: /* call the appropriate routine to calculate an acceptable step pp */ sflag = 0; if (kin_mem->kin_globalstrategy == KIN_NONE) { /* Full Newton Step*/ /* call KINLinSolDrv to calculate the (approximate) Newton step, pp */ ret = KINLinSolDrv(kin_mem); if (ret != KIN_SUCCESS) break; sflag = KINFullNewton(kin_mem, &fnormp, &f1normp, &maxStepTaken); /* if sysfunc failed unrecoverably, stop */ if ((sflag == KIN_SYSFUNC_FAIL) || (sflag == KIN_REPTD_SYSFUNC_ERR)) { ret = sflag; break; } } else if (kin_mem->kin_globalstrategy == KIN_LINESEARCH) { /* Line Search */ /* call KINLinSolDrv to calculate the (approximate) Newton step, pp */ ret = KINLinSolDrv(kin_mem); if (ret != KIN_SUCCESS) break; sflag = KINLineSearch(kin_mem, &fnormp, &f1normp, &maxStepTaken); /* if sysfunc failed unrecoverably, stop */ if ((sflag == KIN_SYSFUNC_FAIL) || (sflag == KIN_REPTD_SYSFUNC_ERR)) { ret = sflag; break; } /* if too many beta condition failures, then stop iteration */ if (kin_mem->kin_nbcf > kin_mem->kin_mxnbcf) { ret = KIN_LINESEARCH_BCFAIL; break; } } if ( (kin_mem->kin_globalstrategy != KIN_PICARD) && 
(kin_mem->kin_globalstrategy != KIN_FP) ) { /* evaluate eta by calling the forcing term routine */ if (kin_mem->kin_callForcingTerm) KINForcingTerm(kin_mem, fnormp); kin_mem->kin_fnorm = fnormp; /* call KINStop to check if tolerances where met by this iteration */ ret = KINStop(kin_mem, maxStepTaken, sflag); if (ret == RETRY_ITERATION) { kin_mem->kin_retry_nni = SUNTRUE; goto repeat_nni; } } /* update uu after the iteration */ N_VScale(ONE, kin_mem->kin_unew, kin_mem->kin_uu); kin_mem->kin_f1norm = f1normp; /* print the current nni, fnorm, and nfe values if printfl > 0 */ if (kin_mem->kin_printfl > 0) KINPrintInfo(kin_mem, PRNT_NNI, "KINSOL", "KINSol", INFO_NNI, kin_mem->kin_nni, kin_mem->kin_nfe, kin_mem->kin_fnorm); if (ret != CONTINUE_ITERATIONS) break; fflush(kin_mem->kin_errfp); } /* end of loop; return */ if (kin_mem->kin_printfl > 0) KINPrintInfo(kin_mem, PRNT_RETVAL, "KINSOL", "KINSol", INFO_RETVAL, ret); switch(ret) { case KIN_SYSFUNC_FAIL: KINProcessError(kin_mem, KIN_SYSFUNC_FAIL, "KINSOL", "KINSol", MSG_SYSFUNC_FAILED); break; case KIN_REPTD_SYSFUNC_ERR: KINProcessError(kin_mem, KIN_REPTD_SYSFUNC_ERR, "KINSOL", "KINSol", MSG_SYSFUNC_REPTD); break; case KIN_LSETUP_FAIL: KINProcessError(kin_mem, KIN_LSETUP_FAIL, "KINSOL", "KINSol", MSG_LSETUP_FAILED); break; case KIN_LSOLVE_FAIL: KINProcessError(kin_mem, KIN_LSOLVE_FAIL, "KINSOL", "KINSol", MSG_LSOLVE_FAILED); break; case KIN_LINSOLV_NO_RECOVERY: KINProcessError(kin_mem, KIN_LINSOLV_NO_RECOVERY, "KINSOL", "KINSol", MSG_LINSOLV_NO_RECOVERY); break; case KIN_LINESEARCH_NONCONV: KINProcessError(kin_mem, KIN_LINESEARCH_NONCONV, "KINSOL", "KINSol", MSG_LINESEARCH_NONCONV); break; case KIN_LINESEARCH_BCFAIL: KINProcessError(kin_mem, KIN_LINESEARCH_BCFAIL, "KINSOL", "KINSol", MSG_LINESEARCH_BCFAIL); break; case KIN_MAXITER_REACHED: KINProcessError(kin_mem, KIN_MAXITER_REACHED, "KINSOL", "KINSol", MSG_MAXITER_REACHED); break; case KIN_MXNEWT_5X_EXCEEDED: KINProcessError(kin_mem, KIN_MXNEWT_5X_EXCEEDED, "KINSOL", 
"KINSol", MSG_MXNEWT_5X_EXCEEDED); break; } SUNDIALS_MARK_FUNCTION_END(KIN_PROFILER); return(ret); } /* * ----------------------------------------------------------------- * Deallocation function * ----------------------------------------------------------------- */ /* * Function : KINFree * * This routine frees the problem memory allocated by KINInit. * Such memory includes all the vectors allocated by * KINAllocVectors, and the memory lmem for the linear solver * (deallocated by a call to lfree). */ void KINFree(void **kinmem) { KINMem kin_mem; if (*kinmem == NULL) return; kin_mem = (KINMem) (*kinmem); KINFreeVectors(kin_mem); /* call lfree if non-NULL */ if (kin_mem->kin_lfree != NULL) kin_mem->kin_lfree(kin_mem); free(*kinmem); *kinmem = NULL; } /* * ================================================================= * PRIVATE FUNCTIONS * ================================================================= */ /* * Function : KINCheckNvector * * This routine checks if all required vector operations are * implemented (excluding those required by KINConstraint). If all * necessary operations are present, then KINCheckNvector returns * SUNTRUE. Otherwise, SUNFALSE is returned. */ static booleantype KINCheckNvector(N_Vector tmpl) { if ((tmpl->ops->nvclone == NULL) || (tmpl->ops->nvdestroy == NULL) || (tmpl->ops->nvlinearsum == NULL) || (tmpl->ops->nvprod == NULL) || (tmpl->ops->nvdiv == NULL) || (tmpl->ops->nvscale == NULL) || (tmpl->ops->nvabs == NULL) || (tmpl->ops->nvinv == NULL) || (tmpl->ops->nvmaxnorm == NULL) || (tmpl->ops->nvmin == NULL) || (tmpl->ops->nvwl2norm == NULL)) return(SUNFALSE); else return(SUNTRUE); } /* * ----------------------------------------------------------------- * Memory allocation/deallocation * ----------------------------------------------------------------- */ /* * Function : KINAllocVectors * * This routine allocates the KINSol vectors. If all memory * allocations are successful, KINAllocVectors returns SUNTRUE. 
* Otherwise all allocated memory is freed and KINAllocVectors * returns SUNFALSE. */ static booleantype KINAllocVectors(KINMem kin_mem, N_Vector tmpl) { /* allocate unew, fval, pp, vtemp1 and vtemp2. */ /* allocate df, dg, q, for Anderson Acceleration, Broyden and EN */ /* allocate L, for Low Sync Anderson Acceleration */ if (kin_mem->kin_unew == NULL) { kin_mem->kin_unew = N_VClone(tmpl); if (kin_mem->kin_unew == NULL) return(SUNFALSE); kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } if (kin_mem->kin_fval == NULL) { kin_mem->kin_fval = N_VClone(tmpl); if (kin_mem->kin_fval == NULL) { N_VDestroy(kin_mem->kin_unew); kin_mem->kin_liw -= kin_mem->kin_liw1; kin_mem->kin_lrw -= kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } if (kin_mem->kin_pp == NULL) { kin_mem->kin_pp = N_VClone(tmpl); if (kin_mem->kin_pp == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); kin_mem->kin_liw -= 2*kin_mem->kin_liw1; kin_mem->kin_lrw -= 2*kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } if (kin_mem->kin_vtemp1 == NULL) { kin_mem->kin_vtemp1 = N_VClone(tmpl); if (kin_mem->kin_vtemp1 == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); kin_mem->kin_liw -= 3*kin_mem->kin_liw1; kin_mem->kin_lrw -= 3*kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } if (kin_mem->kin_vtemp2 == NULL) { kin_mem->kin_vtemp2 = N_VClone(tmpl); if (kin_mem->kin_vtemp2 == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); kin_mem->kin_liw -= 4*kin_mem->kin_liw1; kin_mem->kin_lrw -= 4*kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } /* Vectors for Anderson 
acceleration */ if (kin_mem->kin_m_aa) { if (kin_mem->kin_R_aa == NULL) { kin_mem->kin_R_aa = (realtype *) malloc((kin_mem->kin_m_aa*kin_mem->kin_m_aa) * sizeof(realtype)); if (kin_mem->kin_R_aa == NULL) { KINProcessError(kin_mem, 0, "KINSOL", "KINAllocVectors", MSG_MEM_FAIL); N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); kin_mem->kin_liw -= 5*kin_mem->kin_liw1; kin_mem->kin_lrw -= 5*kin_mem->kin_lrw1; return(KIN_MEM_FAIL); } } if (kin_mem->kin_gamma_aa == NULL) { kin_mem->kin_gamma_aa = (realtype *) malloc(kin_mem->kin_m_aa * sizeof(realtype)); if (kin_mem->kin_gamma_aa == NULL) { KINProcessError(kin_mem, 0, "KINSOL", "KINAllocVectors", MSG_MEM_FAIL); N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); kin_mem->kin_liw -= 5*kin_mem->kin_liw1; kin_mem->kin_lrw -= 5*kin_mem->kin_lrw1; return(KIN_MEM_FAIL); } } if (kin_mem->kin_ipt_map == NULL) { kin_mem->kin_ipt_map = (long int *) malloc(kin_mem->kin_m_aa * sizeof(long int)); if (kin_mem->kin_ipt_map == NULL) { KINProcessError(kin_mem, 0, "KINSOL", "KINAllocVectors", MSG_MEM_FAIL); N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); kin_mem->kin_liw -= 5*kin_mem->kin_liw1; kin_mem->kin_lrw -= 5*kin_mem->kin_lrw1; return(KIN_MEM_FAIL); } } if (kin_mem->kin_cv == NULL) { kin_mem->kin_cv = (realtype *) malloc(2 * (kin_mem->kin_m_aa+1) * sizeof(realtype)); if (kin_mem->kin_cv == NULL) { KINProcessError(kin_mem, 0, "KINSOL", "KINAllocVectors", MSG_MEM_FAIL); N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); 
free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); kin_mem->kin_liw -= 5*kin_mem->kin_liw1; kin_mem->kin_lrw -= 5*kin_mem->kin_lrw1; return(KIN_MEM_FAIL); } } if (kin_mem->kin_Xv == NULL) { kin_mem->kin_Xv = (N_Vector *) malloc(2 * (kin_mem->kin_m_aa+1) * sizeof(N_Vector)); if (kin_mem->kin_Xv == NULL) { KINProcessError(kin_mem, 0, "KINSOL", "KINAllocVectors", MSG_MEM_FAIL); N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); kin_mem->kin_liw -= 5*kin_mem->kin_liw1; kin_mem->kin_lrw -= 5*kin_mem->kin_lrw1; return(KIN_MEM_FAIL); } } if (kin_mem->kin_fold_aa == NULL) { kin_mem->kin_fold_aa = N_VClone(tmpl); if (kin_mem->kin_fold_aa == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); kin_mem->kin_liw -= 5*kin_mem->kin_liw1; kin_mem->kin_lrw -= 5*kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } if (kin_mem->kin_gold_aa == NULL) { kin_mem->kin_gold_aa = N_VClone(tmpl); if (kin_mem->kin_gold_aa == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); N_VDestroy(kin_mem->kin_fold_aa); kin_mem->kin_liw -= 6*kin_mem->kin_liw1; kin_mem->kin_lrw -= 6*kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } if (kin_mem->kin_df_aa == 
NULL) { kin_mem->kin_df_aa = N_VCloneVectorArray((int) kin_mem->kin_m_aa,tmpl); if (kin_mem->kin_df_aa == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); N_VDestroy(kin_mem->kin_fold_aa); N_VDestroy(kin_mem->kin_gold_aa); kin_mem->kin_liw -= 7*kin_mem->kin_liw1; kin_mem->kin_lrw -= 7*kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_m_aa * kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_m_aa * kin_mem->kin_lrw1; } if (kin_mem->kin_dg_aa == NULL) { kin_mem->kin_dg_aa = N_VCloneVectorArray((int) kin_mem->kin_m_aa,tmpl); if (kin_mem->kin_dg_aa == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); N_VDestroy(kin_mem->kin_fold_aa); N_VDestroy(kin_mem->kin_gold_aa); N_VDestroyVectorArray(kin_mem->kin_df_aa, (int) kin_mem->kin_m_aa); kin_mem->kin_liw -= (7 + kin_mem->kin_m_aa) * kin_mem->kin_liw1; kin_mem->kin_lrw -= (7 + kin_mem->kin_m_aa) * kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_m_aa * kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_m_aa * kin_mem->kin_lrw1; } if (kin_mem->kin_q_aa == NULL) { kin_mem->kin_q_aa = N_VCloneVectorArray((int) kin_mem->kin_m_aa,tmpl); if (kin_mem->kin_q_aa == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); N_VDestroy(kin_mem->kin_fold_aa); 
N_VDestroy(kin_mem->kin_gold_aa); N_VDestroyVectorArray(kin_mem->kin_df_aa, (int) kin_mem->kin_m_aa); N_VDestroyVectorArray(kin_mem->kin_dg_aa, (int) kin_mem->kin_m_aa); kin_mem->kin_liw -= (7 + 2 * kin_mem->kin_m_aa) * kin_mem->kin_liw1; kin_mem->kin_lrw -= (7 + 2 * kin_mem->kin_m_aa) * kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_m_aa * kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_m_aa * kin_mem->kin_lrw1; } if (kin_mem->kin_qr_data == NULL) { kin_mem->kin_qr_data = (SUNQRData) malloc(sizeof *kin_mem->kin_qr_data); if (kin_mem->kin_qr_data == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); N_VDestroy(kin_mem->kin_fold_aa); N_VDestroy(kin_mem->kin_gold_aa); N_VDestroyVectorArray(kin_mem->kin_df_aa, (int) kin_mem->kin_m_aa); N_VDestroyVectorArray(kin_mem->kin_dg_aa, (int) kin_mem->kin_m_aa); N_VDestroyVectorArray(kin_mem->kin_q_aa, (int) kin_mem->kin_m_aa); kin_mem->kin_liw -= (7 + 3 * kin_mem->kin_m_aa) * kin_mem->kin_liw1; kin_mem->kin_lrw -= (7 + 3 * kin_mem->kin_m_aa) * kin_mem->kin_lrw1; return(KIN_MEM_FAIL); } kin_mem->kin_liw += kin_mem->kin_m_aa * kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_m_aa * kin_mem->kin_lrw1; } if (kin_mem->kin_orth_aa != KIN_ORTH_MGS) { if (kin_mem->kin_vtemp3 == NULL) { kin_mem->kin_vtemp3 = N_VClone(tmpl); if (kin_mem->kin_vtemp3 == NULL) { N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); N_VDestroy(kin_mem->kin_fold_aa); N_VDestroy(kin_mem->kin_gold_aa); N_VDestroyVectorArray(kin_mem->kin_df_aa, (int) 
kin_mem->kin_m_aa); N_VDestroyVectorArray(kin_mem->kin_dg_aa, (int) kin_mem->kin_m_aa); N_VDestroyVectorArray(kin_mem->kin_q_aa, (int) kin_mem->kin_m_aa); free(kin_mem->kin_qr_data); kin_mem->kin_liw -= (7 + 3 * kin_mem->kin_m_aa) * kin_mem->kin_liw1; kin_mem->kin_lrw -= (7 + 3 * kin_mem->kin_m_aa) * kin_mem->kin_lrw1; return(SUNFALSE); } kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_lrw += kin_mem->kin_lrw1; } if (kin_mem->kin_orth_aa == KIN_ORTH_ICWY) { if (kin_mem->kin_T_aa == NULL) { kin_mem->kin_T_aa = (realtype *) malloc(((kin_mem->kin_m_aa*kin_mem->kin_m_aa)) * sizeof(realtype)); if (kin_mem->kin_T_aa == NULL) { KINProcessError(kin_mem, 0, "KINSOL", "KINAllocVectors", MSG_MEM_FAIL); N_VDestroy(kin_mem->kin_unew); N_VDestroy(kin_mem->kin_fval); N_VDestroy(kin_mem->kin_pp); N_VDestroy(kin_mem->kin_vtemp1); N_VDestroy(kin_mem->kin_vtemp2); free(kin_mem->kin_R_aa); free(kin_mem->kin_gamma_aa); free(kin_mem->kin_ipt_map); free(kin_mem->kin_cv); free(kin_mem->kin_Xv); N_VDestroy(kin_mem->kin_fold_aa); N_VDestroy(kin_mem->kin_gold_aa); N_VDestroyVectorArray(kin_mem->kin_df_aa, (int) kin_mem->kin_m_aa); N_VDestroyVectorArray(kin_mem->kin_dg_aa, (int) kin_mem->kin_m_aa); N_VDestroyVectorArray(kin_mem->kin_q_aa, (int) kin_mem->kin_m_aa); free(kin_mem->kin_qr_data); N_VDestroy(kin_mem->kin_vtemp3); kin_mem->kin_liw -= (8 + 3 * kin_mem->kin_m_aa) * kin_mem->kin_liw1; kin_mem->kin_lrw -= (8 + 3 * kin_mem->kin_m_aa) * kin_mem->kin_lrw1; return(KIN_MEM_FAIL); } } } } } return(SUNTRUE); } /* * KINFreeVectors * * This routine frees the KINSol vectors allocated by * KINAllocVectors. 
*/

static void KINFreeVectors(KINMem kin_mem)
{
  /* Single N_Vectors: destroy, NULL the pointer, and give back one
     (lrw1, liw1) unit of workspace each, mirroring KINAllocVectors. */

  if (kin_mem->kin_unew != NULL) {
    N_VDestroy(kin_mem->kin_unew);
    kin_mem->kin_unew = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  if (kin_mem->kin_fval != NULL) {
    N_VDestroy(kin_mem->kin_fval);
    kin_mem->kin_fval = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  if (kin_mem->kin_pp != NULL) {
    N_VDestroy(kin_mem->kin_pp);
    kin_mem->kin_pp = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  if (kin_mem->kin_vtemp1 != NULL) {
    N_VDestroy(kin_mem->kin_vtemp1);
    kin_mem->kin_vtemp1 = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  if (kin_mem->kin_vtemp2 != NULL) {
    N_VDestroy(kin_mem->kin_vtemp2);
    kin_mem->kin_vtemp2 = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  if (kin_mem->kin_vtemp3 != NULL) {
    N_VDestroy(kin_mem->kin_vtemp3);
    kin_mem->kin_vtemp3 = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  if (kin_mem->kin_gval != NULL) {
    N_VDestroy(kin_mem->kin_gval);
    kin_mem->kin_gval = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  /* Anderson-acceleration scalar workspaces: plain malloc'd arrays,
     not tracked in the lrw/liw counters, so only free + NULL. */

  if (kin_mem->kin_R_aa != NULL) {
    free(kin_mem->kin_R_aa);
    kin_mem->kin_R_aa = NULL;
  }

  if (kin_mem->kin_gamma_aa != NULL) {
    free(kin_mem->kin_gamma_aa);
    kin_mem->kin_gamma_aa = NULL;
  }

  if (kin_mem->kin_ipt_map != NULL) {
    free(kin_mem->kin_ipt_map);
    kin_mem->kin_ipt_map = NULL;
  }

  if (kin_mem->kin_cv != NULL) {
    free(kin_mem->kin_cv);
    kin_mem->kin_cv = NULL;
  }

  if (kin_mem->kin_Xv != NULL) {
    free(kin_mem->kin_Xv);
    kin_mem->kin_Xv = NULL;
  }

  if (kin_mem->kin_fold_aa != NULL) {
    N_VDestroy(kin_mem->kin_fold_aa);
    kin_mem->kin_fold_aa = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  if (kin_mem->kin_gold_aa != NULL) {
    N_VDestroy(kin_mem->kin_gold_aa);
    kin_mem->kin_gold_aa = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  /* Anderson-acceleration vector arrays: m_aa vectors apiece, so the
     workspace counters are decremented by m_aa units. */

  if (kin_mem->kin_df_aa != NULL) {
    N_VDestroyVectorArray(kin_mem->kin_df_aa, (int) kin_mem->kin_m_aa);
    kin_mem->kin_df_aa = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_m_aa * kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_m_aa * kin_mem->kin_liw1;
  }

  if (kin_mem->kin_dg_aa != NULL) {
    N_VDestroyVectorArray(kin_mem->kin_dg_aa, (int) kin_mem->kin_m_aa);
    kin_mem->kin_dg_aa = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_m_aa * kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_m_aa * kin_mem->kin_liw1;
  }

  if (kin_mem->kin_q_aa != NULL) {
    N_VDestroyVectorArray(kin_mem->kin_q_aa, (int) kin_mem->kin_m_aa);
    kin_mem->kin_q_aa = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_m_aa * kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_m_aa * kin_mem->kin_liw1;
  }

  if (kin_mem->kin_qr_data != NULL) {
    free(kin_mem->kin_qr_data);
    kin_mem->kin_qr_data = NULL;
  }

  if (kin_mem->kin_T_aa != NULL) {
    free(kin_mem->kin_T_aa);
    kin_mem->kin_T_aa = NULL;
  }

  if (kin_mem->kin_constraints != NULL) {
    N_VDestroy(kin_mem->kin_constraints);
    kin_mem->kin_constraints = NULL;
    kin_mem->kin_lrw -= kin_mem->kin_lrw1;
    kin_mem->kin_liw -= kin_mem->kin_liw1;
  }

  return;
}

/*
 * -----------------------------------------------------------------
 * Initial setup
 * -----------------------------------------------------------------
 */

/*
 * KINSolInit
 *
 * KINSolInit initializes the problem for the specific input
 * received in this call to KINSol (which calls KINSolInit). All
 * problem specification inputs are checked for errors. If any error
 * occurs during initialization, it is reported to the file whose
 * file pointer is errfp.
*
 * The possible return values for KINSolInit are:
 *   KIN_SUCCESS : indicates a normal initialization
 *
 *   KIN_ILL_INPUT : indicates that an input error has been found
 *
 *   KIN_INITIAL_GUESS_OK : indicates that the guess uu
 *                          satisfied the system func(uu) = 0
 *                          within the tolerances specified
 */

static int KINSolInit(KINMem kin_mem)
{
  int retval;
  realtype fmax;

  /* check for illegal input parameters */

  if (kin_mem->kin_uu == NULL) {
    KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_UU_NULL);
    return(KIN_ILL_INPUT);
  }

  /* check for valid strategy */

  if ( (kin_mem->kin_globalstrategy != KIN_NONE) &&
       (kin_mem->kin_globalstrategy != KIN_LINESEARCH) &&
       (kin_mem->kin_globalstrategy != KIN_PICARD) &&
       (kin_mem->kin_globalstrategy != KIN_FP) ) {
    KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_BAD_GLSTRAT);
    return(KIN_ILL_INPUT);
  }

  /* scaling vectors must exist and be strictly positive */

  if (kin_mem->kin_uscale == NULL) {
    KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_BAD_USCALE);
    return(KIN_ILL_INPUT);
  }

  if (N_VMin(kin_mem->kin_uscale) <= ZERO){
    KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_USCALE_NONPOSITIVE);
    return(KIN_ILL_INPUT);
  }

  if (kin_mem->kin_fscale == NULL) {
    KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_BAD_FSCALE);
    return(KIN_ILL_INPUT);
  }

  if (N_VMin(kin_mem->kin_fscale) <= ZERO){
    KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_FSCALE_NONPOSITIVE);
    return(KIN_ILL_INPUT);
  }

  /* constraints are not supported by the Picard and fixed-point strategies */

  if ( (kin_mem->kin_constraints != NULL) &&
       ( (kin_mem->kin_globalstrategy == KIN_PICARD) ||
         (kin_mem->kin_globalstrategy == KIN_FP) ) ) {
    KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_CONSTRAINTS_NOTOK);
    return(KIN_ILL_INPUT);
  }

  /* set the constraints flag */

  if (kin_mem->kin_constraints == NULL)
    kin_mem->kin_constraintsSet = SUNFALSE;
  else {
    kin_mem->kin_constraintsSet = SUNTRUE;
    /* constraint handling needs these two optional N_Vector ops */
    if ((kin_mem->kin_constraints->ops->nvconstrmask == NULL) ||
        (kin_mem->kin_constraints->ops->nvminquotient == NULL)) {
      KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_BAD_NVECTOR);
      return(KIN_ILL_INPUT);
    }
  }

  /* check the initial guess uu against the constraints */

  if (kin_mem->kin_constraintsSet) {
    if (!N_VConstrMask(kin_mem->kin_constraints, kin_mem->kin_uu, kin_mem->kin_vtemp1)) {
      KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSolInit", MSG_INITIAL_CNSTRNT);
      return(KIN_ILL_INPUT);
    }
  }

  /* all error checking is complete at this point */

  if (kin_mem->kin_printfl > 0)
    KINPrintInfo(kin_mem, PRNT_TOL, "KINSOL", "KINSolInit", INFO_TOL,
                 kin_mem->kin_scsteptol, kin_mem->kin_fnormtol);

  /* calculate the default value for mxnewtstep (maximum Newton step) */

  if (kin_mem->kin_mxnstepin == ZERO)
    kin_mem->kin_mxnewtstep = THOUSAND * N_VWL2Norm(kin_mem->kin_uu, kin_mem->kin_uscale);
  else
    kin_mem->kin_mxnewtstep = kin_mem->kin_mxnstepin;

  if (kin_mem->kin_mxnewtstep < ONE)
    kin_mem->kin_mxnewtstep = ONE;

  /* additional set-up for inexact linear solvers */

  if (kin_mem->kin_inexact_ls) {

    /* set up the coefficients for the eta calculation */

    kin_mem->kin_callForcingTerm = (kin_mem->kin_etaflag != KIN_ETACONSTANT);

    /* this value is always used for choice #1 */

    if (kin_mem->kin_etaflag == KIN_ETACHOICE1)
      kin_mem->kin_eta_alpha = (ONE + SUNRsqrt(FIVE)) * HALF;

    /* initial value for eta set to 0.5 for other than the
       KIN_ETACONSTANT option */

    if (kin_mem->kin_etaflag != KIN_ETACONSTANT)
      kin_mem->kin_eta = HALF;

    /* disable residual monitoring if using an inexact linear solver */

    kin_mem->kin_noResMon = SUNTRUE;

  } else {

    kin_mem->kin_callForcingTerm = SUNFALSE;

  }

  /* initialize counters */

  kin_mem->kin_nfe = kin_mem->kin_nnilset = kin_mem->kin_nnilset_sub =
    kin_mem->kin_nni = kin_mem->kin_nbcf = kin_mem->kin_nbktrk = 0;

  /* see if the initial guess uu satisfies the nonlinear system */

  retval = kin_mem->kin_func(kin_mem->kin_uu, kin_mem->kin_fval, kin_mem->kin_user_data);
  kin_mem->kin_nfe++;

  if (retval < 0) {
    /* unrecoverable failure of the user system function */
    KINProcessError(kin_mem, KIN_SYSFUNC_FAIL, "KINSOL", "KINSolInit",
                    MSG_SYSFUNC_FAILED);
    return(KIN_SYSFUNC_FAIL);
  } else if (retval > 0) {
    /* recoverable failure on the very first evaluation */
    KINProcessError(kin_mem, KIN_FIRST_SYSFUNC_ERR, "KINSOL", "KINSolInit",
                    MSG_SYSFUNC_FIRST);
    return(KIN_FIRST_SYSFUNC_ERR);
  }

  /* early exit when the guess already satisfies the system to 1% of tol */

  fmax = KINScFNorm(kin_mem, kin_mem->kin_fval, kin_mem->kin_fscale);
  if (fmax <= (POINT01 * kin_mem->kin_fnormtol)) {
    kin_mem->kin_fnorm = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
    return(KIN_INITIAL_GUESS_OK);
  }

  if (kin_mem->kin_printfl > 1)
    KINPrintInfo(kin_mem, PRNT_FMAX, "KINSOL", "KINSolInit", INFO_FMAX, fmax);

  /* initialize the linear solver if linit != NULL */

  if (kin_mem->kin_linit != NULL) {
    retval = kin_mem->kin_linit(kin_mem);
    if (retval != 0) {
      KINProcessError(kin_mem, KIN_LINIT_FAIL, "KINSOL", "KINSolInit", MSG_LINIT_FAIL);
      return(KIN_LINIT_FAIL);
    }
  }

  /* initialize the L2 (Euclidean) norms of f for the linear iteration steps */

  kin_mem->kin_fnorm = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
  kin_mem->kin_f1norm = HALF * kin_mem->kin_fnorm * kin_mem->kin_fnorm;
  kin_mem->kin_fnorm_sub = kin_mem->kin_fnorm;

  if (kin_mem->kin_printfl > 0)
    KINPrintInfo(kin_mem, PRNT_NNI, "KINSOL", "KINSolInit", INFO_NNI,
                 kin_mem->kin_nni, kin_mem->kin_nfe, kin_mem->kin_fnorm);

  /* problem has now been successfully initialized */

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Step functions
 * -----------------------------------------------------------------
 */

/*
 * KINLinSolDrv
 *
 * This routine handles the process of solving for the approximate
 * solution of the Newton equations in the Newton iteration.
 * Subsequent routines handle the nonlinear aspects of its
 * application.
*/

static int KINLinSolDrv(KINMem kin_mem)
{
  N_Vector x, b;
  int retval;

  /* force a Jacobian/preconditioner update when msbset nonlinear
     iterations have passed since the last setup call */

  if ((kin_mem->kin_nni - kin_mem->kin_nnilset) >= kin_mem->kin_msbset) {
    kin_mem->kin_sthrsh = TWO;
    kin_mem->kin_update_fnorm_sub = SUNTRUE;
  }

  for(;;){

    kin_mem->kin_jacCurrent = SUNFALSE;

    /* refresh Jacobian data if the threshold demands it */

    if ((kin_mem->kin_sthrsh > ONEPT5) && (kin_mem->kin_lsetup != NULL)) {
      retval = kin_mem->kin_lsetup(kin_mem);
      kin_mem->kin_jacCurrent = SUNTRUE;
      kin_mem->kin_nnilset = kin_mem->kin_nni;
      kin_mem->kin_nnilset_sub = kin_mem->kin_nni;
      if (retval != 0) return(KIN_LSETUP_FAIL);
    }

    /* rename vectors for readability */

    b = kin_mem->kin_unew;
    x = kin_mem->kin_pp;

    /* load b with the current value of -fval */

    N_VScale(-ONE, kin_mem->kin_fval, b);

    /* call the generic 'lsolve' routine to solve the system Jx = b */

    retval = kin_mem->kin_lsolve(kin_mem, x, b, &(kin_mem->kin_sJpnorm),
                                 &(kin_mem->kin_sFdotJp));

    if (retval == 0) return(KIN_SUCCESS);
    else if (retval < 0) return(KIN_LSOLVE_FAIL);
    else if ((kin_mem->kin_lsetup == NULL) || (kin_mem->kin_jacCurrent))
      return(KIN_LINSOLV_NO_RECOVERY);

    /* loop back only if the linear solver setup is in use
       and Jacobian information is not current */

    kin_mem->kin_sthrsh = TWO;

  }
}

/*
 * KINFullNewton
 *
 * This routine is the main driver for the Full Newton
 * algorithm. Its purpose is to compute unew = uu + pp in the
 * direction pp from uu, taking the full Newton step. The
 * step may be constrained if the constraint conditions are
 * violated, or if the norm of pp is greater than mxnewtstep.
*/

static int KINFullNewton(KINMem kin_mem, realtype *fnormp, realtype *f1normp,
                         booleantype *maxStepTaken)
{
  realtype pnorm, ratio;
  booleantype fOK;
  int ircvr, retval;

  *maxStepTaken = SUNFALSE;
  pnorm = N_VWL2Norm(kin_mem->kin_pp, kin_mem->kin_uscale);
  ratio = ONE;

  /* cap the step at mxnewtstep, tracking the total shrink factor in ratio */

  if (pnorm > kin_mem->kin_mxnewtstep) {
    ratio = kin_mem->kin_mxnewtstep / pnorm;
    N_VScale(ratio, kin_mem->kin_pp, kin_mem->kin_pp);
    pnorm = kin_mem->kin_mxnewtstep;
  }

  if (kin_mem->kin_printfl > 0)
    KINPrintInfo(kin_mem, PRNT_PNORM, "KINSOL", "KINFullNewton", INFO_PNORM, pnorm);

  /* If constraints are active, then constrain the step accordingly */

  kin_mem->kin_stepl = pnorm;
  kin_mem->kin_stepmul = ONE;
  if (kin_mem->kin_constraintsSet) {
    retval = KINConstraint(kin_mem);
    if (retval == CONSTR_VIOLATED) {
      /* Apply stepmul set in KINConstraint */
      ratio *= kin_mem->kin_stepmul;
      N_VScale(kin_mem->kin_stepmul, kin_mem->kin_pp, kin_mem->kin_pp);
      pnorm *= kin_mem->kin_stepmul;
      kin_mem->kin_stepl = pnorm;
      if (kin_mem->kin_printfl > 0)
        KINPrintInfo(kin_mem, PRNT_PNORM, "KINSOL", "KINFullNewton", INFO_PNORM, pnorm);
      if (pnorm <= kin_mem->kin_scsteptol) {
        N_VLinearSum(ONE, kin_mem->kin_uu, ONE, kin_mem->kin_pp, kin_mem->kin_unew);
        return(STEP_TOO_SMALL);}
    }
  }

  /* Attempt (at most MAX_RECVR times) to evaluate function at the new iterate */

  fOK = SUNFALSE;

  for (ircvr = 1; ircvr <= MAX_RECVR; ircvr++) {

    /* compute the iterate unew = uu + pp */
    N_VLinearSum(ONE, kin_mem->kin_uu, ONE, kin_mem->kin_pp, kin_mem->kin_unew);

    /* evaluate func(unew) and its norm, and return */
    retval = kin_mem->kin_func(kin_mem->kin_unew, kin_mem->kin_fval, kin_mem->kin_user_data);
    kin_mem->kin_nfe++;

    /* if func was successful, accept pp */
    if (retval == 0) {fOK = SUNTRUE; break;}

    /* if func failed unrecoverably, give up */
    else if (retval < 0) return(KIN_SYSFUNC_FAIL);

    /* func failed recoverably; cut step in half and try again */
    ratio *= HALF;
    N_VScale(HALF, kin_mem->kin_pp, kin_mem->kin_pp);
    pnorm *= HALF;
    kin_mem->kin_stepl = pnorm;
  }

  /* If func() failed recoverably MAX_RECVR times, give up */

  if (!fOK) return(KIN_REPTD_SYSFUNC_ERR);

  /* Evaluate function norms */

  *fnormp = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
  *f1normp = HALF * (*fnormp) * (*fnormp);

  /* scale sFdotJp and sJpnorm by ratio for later use in KINForcingTerm */

  kin_mem->kin_sFdotJp *= ratio;
  kin_mem->kin_sJpnorm *= ratio;

  if (kin_mem->kin_printfl > 1)
    KINPrintInfo(kin_mem, PRNT_FNORM, "KINSOL", "KINFullNewton", INFO_FNORM, *fnormp);

  if (pnorm > (POINT99 * kin_mem->kin_mxnewtstep)) *maxStepTaken = SUNTRUE;

  return(KIN_SUCCESS);
}

/*
 * KINLineSearch
 *
 * The routine KINLineSearch implements the LineSearch algorithm.
 * Its purpose is to find unew = uu + rl * pp in the direction pp
 * from uu so that:
 *                                    t
 *  func(unew) <= func(uu) + alpha * g  (unew - uu) (alpha = 1.e-4)
 *
 *    and
 *                                   t
 *  func(unew) >= func(uu) + beta * g  (unew - uu) (beta = 0.9)
 *
 * where 0 < rlmin <= rl <= rlmax.
 *
 * Note:
 *            mxnewtstep
 *  rlmax = ----------------   if uu+pp is feasible
 *          ||uscale*pp||_L2
 *
 *  rlmax = 1   otherwise
 *
 *    and
 *
 *                     scsteptol
 *  rlmin = --------------------------------
 *          ||          pp              ||
 *          || ------------------------ ||_L-infinity
 *          || (1/uscale + SUNRabs(uu)) ||
 *
 *
 * If the system function fails unrecoverably at any time, KINLineSearch
 * returns KIN_SYSFUNC_FAIL which will halt the solver.
 *
 * We attempt to correct recoverable system function failures only before
 * the alpha-condition loop; i.e. when the solution is updated with the
 * full Newton step (possibly reduced due to constraint violations).
 * Once we find a feasible pp, we assume that any update up to pp is
 * feasible.
*
 * If the step size is limited due to constraint violations and/or
 * recoverable system function failures, we set rlmax=1 to ensure
 * that the update remains feasible during the attempts to enforce
 * the beta-condition (this is not an issue while enforcing the alpha
 * condition, as rl can only decrease from 1 at that stage)
 */

static int KINLineSearch(KINMem kin_mem, realtype *fnormp, realtype *f1normp,
                         booleantype *maxStepTaken)
{
  realtype pnorm, ratio, slpi, rlmin, rlength, rl, rlmax, rldiff;
  realtype rltmp, rlprev, pt1trl, f1nprv, rllo, rlinc, alpha, beta;
  realtype alpha_cond, beta_cond, rl_a, tmp1, rl_b, tmp2, disc;
  int ircvr, nbktrk_l, retval;
  booleantype firstBacktrack, fOK;

  /* Initializations */

  nbktrk_l = 0;          /* local backtracking counter */
  ratio    = ONE;        /* step change ratio          */
  alpha    = POINT0001;
  beta     = POINT9;

  firstBacktrack = SUNTRUE;
  *maxStepTaken = SUNFALSE;

  rlprev = f1nprv = ZERO;

  /* Compute length of Newton step */

  pnorm = N_VWL2Norm(kin_mem->kin_pp, kin_mem->kin_uscale);
  rlmax = kin_mem->kin_mxnewtstep / pnorm;
  kin_mem->kin_stepl = pnorm;

  /* If the full Newton step is too large, set it to the maximum allowable value */

  if(pnorm > kin_mem->kin_mxnewtstep ) {
    ratio = kin_mem->kin_mxnewtstep / pnorm;
    N_VScale(ratio, kin_mem->kin_pp, kin_mem->kin_pp);
    pnorm = kin_mem->kin_mxnewtstep;
    rlmax = ONE;
    kin_mem->kin_stepl = pnorm;
  }

  /* If constraint checking is activated, check and correct violations */

  kin_mem->kin_stepmul = ONE;

  if(kin_mem->kin_constraintsSet){
    retval = KINConstraint(kin_mem);
    if(retval == CONSTR_VIOLATED){
      /* Apply stepmul set in KINConstraint */
      N_VScale(kin_mem->kin_stepmul, kin_mem->kin_pp, kin_mem->kin_pp);
      ratio *= kin_mem->kin_stepmul;
      pnorm *= kin_mem->kin_stepmul;
      rlmax = ONE;
      kin_mem->kin_stepl = pnorm;
      if (kin_mem->kin_printfl > 0)
        KINPrintInfo(kin_mem, PRNT_PNORM1, "KINSOL", "KINLineSearch", INFO_PNORM1, pnorm);
      if (pnorm <= kin_mem->kin_scsteptol) {
        N_VLinearSum(ONE, kin_mem->kin_uu, ONE, kin_mem->kin_pp, kin_mem->kin_unew);
        return(STEP_TOO_SMALL);}
    }
  }

  /* Attempt (at most MAX_RECVR times) to evaluate function at the new iterate */

  fOK = SUNFALSE;

  for (ircvr = 1; ircvr <= MAX_RECVR; ircvr++) {

    /* compute the iterate unew = uu + pp */
    N_VLinearSum(ONE, kin_mem->kin_uu, ONE, kin_mem->kin_pp, kin_mem->kin_unew);

    /* evaluate func(unew) and its norm, and return */
    retval = kin_mem->kin_func(kin_mem->kin_unew, kin_mem->kin_fval, kin_mem->kin_user_data);
    kin_mem->kin_nfe++;

    /* if func was successful, accept pp */
    if (retval == 0) {fOK = SUNTRUE; break;}

    /* if func failed unrecoverably, give up */
    else if (retval < 0) return(KIN_SYSFUNC_FAIL);

    /* func failed recoverably; cut step in half and try again */
    N_VScale(HALF, kin_mem->kin_pp, kin_mem->kin_pp);
    ratio *= HALF;
    pnorm *= HALF;
    rlmax = ONE;
    kin_mem->kin_stepl = pnorm;
  }

  /* If func() failed recoverably MAX_RECVR times, give up */

  if (!fOK) return(KIN_REPTD_SYSFUNC_ERR);

  /* Evaluate function norms */

  *fnormp = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
  *f1normp = HALF * (*fnormp) * (*fnormp) ;

  /* Estimate the line search value rl (lambda) to satisfy both ALPHA and BETA conditions */

  slpi = kin_mem->kin_sFdotJp * ratio;
  rlength = KINScSNorm(kin_mem, kin_mem->kin_pp, kin_mem->kin_uu);
  rlmin = (kin_mem->kin_scsteptol) / rlength;
  rl = ONE;

  if (kin_mem->kin_printfl > 2)
    KINPrintInfo(kin_mem, PRNT_LAM, "KINSOL", "KINLineSearch", INFO_LAM,
                 rlmin, kin_mem->kin_f1norm, pnorm);

  /* Loop until the ALPHA condition is satisfied. Terminate if rl becomes too small */

  for(;;) {

    /* Evaluate test quantity */

    alpha_cond = kin_mem->kin_f1norm + (alpha * slpi * rl);

    if (kin_mem->kin_printfl > 2)
      KINPrintInfo(kin_mem, PRNT_ALPHA, "KINSOL", "KINLinesearch", INFO_ALPHA,
                   *fnormp, *f1normp, alpha_cond, rl);

    /* If ALPHA condition is satisfied, break out from loop */

    if ((*f1normp) <= alpha_cond) break;

    /* Backtracking. Use quadratic fit the first time and cubic fit afterwards. */

    if (firstBacktrack) {

      rltmp = -slpi / (TWO * ((*f1normp) - kin_mem->kin_f1norm - slpi));
      firstBacktrack = SUNFALSE;

    } else {

      /* coefficients of the interpolating cubic in rl */
      tmp1 = (*f1normp) - kin_mem->kin_f1norm - (rl * slpi);
      tmp2 = f1nprv - kin_mem->kin_f1norm - (rlprev * slpi);
      rl_a = ((ONE / (rl * rl)) * tmp1) - ((ONE / (rlprev * rlprev)) * tmp2);
      rl_b = ((-rlprev / (rl * rl)) * tmp1) + ((rl / (rlprev * rlprev)) * tmp2);
      tmp1 = ONE / (rl - rlprev);
      rl_a *= tmp1;
      rl_b *= tmp1;
      disc = (rl_b * rl_b) - (THREE * rl_a * slpi);

      if (SUNRabs(rl_a) < kin_mem->kin_uround) {
        /* cubic is actually just a quadratic (rl_a ~ 0) */
        rltmp = -slpi / (TWO * rl_b);
      } else {
        /* real cubic */
        rltmp = (-rl_b + SUNRsqrt(disc)) / (THREE * rl_a);
      }
    }
    if (rltmp > (HALF * rl)) rltmp = HALF * rl;

    /* Set new rl (do not allow a reduction by a factor larger than 10) */

    rlprev = rl;
    f1nprv = (*f1normp);
    pt1trl = POINT1 * rl;
    rl = SUNMAX(pt1trl, rltmp);
    nbktrk_l++;

    /* Update unew and re-evaluate function */

    N_VLinearSum(ONE, kin_mem->kin_uu, rl, kin_mem->kin_pp, kin_mem->kin_unew);

    retval = kin_mem->kin_func(kin_mem->kin_unew, kin_mem->kin_fval, kin_mem->kin_user_data);
    kin_mem->kin_nfe++;
    if (retval != 0) return(KIN_SYSFUNC_FAIL);

    *fnormp = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
    *f1normp = HALF * (*fnormp) * (*fnormp) ;

    /* Check if rl (lambda) is too small */

    if (rl < rlmin) {
      /* unew sufficiently distinct from uu cannot be found.
         copy uu into unew (step remains unchanged) and
         return STEP_TOO_SMALL */
      N_VScale(ONE, kin_mem->kin_uu, kin_mem->kin_unew);
      return(STEP_TOO_SMALL);
    }

  } /* end ALPHA condition loop */

  /* ALPHA condition is satisfied. Now check the BETA condition */

  beta_cond = kin_mem->kin_f1norm + (beta * slpi * rl);

  if ((*f1normp) < beta_cond) {

    /* BETA condition not satisfied */

    if ((rl == ONE) && (pnorm < kin_mem->kin_mxnewtstep)) {

      /* expand the step (rl doubles, capped at rlmax) while progress
         remains "too good" for the beta condition */

      do {

        rlprev = rl;
        f1nprv = *f1normp;
        rl = SUNMIN((TWO * rl), rlmax);
        nbktrk_l++;

        N_VLinearSum(ONE, kin_mem->kin_uu, rl, kin_mem->kin_pp, kin_mem->kin_unew);
        retval = kin_mem->kin_func(kin_mem->kin_unew, kin_mem->kin_fval, kin_mem->kin_user_data);
        kin_mem->kin_nfe++;
        if (retval != 0) return(KIN_SYSFUNC_FAIL);
        *fnormp = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
        *f1normp = HALF * (*fnormp) * (*fnormp);

        alpha_cond = kin_mem->kin_f1norm + (alpha * slpi * rl);
        beta_cond = kin_mem->kin_f1norm + (beta * slpi * rl);

        if (kin_mem->kin_printfl > 2)
          KINPrintInfo(kin_mem, PRNT_BETA, "KINSOL", "KINLineSearch", INFO_BETA,
                       *f1normp, beta_cond, rl);

      } while (((*f1normp) <= alpha_cond) &&
               ((*f1normp) < beta_cond) && (rl < rlmax));

    } /* end if (rl == ONE) block */

    if ((rl < ONE) || ((rl > ONE) && (*f1normp > alpha_cond))) {

      /* bisect between the last two rl values to land inside the
         alpha/beta window */

      rllo = SUNMIN(rl, rlprev);
      rldiff = SUNRabs(rlprev - rl);

      do {

        rlinc = HALF * rldiff;
        rl = rllo + rlinc;
        nbktrk_l++;

        N_VLinearSum(ONE, kin_mem->kin_uu, rl, kin_mem->kin_pp, kin_mem->kin_unew);
        retval = kin_mem->kin_func(kin_mem->kin_unew, kin_mem->kin_fval, kin_mem->kin_user_data);
        kin_mem->kin_nfe++;
        if (retval != 0) return(KIN_SYSFUNC_FAIL);
        *fnormp = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
        *f1normp = HALF * (*fnormp) * (*fnormp);

        alpha_cond = kin_mem->kin_f1norm + (alpha * slpi * rl);
        beta_cond = kin_mem->kin_f1norm + (beta * slpi * rl);

        if (kin_mem->kin_printfl > 2)
          KINPrintInfo(kin_mem, PRNT_ALPHABETA, "KINSOL", "KINLineSearch",
                       INFO_ALPHABETA, *f1normp, alpha_cond, beta_cond, rl);

        if ((*f1normp) > alpha_cond) rldiff = rlinc;
        else if (*f1normp < beta_cond) {
          rllo = rl;
          rldiff = rldiff - rlinc;
        }

      } while ((*f1normp > alpha_cond) ||
               ((*f1normp < beta_cond) && (rldiff >= rlmin)));

      if ( (*f1normp < beta_cond) || ((rldiff < rlmin) && (*f1normp > alpha_cond)) ) {

        /* beta condition could not be satisfied or rldiff too
           small and alpha_cond not satisfied, so set unew to
           last u value that satisfied the alpha condition and
           continue */

        N_VLinearSum(ONE, kin_mem->kin_uu, rllo, kin_mem->kin_pp, kin_mem->kin_unew);
        retval = kin_mem->kin_func(kin_mem->kin_unew, kin_mem->kin_fval, kin_mem->kin_user_data);
        kin_mem->kin_nfe++;
        if (retval != 0) return(KIN_SYSFUNC_FAIL);
        *fnormp = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
        *f1normp = HALF * (*fnormp) * (*fnormp);

        /* increment beta-condition failures counter */

        kin_mem->kin_nbcf++;
      }

    }  /* end of if (rl < ONE) block */

  } /* end of if (f1normp < beta_cond) block */

  /* Update number of backtracking operations */

  kin_mem->kin_nbktrk += nbktrk_l;

  if (kin_mem->kin_printfl > 1)
    KINPrintInfo(kin_mem, PRNT_ADJ, "KINSOL", "KINLineSearch", INFO_ADJ, nbktrk_l);

  /* scale sFdotJp and sJpnorm by rl * ratio for later use in KINForcingTerm */

  kin_mem->kin_sFdotJp = kin_mem->kin_sFdotJp * rl * ratio;
  kin_mem->kin_sJpnorm = kin_mem->kin_sJpnorm * rl * ratio;

  if ((rl * pnorm) > (POINT99 * kin_mem->kin_mxnewtstep)) *maxStepTaken = SUNTRUE;

  return(KIN_SUCCESS);
}

/*
 * Function : KINConstraint
 *
 * This routine checks if the proposed solution vector uu + pp
 * violates any constraints. If a constraint is violated, then the
 * scalar stepmul is determined such that uu + stepmul * pp does
 * not violate any constraints.
 *
 * Note: This routine is called by the functions
 *       KINLineSearch and KINFullNewton.
*/

static int KINConstraint(KINMem kin_mem)
{
  /* vtemp1 = proposed iterate uu + pp */

  N_VLinearSum(ONE, kin_mem->kin_uu, ONE, kin_mem->kin_pp, kin_mem->kin_vtemp1);

  /* if vtemp1[i] violates constraint[i] then vtemp2[i] = 1
     else vtemp2[i] = 0 (vtemp2 is the mask vector) */

  if(N_VConstrMask(kin_mem->kin_constraints, kin_mem->kin_vtemp1, kin_mem->kin_vtemp2))
    return(KIN_SUCCESS);

  /* vtemp1[i] = SUNRabs(pp[i]) */

  N_VAbs(kin_mem->kin_pp, kin_mem->kin_vtemp1);

  /* consider vtemp1[i] only if vtemp2[i] = 1 (constraint violated) */

  N_VProd(kin_mem->kin_vtemp2, kin_mem->kin_vtemp1, kin_mem->kin_vtemp1);

  N_VAbs(kin_mem->kin_uu, kin_mem->kin_vtemp2);

  /* 0.9 * smallest |uu[i]|/|pp[i]| over violated entries keeps
     uu + stepmul*pp strictly inside the constraint set */

  kin_mem->kin_stepmul = POINT9 * N_VMinQuotient(kin_mem->kin_vtemp2, kin_mem->kin_vtemp1);

  return(CONSTR_VIOLATED);
}

/*
 * -----------------------------------------------------------------
 * Stopping tests
 * -----------------------------------------------------------------
 */

/*
 * KINStop
 *
 * This routine checks the current iterate unew to see if the
 * system func(unew) = 0 is satisfied by a variety of tests.
*
 * strategy is one of KIN_NONE or KIN_LINESEARCH
 * sflag is one of KIN_SUCCESS, STEP_TOO_SMALL
 */

static int KINStop(KINMem kin_mem, booleantype maxStepTaken, int sflag)
{
  realtype fmax, rlength, omexp;
  N_Vector delta;

  /* Check for too small a step */

  if (sflag == STEP_TOO_SMALL) {

    if ((kin_mem->kin_lsetup != NULL) && !(kin_mem->kin_jacCurrent)) {
      /* If the Jacobian is out of date, update it and retry */
      kin_mem->kin_sthrsh = TWO;
      return(RETRY_ITERATION);
    } else {
      /* Give up */
      if (kin_mem->kin_globalstrategy == KIN_NONE) return(KIN_STEP_LT_STPTOL);
      else return(KIN_LINESEARCH_NONCONV);
    }

  }

  /* Check tolerance on scaled function norm at the current iterate */

  fmax = KINScFNorm(kin_mem, kin_mem->kin_fval, kin_mem->kin_fscale);

  if (kin_mem->kin_printfl > 1)
    KINPrintInfo(kin_mem, PRNT_FMAX, "KINSOL", "KINStop", INFO_FMAX, fmax);

  if (fmax <= kin_mem->kin_fnormtol) return(KIN_SUCCESS);

  /* Check if the scaled distance between the last two steps is too small */
  /* NOTE: pp used as work space to store this distance */

  delta = kin_mem->kin_pp;
  N_VLinearSum(ONE, kin_mem->kin_unew, -ONE, kin_mem->kin_uu, delta);
  rlength = KINScSNorm(kin_mem, delta, kin_mem->kin_unew);

  if (rlength <= kin_mem->kin_scsteptol) {

    if ((kin_mem->kin_lsetup != NULL) && !(kin_mem->kin_jacCurrent)) {
      /* If the Jacobian is out of date, update it and retry */
      kin_mem->kin_sthrsh = TWO;
      return(CONTINUE_ITERATIONS);
    } else {
      /* give up */
      return(KIN_STEP_LT_STPTOL);
    }

  }

  /* Check if the maximum number of iterations is reached */

  if (kin_mem->kin_nni >= kin_mem->kin_mxiter) return(KIN_MAXITER_REACHED);

  /* Check for consecutive number of steps taken of size mxnewtstep
     and if not maxStepTaken, then set ncscmx to 0 */

  if (maxStepTaken) kin_mem->kin_ncscmx++;
  else kin_mem->kin_ncscmx = 0;

  if (kin_mem->kin_ncscmx == 5) return(KIN_MXNEWT_5X_EXCEEDED);

  /* Proceed according to the type of linear solver used */

  if (kin_mem->kin_inexact_ls) {

    /* We're doing inexact Newton.
       Load threshold for reevaluating the Jacobian. */

    kin_mem->kin_sthrsh = rlength;

  } else if (!(kin_mem->kin_noResMon)) {

    /* We're doing modified Newton and the user did not disable residual
       monitoring. Check if it is time to monitor residual. */

    if ((kin_mem->kin_nni - kin_mem->kin_nnilset_sub) >= kin_mem->kin_msbset_sub) {

      /* Residual monitoring needed */

      kin_mem->kin_nnilset_sub = kin_mem->kin_nni;

      /* If indicated, estimate new OMEGA value */
      if (kin_mem->kin_eval_omega) {
        omexp = SUNMAX(ZERO,((kin_mem->kin_fnorm)/(kin_mem->kin_fnormtol))-ONE);
        kin_mem->kin_omega = (omexp > TWELVE)? kin_mem->kin_omega_max :
          SUNMIN(kin_mem->kin_omega_min * SUNRexp(omexp), kin_mem->kin_omega_max);
      }
      /* Check if making satisfactory progress */

      if (kin_mem->kin_fnorm > kin_mem->kin_omega * kin_mem->kin_fnorm_sub) {

        /* Insufficient progress */
        if ((kin_mem->kin_lsetup != NULL) && !(kin_mem->kin_jacCurrent)) {
          /* If the Jacobian is out of date, update it and retry */
          kin_mem->kin_sthrsh = TWO;
          return(CONTINUE_ITERATIONS);
        } else {
          /* Otherwise, we cannot do anything, so just return. */
        }

      } else {

        /* Sufficient progress */
        kin_mem->kin_fnorm_sub = kin_mem->kin_fnorm;
        kin_mem->kin_sthrsh = ONE;

      }

    } else {

      /* Residual monitoring not needed */

      /* Reset sthrsh */
      if (kin_mem->kin_retry_nni || kin_mem->kin_update_fnorm_sub)
        kin_mem->kin_fnorm_sub = kin_mem->kin_fnorm;
      if (kin_mem->kin_update_fnorm_sub) kin_mem->kin_update_fnorm_sub = SUNFALSE;
      kin_mem->kin_sthrsh = ONE;

    }

  }

  /* if made it to here, then the iteration process is not finished
     so return CONTINUE_ITERATIONS flag */

  return(CONTINUE_ITERATIONS);
}

/*
 * KINForcingTerm
 *
 * This routine computes eta, the scaling factor in the linear
 * convergence stopping tolerance eps when choice #1 or choice #2
 * forcing terms are used. Eta is computed here for all but the
 * first iterative step, which is set to the default in routine
 * KINSolInit.
 *
 * This routine was written by Homer Walker of Utah State
 * University with subsequent modifications by Allan Taylor @ LLNL.
* * It is based on the concepts of the paper 'Choosing the forcing * terms in an inexact Newton method', SIAM J Sci Comput, 17 * (1996), pp 16 - 32, or Utah State University Research Report * 6/94/75 of the same title. */ static void KINForcingTerm(KINMem kin_mem, realtype fnormp) { realtype eta_max, eta_min, eta_safe, linmodel_norm; eta_max = POINT9; eta_min = POINT0001; eta_safe = HALF; /* choice #1 forcing term */ if (kin_mem->kin_etaflag == KIN_ETACHOICE1) { /* compute the norm of f + Jp , scaled L2 norm */ linmodel_norm = SUNRsqrt((kin_mem->kin_fnorm * kin_mem->kin_fnorm) + (TWO * kin_mem->kin_sFdotJp) + (kin_mem->kin_sJpnorm * kin_mem->kin_sJpnorm)); /* form the safeguarded for choice #1 */ eta_safe = SUNRpowerR(kin_mem->kin_eta, kin_mem->kin_eta_alpha); kin_mem->kin_eta = SUNRabs(fnormp - linmodel_norm) / kin_mem->kin_fnorm; } /* choice #2 forcing term */ if (kin_mem->kin_etaflag == KIN_ETACHOICE2) { eta_safe = kin_mem->kin_eta_gamma * SUNRpowerR(kin_mem->kin_eta, kin_mem->kin_eta_alpha); kin_mem->kin_eta = kin_mem->kin_eta_gamma * SUNRpowerR((fnormp / kin_mem->kin_fnorm), kin_mem->kin_eta_alpha); } /* apply safeguards */ if(eta_safe < POINT1) eta_safe = ZERO; kin_mem->kin_eta = SUNMAX(kin_mem->kin_eta, eta_safe); kin_mem->kin_eta = SUNMAX(kin_mem->kin_eta, eta_min); kin_mem->kin_eta = SUNMIN(kin_mem->kin_eta, eta_max); return; } /* * ----------------------------------------------------------------- * Norm functions * ----------------------------------------------------------------- */ /* * Function : KINScFNorm * * This routine computes the max norm for scaled vectors. The * scaling vector is scale, and the vector of which the norm is to * be determined is vv. The returned value, fnormval, is the * resulting scaled vector norm. This routine uses N_Vector * functions from the vector module. 
*/ static realtype KINScFNorm(KINMem kin_mem, N_Vector v, N_Vector scale) { N_VProd(scale, v, kin_mem->kin_vtemp1); return(N_VMaxNorm(kin_mem->kin_vtemp1)); } /* * Function : KINScSNorm * * This routine computes the max norm of the scaled steplength, ss. * Here ucur is the current step and usc is the u scale factor. */ static realtype KINScSNorm(KINMem kin_mem, N_Vector v, N_Vector u) { realtype length; N_VInv(kin_mem->kin_uscale, kin_mem->kin_vtemp1); N_VAbs(u, kin_mem->kin_vtemp2); N_VLinearSum(ONE, kin_mem->kin_vtemp1, ONE, kin_mem->kin_vtemp2, kin_mem->kin_vtemp1); N_VDiv(v, kin_mem->kin_vtemp1, kin_mem->kin_vtemp1); length = N_VMaxNorm(kin_mem->kin_vtemp1); return(length); } /* * ================================================================= * KINSOL Verbose output functions * ================================================================= */ /* * KINPrintInfo * * KINPrintInfo is a high level error handling function * Based on the value info_code, it composes the info message and * passes it to the info handler function. */ void KINPrintInfo(KINMem kin_mem, int info_code, const char *module, const char *fname, const char *msgfmt, ...) 
{ va_list ap; char msg[256], msg1[40]; char retstr[30]; int ret; /* Initialize argument processing (msgfrmt is the last required argument) */ va_start(ap, msgfmt); if (info_code == PRNT_RETVAL) { /* If info_code = PRNT_RETVAL, decode the numeric value */ ret = va_arg(ap, int); switch(ret) { case KIN_SUCCESS: sprintf(retstr, "KIN_SUCCESS"); break; case KIN_SYSFUNC_FAIL: sprintf(retstr, "KIN_SYSFUNC_FAIL"); break; case KIN_REPTD_SYSFUNC_ERR: sprintf(retstr, "KIN_REPTD_SYSFUNC_ERR"); break; case KIN_STEP_LT_STPTOL: sprintf(retstr, "KIN_STEP_LT_STPTOL"); break; case KIN_LINESEARCH_NONCONV: sprintf(retstr, "KIN_LINESEARCH_NONCONV"); break; case KIN_LINESEARCH_BCFAIL: sprintf(retstr, "KIN_LINESEARCH_BCFAIL"); break; case KIN_MAXITER_REACHED: sprintf(retstr, "KIN_MAXITER_REACHED"); break; case KIN_MXNEWT_5X_EXCEEDED: sprintf(retstr, "KIN_MXNEWT_5X_EXCEEDED"); break; case KIN_LINSOLV_NO_RECOVERY: sprintf(retstr, "KIN_LINSOLV_NO_RECOVERY"); break; case KIN_LSETUP_FAIL: sprintf(retstr, "KIN_PRECONDSET_FAILURE"); break; case KIN_LSOLVE_FAIL: sprintf(retstr, "KIN_PRECONDSOLVE_FAILURE"); break; } /* Compose the message */ sprintf(msg1, msgfmt, ret); sprintf(msg,"%s (%s)",msg1,retstr); } else { /* Compose the message */ vsprintf(msg, msgfmt, ap); } /* call the info message handler */ kin_mem->kin_ihfun(module, fname, msg, kin_mem->kin_ih_data); /* finalize argument processing */ va_end(ap); return; } /* * KINInfoHandler * * This is the default KINSOL info handling function. 
* It sends the info message to the stream pointed to by kin_infofp */ void KINInfoHandler(const char *module, const char *function, char *msg, void *data) { KINMem kin_mem; /* data points to kin_mem here */ kin_mem = (KINMem) data; #ifndef NO_FPRINTF_OUTPUT if (kin_mem->kin_infofp != NULL) { STAN_SUNDIALS_FPRINTF(kin_mem->kin_infofp,"\n[%s] %s\n",module, function); STAN_SUNDIALS_FPRINTF(kin_mem->kin_infofp," %s\n",msg); } #endif } /* * ================================================================= * KINSOL Error Handling functions * ================================================================= */ /* * KINProcessError * * KINProcessError is a high level error handling function. * - If cv_mem==NULL it prints the error message to stderr. * - Otherwise, it sets up and calls the error handling function * pointed to by cv_ehfun. */ void KINProcessError(KINMem kin_mem, int error_code, const char *module, const char *fname, const char *msgfmt, ...) { va_list ap; char msg[256]; /* Initialize the argument pointer variable (msgfmt is the last required argument to KINProcessError) */ va_start(ap, msgfmt); /* Compose the message */ vsprintf(msg, msgfmt, ap); if (kin_mem == NULL) { /* We write to stderr */ #ifndef NO_FPRINTF_OUTPUT STAN_SUNDIALS_FPRINTF(stderr, "\n[%s ERROR] %s\n ", module, fname); STAN_SUNDIALS_FPRINTF(stderr, "%s\n\n", msg); #endif } else { /* We can call ehfun */ kin_mem->kin_ehfun(error_code, module, fname, msg, kin_mem->kin_eh_data); } /* Finalize argument processing */ va_end(ap); return; } /* * KINErrHandler * * This is the default error handling function. 
 * It sends the error message to the stream pointed to by kin_errfp
 */

void KINErrHandler(int error_code, const char *module,
                   const char *function, char *msg, void *data)
{
  KINMem kin_mem;
  char err_type[10];

  /* data points to kin_mem here */

  kin_mem = (KINMem) data;

  /* tag the message as a warning or a hard error */
  if (error_code == KIN_WARNING)
    sprintf(err_type,"WARNING");
  else
    sprintf(err_type,"ERROR");

#ifndef NO_FPRINTF_OUTPUT
  if (kin_mem->kin_errfp != NULL) {
    STAN_SUNDIALS_FPRINTF(kin_mem->kin_errfp,"\n[%s %s] %s\n",module,err_type,function);
    STAN_SUNDIALS_FPRINTF(kin_mem->kin_errfp," %s\n\n",msg);
  }
#endif

  return;
}

/*
 * =======================================================================
 * Picard and fixed point solvers
 * =======================================================================
 */

/*
 * KINPicardAA
 *
 * This routine is the main driver for the Picard iteration with
 * accelerated fixed point.
 */

static int KINPicardAA(KINMem kin_mem)
{
  int retval;       /* return value from user func */
  int ret;          /* iteration status            */
  long int iter_aa; /* iteration count for AA      */
  N_Vector delta;   /* temporary workspace vector  */
  realtype epsmin;
  realtype fnormp;

  delta  = kin_mem->kin_vtemp1;
  ret    = CONTINUE_ITERATIONS;
  epsmin = ZERO;
  fnormp = -ONE;

  /* initialize iteration count */
  kin_mem->kin_nni = 0;

  /* if eps is to be bounded from below, set the bound */
  if (kin_mem->kin_inexact_ls && !(kin_mem->kin_noMinEps))
    epsmin = POINT01 * kin_mem->kin_fnormtol;

  while (ret == CONTINUE_ITERATIONS) {

    /* update iteration count */
    kin_mem->kin_nni++;

    /* Update the forcing term for the inexact linear solves */
    if (kin_mem->kin_inexact_ls) {
      kin_mem->kin_eps = (kin_mem->kin_eta + kin_mem->kin_uround) * kin_mem->kin_fnorm;
      if(!(kin_mem->kin_noMinEps)) kin_mem->kin_eps = SUNMAX(epsmin, kin_mem->kin_eps);
    }

    /* evaluate g = uu - L^{-1}func(uu) and return if failed.
       For Picard, assume that the fval vector has been filled
       with an eval of the nonlinear residual prior to this call.
    */
    retval = KINPicardFcnEval(kin_mem, kin_mem->kin_gval, kin_mem->kin_uu,
                              kin_mem->kin_fval);

    if (retval < 0) {
      ret = KIN_SYSFUNC_FAIL;
      break;
    }

    /* compute new solution */
    if (kin_mem->kin_m_aa == 0 || kin_mem->kin_nni - 1 < kin_mem->kin_delay_aa) {
      if (kin_mem->kin_damping) {
        /* damped fixed point: unew = (1-beta) uu + beta gval */
        N_VLinearSum((ONE - kin_mem->kin_beta), kin_mem->kin_uu,
                     kin_mem->kin_beta, kin_mem->kin_gval,
                     kin_mem->kin_unew);
      } else {
        /* standard fixed point */
        N_VScale(ONE, kin_mem->kin_gval, kin_mem->kin_unew);
      }
    } else {
      /* compute iteration count for Anderson acceleration */
      if (kin_mem->kin_delay_aa > 0) {
        iter_aa = kin_mem->kin_nni - 1 - kin_mem->kin_delay_aa;
      } else {
        iter_aa = kin_mem->kin_nni - 1;
      }

      AndersonAcc(kin_mem,                /* kinsol memory            */
                  kin_mem->kin_gval,      /* G(u_cur)       in        */
                  delta,                  /* F(u_cur)       in (temp) */
                  kin_mem->kin_unew,      /* u_new output   out       */
                  kin_mem->kin_uu,        /* u_cur input    in        */
                  iter_aa,                /* AA iteration   in        */
                  kin_mem->kin_R_aa,      /* R matrix       in/out    */
                  kin_mem->kin_gamma_aa); /* gamma vector   in (temp) */
    }

    /* Fill the Newton residual based on the new solution iterate */
    retval = kin_mem->kin_func(kin_mem->kin_unew, kin_mem->kin_fval,
                               kin_mem->kin_user_data);
    kin_mem->kin_nfe++;

    if (retval < 0) {
      ret = KIN_SYSFUNC_FAIL;
      break;
    }

    /* Measure || F(x) ||_max */
    kin_mem->kin_fnorm = KINScFNorm(kin_mem, kin_mem->kin_fval,
                                    kin_mem->kin_fscale);

    if (kin_mem->kin_printfl > 1)
      KINPrintInfo(kin_mem, PRNT_FMAX, "KINSOL", "KINPicardAA",
                   INFO_FMAX, kin_mem->kin_fnorm);

    /* print the current iter, fnorm, and nfe values if printfl > 0 */
    if (kin_mem->kin_printfl > 0)
      KINPrintInfo(kin_mem, PRNT_NNI, "KINSOL", "KINPicardAA",
                   INFO_NNI, kin_mem->kin_nni, kin_mem->kin_nfe,
                   kin_mem->kin_fnorm);

    /* Check if the maximum number of iterations is reached */
    if (kin_mem->kin_nni >= kin_mem->kin_mxiter) {
      ret = KIN_MAXITER_REACHED;
    }
    if (kin_mem->kin_fnorm <= kin_mem->kin_fnormtol) {
      ret = KIN_SUCCESS;
    }

    /* Update the solution. Always return the newest iteration.
       Note this is also consistent with last function evaluation. */
    N_VScale(ONE, kin_mem->kin_unew, kin_mem->kin_uu);

    if (ret == CONTINUE_ITERATIONS && kin_mem->kin_callForcingTerm) {
      /* evaluate eta by calling the forcing term routine */
      fnormp = N_VWL2Norm(kin_mem->kin_fval, kin_mem->kin_fscale);
      KINForcingTerm(kin_mem, fnormp);
    }

    fflush(kin_mem->kin_errfp);

  }  /* end of loop; return */

  if (kin_mem->kin_printfl > 0)
    KINPrintInfo(kin_mem, PRNT_RETVAL, "KINSOL", "KINPicardAA",
                 INFO_RETVAL, ret);

  return(ret);
}

/*
 * KINPicardFcnEval
 *
 * This routine evaluates the Picard fixed point function
 * using the linear solver, gval = u - L^{-1}F(u).
 * The function assumes the user has defined L either through
 * a user-supplied matvec if using a SPILS solver or through
 * a supplied matrix if using a dense solver. This assumption is
 * tested by a check on the strategy and the requisite functionality
 * within the linear solve routines.
 *
 * This routine fills gval = uu - L^{-1}F(uu) given uu and fval = F(uu).
 */

static int KINPicardFcnEval(KINMem kin_mem, N_Vector gval, N_Vector uval,
                            N_Vector fval1)
{
  int retval;

  /* force a Jacobian/preconditioner refresh once msbset nonlinear
     iterations have passed since the last lsetup call */
  if ((kin_mem->kin_nni - kin_mem->kin_nnilset) >= kin_mem->kin_msbset) {
    kin_mem->kin_sthrsh = TWO;
    kin_mem->kin_update_fnorm_sub = SUNTRUE;
  }

  for(;;){

    kin_mem->kin_jacCurrent = SUNFALSE;

    if ((kin_mem->kin_sthrsh > ONEPT5) && (kin_mem->kin_lsetup != NULL)) {
      retval = kin_mem->kin_lsetup(kin_mem);
      kin_mem->kin_jacCurrent = SUNTRUE;
      kin_mem->kin_nnilset = kin_mem->kin_nni;
      kin_mem->kin_nnilset_sub = kin_mem->kin_nni;
      if (retval != 0) return(KIN_LSETUP_FAIL);
    }

    /* call the generic 'lsolve' routine to solve the system Lx = -fval
       Note that we are using gval to hold x.
*/ N_VScale(-ONE, fval1, fval1); retval = kin_mem->kin_lsolve(kin_mem, gval, fval1, &(kin_mem->kin_sJpnorm), &(kin_mem->kin_sFdotJp)); if (retval == 0) { /* Update gval = uval + gval since gval = -L^{-1}F(uu) */ N_VLinearSum(ONE, uval, ONE, gval, gval); return(KIN_SUCCESS); } else if (retval < 0) return(KIN_LSOLVE_FAIL); else if ((kin_mem->kin_lsetup == NULL) || (kin_mem->kin_jacCurrent)) return(KIN_LINSOLV_NO_RECOVERY); /* loop back only if the linear solver setup is in use and matrix information is not current */ kin_mem->kin_sthrsh = TWO; } } /* * KINFP * * This routine is the main driver for the fixed point iteration with * Anderson Acceleration. */ static int KINFP(KINMem kin_mem) { int retval; /* return value from user func */ int ret; /* iteration status */ long int iter_aa; /* iteration count for AA */ realtype tolfac; /* tolerance adjustment factor */ N_Vector delta; /* temporary workspace vector */ delta = kin_mem->kin_vtemp1; ret = CONTINUE_ITERATIONS; tolfac = ONE; #ifdef SUNDIALS_DEBUG_PRINTVEC STAN_SUNDIALS_FPRINTF(kin_mem->kin_debugfp, "KINSOL u_0:\n"); N_VPrintFile(kin_mem->kin_uu, kin_mem->kin_debugfp); #endif /* initialize iteration count */ kin_mem->kin_nni = 0; while (ret == CONTINUE_ITERATIONS) { /* update iteration count */ kin_mem->kin_nni++; /* evaluate func(uu) and return if failed */ retval = kin_mem->kin_func(kin_mem->kin_uu, kin_mem->kin_fval, kin_mem->kin_user_data); kin_mem->kin_nfe++; #ifdef SUNDIALS_DEBUG_PRINTVEC STAN_SUNDIALS_FPRINTF(kin_mem->kin_debugfp, "KINSOL G_%ld:\n", kin_mem->kin_nni - 1); N_VPrintFile(kin_mem->kin_fval, kin_mem->kin_debugfp); #endif if (retval < 0) { ret = KIN_SYSFUNC_FAIL; break; } /* compute new solution */ if (kin_mem->kin_m_aa == 0 || kin_mem->kin_nni - 1 < kin_mem->kin_delay_aa) { if (kin_mem->kin_damping) { /* damped fixed point */ N_VLinearSum((ONE - kin_mem->kin_beta), kin_mem->kin_uu, kin_mem->kin_beta, kin_mem->kin_fval, kin_mem->kin_unew); /* tolerance adjustment */ tolfac = kin_mem->kin_beta; } 
else { /* standard fixed point */ N_VScale(ONE, kin_mem->kin_fval, kin_mem->kin_unew); /* tolerance adjustment */ tolfac = ONE; } } else { /* compute iteration count for Anderson acceleration */ if (kin_mem->kin_delay_aa > 0) { iter_aa = kin_mem->kin_nni - 1 - kin_mem->kin_delay_aa; } else { iter_aa = kin_mem->kin_nni - 1; } /* apply Anderson acceleration */ AndersonAcc(kin_mem, kin_mem->kin_fval, delta, kin_mem->kin_unew, kin_mem->kin_uu, iter_aa, kin_mem->kin_R_aa, kin_mem->kin_gamma_aa); /* tolerance adjustment (first iteration is standard fixed point) */ if (iter_aa == 0 && kin_mem->kin_damping_aa) { tolfac = kin_mem->kin_beta; } else { tolfac = ONE; } } #ifdef SUNDIALS_DEBUG_PRINTVEC STAN_SUNDIALS_FPRINTF(kin_mem->kin_debugfp, "KINSOL u_%ld:\n", kin_mem->kin_nni); N_VPrintFile(kin_mem->kin_unew, kin_mem->kin_debugfp); #endif /* compute change between iterations */ N_VLinearSum(ONE, kin_mem->kin_unew, -ONE, kin_mem->kin_uu, delta); /* measure || g(x) - x || */ kin_mem->kin_fnorm = KINScFNorm(kin_mem, delta, kin_mem->kin_fscale); if (kin_mem->kin_printfl > 1) KINPrintInfo(kin_mem, PRNT_FMAX, "KINSOL", "KINFP", INFO_FMAX, kin_mem->kin_fnorm); /* print the current iter, fnorm, and nfe values if printfl > 0 */ if (kin_mem->kin_printfl > 0) KINPrintInfo(kin_mem, PRNT_NNI, "KINSOL", "KINFP", INFO_NNI, kin_mem->kin_nni, kin_mem->kin_nfe, kin_mem->kin_fnorm); /* Check if the maximum number of iterations is reached */ if (kin_mem->kin_nni >= kin_mem->kin_mxiter) { ret = KIN_MAXITER_REACHED; } if (kin_mem->kin_fnorm <= (tolfac * kin_mem->kin_fnormtol)) { ret = KIN_SUCCESS; } /* Update the solution if taking another iteration or returning the newest iterate. Otherwise return the solution consistent with the last function evaluation. 
*/ if (ret == CONTINUE_ITERATIONS || kin_mem->kin_ret_newest) { N_VScale(ONE, kin_mem->kin_unew, kin_mem->kin_uu); } fflush(kin_mem->kin_errfp); } /* end of loop; return */ if (kin_mem->kin_printfl > 0) KINPrintInfo(kin_mem, PRNT_RETVAL, "KINSOL", "KINFP", INFO_RETVAL, ret); return(ret); } /* * ======================================================================== * Anderson Acceleration * ======================================================================== */ static int AndersonAcc(KINMem kin_mem, N_Vector gval, N_Vector fv, N_Vector x, N_Vector xold, long int iter, realtype *R, realtype *gamma) { int retval; long int i_pt, i, j, lAA; long int *ipt_map; realtype alfa; realtype onembeta; realtype a, b, temp, c, s; booleantype dotprodSB = SUNFALSE; /* local shortcuts for fused vector operation */ int nvec=0; realtype* cv=kin_mem->kin_cv; N_Vector* Xv=kin_mem->kin_Xv; /* local dot product flag for single buffer reductions */ if ((kin_mem->kin_vtemp2->ops->nvdotprodlocal || kin_mem->kin_vtemp2->ops->nvdotprodmultilocal) && kin_mem->kin_vtemp2->ops->nvdotprodmultiallreduce) dotprodSB = SUNTRUE; ipt_map = kin_mem->kin_ipt_map; i_pt = iter-1 - ((iter-1) / kin_mem->kin_m_aa) * kin_mem->kin_m_aa; N_VLinearSum(ONE, gval, -ONE, xold, fv); if (iter > 0) { /* compute dg_new = gval - gval_old */ N_VLinearSum(ONE, gval, -ONE, kin_mem->kin_gold_aa, kin_mem->kin_dg_aa[i_pt]); /* compute df_new = fval - fval_old */ N_VLinearSum(ONE, fv, -ONE, kin_mem->kin_fold_aa, kin_mem->kin_df_aa[i_pt]); } N_VScale(ONE, gval, kin_mem->kin_gold_aa); N_VScale(ONE, fv, kin_mem->kin_fold_aa); /* on first iteration, do fixed point update */ if (iter == 0) { if (kin_mem->kin_damping_aa) { /* damped fixed point */ N_VLinearSum((ONE - kin_mem->kin_beta), xold, kin_mem->kin_beta_aa, gval, x); } else { /* standard fixed point */ N_VScale(ONE, gval, x); } return(0); } /* update data structures based on current iteration index */ if (iter == 1) { /* second iteration */ R[0] = 
      SUNRsqrt(N_VDotProd(kin_mem->kin_df_aa[i_pt], kin_mem->kin_df_aa[i_pt]));
    alfa = ONE/R[0];
    N_VScale(alfa, kin_mem->kin_df_aa[i_pt], kin_mem->kin_q_aa[i_pt]);
    ipt_map[0] = 0;

  } else if (iter <= kin_mem->kin_m_aa) {

    /* another iteration before we've reached maa */
    kin_mem->kin_qr_func(kin_mem->kin_q_aa, R, kin_mem->kin_df_aa[i_pt],
                         (int) iter-1, (int) kin_mem->kin_m_aa,
                         (void *)kin_mem->kin_qr_data);

    /* update iteration map */
    for (j = 0; j < iter; j++) {
      ipt_map[j] = j;
    }

  } else {

    /* we've filled the acceleration subspace, so start recycling */

    /* Delete left-most column vector from QR factorization: apply a
       Givens rotation to each subdiagonal entry introduced by dropping
       column 0, and update the Q vectors to match */
    for (i=0; i < kin_mem->kin_m_aa-1; i++) {
      a = R[(i+1)*kin_mem->kin_m_aa + i];
      b = R[(i+1)*kin_mem->kin_m_aa + i+1];
      temp = SUNRsqrt(a*a + b*b);
      c = a / temp;
      s = b / temp;
      R[(i+1)*kin_mem->kin_m_aa + i] = temp;
      R[(i+1)*kin_mem->kin_m_aa + i+1] = ZERO;
      /* OK to re-use temp */
      if (i < kin_mem->kin_m_aa-1) {
        for (j = i+2; j < kin_mem->kin_m_aa; j++) {
          a = R[j*kin_mem->kin_m_aa + i];
          b = R[j*kin_mem->kin_m_aa + i+1];
          temp = c * a + s * b;
          R[j*kin_mem->kin_m_aa + i+1] = -s*a + c*b;
          R[j*kin_mem->kin_m_aa + i] = temp;
        }
      }
      N_VLinearSum(c, kin_mem->kin_q_aa[i], s, kin_mem->kin_q_aa[i+1], kin_mem->kin_vtemp2);
      N_VLinearSum(-s, kin_mem->kin_q_aa[i], c, kin_mem->kin_q_aa[i+1], kin_mem->kin_q_aa[i+1]);
      N_VScale(ONE, kin_mem->kin_vtemp2, kin_mem->kin_q_aa[i]);
    }

    /* Shift R to the left by one. */
    for (i = 1; i < kin_mem->kin_m_aa; i++) {
      for (j = 0; j < kin_mem->kin_m_aa-1; j++) {
        R[(i-1)*kin_mem->kin_m_aa + j] = R[i*kin_mem->kin_m_aa + j];
      }
    }

    /* If ICWY orthogonalization, then update T */
    if (kin_mem->kin_orth_aa == KIN_ORTH_ICWY) {
      if (dotprodSB) {
        /* NOTE(review): this guard reads loop index i left over from the
           shift loop above (i == m_aa on exit), not a freshly computed
           quantity; likewise q_aa[i-1] in the all-reduce below uses that
           stale value. Looks suspicious -- verify against upstream
           SUNDIALS kinsol.c before changing. */
        if (i > 1) {
          for (i = 2; i < kin_mem->kin_m_aa; i++) {
            N_VDotProdMultiLocal((int) i,
                                 kin_mem->kin_q_aa[i-1],
                                 kin_mem->kin_q_aa,
                                 kin_mem->kin_T_aa + (i-1) * kin_mem->kin_m_aa);
          }
          N_VDotProdMultiAllReduce((int) (kin_mem->kin_m_aa * kin_mem->kin_m_aa),
                                   kin_mem->kin_q_aa[i-1], kin_mem->kin_T_aa);
        }
        /* reset the unit diagonal of T */
        for (i = 1; i < kin_mem->kin_m_aa; i++) {
          kin_mem->kin_T_aa[(i-1) * kin_mem->kin_m_aa + (i-1)] = ONE;
        }
      } else {
        kin_mem->kin_T_aa[0] = ONE;
        for (i = 2; i < kin_mem->kin_m_aa; i++) {
          N_VDotProdMulti((int) i-1, kin_mem->kin_q_aa[i-1], kin_mem->kin_q_aa,
                          kin_mem->kin_T_aa + (i-1) * kin_mem->kin_m_aa);
          kin_mem->kin_T_aa[(i-1) * kin_mem->kin_m_aa + (i-1)] = ONE;
        }
      }
    }

    /* Add the new df vector */
    kin_mem->kin_qr_func(kin_mem->kin_q_aa, R, kin_mem->kin_df_aa[i_pt],
                         (int) kin_mem->kin_m_aa - 1, (int) kin_mem->kin_m_aa,
                         (void *)kin_mem->kin_qr_data);

    /* Update the iteration map */
    j = 0;
    for (i=i_pt+1; i < kin_mem->kin_m_aa; i++)
      ipt_map[j++] = i;
    for (i=0; i < (i_pt+1); i++)
      ipt_map[j++] = i;
  }

  /* Solve least squares problem and update solution */
  lAA = iter;
  if (kin_mem->kin_m_aa < iter) lAA = kin_mem->kin_m_aa;

  retval = N_VDotProdMulti((int) lAA, fv, kin_mem->kin_q_aa, gamma);
  if (retval != KIN_SUCCESS) return(KIN_VECTOROP_ERR);

  /* set arrays for fused vector operation */
  cv[0] = ONE;
  Xv[0] = gval;
  nvec = 1;

  /* back-substitute R gamma = Q^T fv and accumulate -gamma_i dg_i */
  for (i=lAA-1; i > -1; i--) {
    for (j=i+1; j < lAA; j++) {
      gamma[i] = gamma[i]-R[j*kin_mem->kin_m_aa+i]*gamma[j];
    }
    gamma[i] = gamma[i]/R[i*kin_mem->kin_m_aa+i];
    cv[nvec] = -gamma[i];
    Xv[nvec] = kin_mem->kin_dg_aa[ipt_map[i]];
    nvec += 1;
  }

  /* if enabled, apply damping */
  if (kin_mem->kin_damping_aa) {
    onembeta = (ONE - kin_mem->kin_beta_aa);
    cv[nvec] = -onembeta;
    Xv[nvec] = fv;
    nvec += 1;
    for (i = lAA - 1; i > -1; i--)
    {
      cv[nvec] = onembeta * gamma[i];
      Xv[nvec] = kin_mem->kin_df_aa[ipt_map[i]];
      nvec += 1;
    }
  }

  /* update solution: x is the linear combination of gval, the dg
     history, and (if damping) fv and the df history accumulated in
     (cv, Xv) above */
  retval = N_VLinearCombination(nvec, cv, Xv, x);
  if (retval != KIN_SUCCESS) return(KIN_VECTOROP_ERR);

  return 0;
}
StanHeaders/src/kinsol/NOTICE0000644000176200001440000000221614645137106015455 0ustar liggesusersThis work was produced under the auspices of the U.S. Department of Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344. This work was prepared as an account of work sponsored by an agency of the United States Government. Neither the United States Government nor Lawrence Livermore National Security, LLC, nor any of their employees makes any warranty, expressed or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes.StanHeaders/src/kinsol/kinsol_ls.c0000644000176200001440000013352714645137106016724 0ustar liggesusers/*-----------------------------------------------------------------
 * Programmer(s): Daniel R. Reynolds @ SMU
 *                David J. Gardner, Radu Serban and Aaron Collier @ LLNL
 *-----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
* * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Implementation file for KINSOL's linear solver interface. *-----------------------------------------------------------------*/ #include #include #include #include #include "kinsol_impl.h" #include "kinsol_ls_impl.h" #include #include #include #include /* constants */ #define MIN_INC_MULT RCONST(1000.0) #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /*================================================================== KINLS Exported functions -- Required ==================================================================*/ /*--------------------------------------------------------------- KINSetLinearSolver specifies the linear solver ---------------------------------------------------------------*/ int KINSetLinearSolver(void *kinmem, SUNLinearSolver LS, SUNMatrix A) { KINMem kin_mem; KINLsMem kinls_mem; int retval, LSType; booleantype iterative; /* is the solver iterative? */ booleantype matrixbased; /* is a matrix structure used? 
*/ /* Return immediately if either kinmem or LS inputs are NULL */ if (kinmem == NULL) { KINProcessError(NULL, KINLS_MEM_NULL, "KINLS", "KINSetLinearSolver", MSG_LS_KINMEM_NULL); return(KINLS_MEM_NULL); } if (LS == NULL) { KINProcessError(NULL, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", "LS must be non-NULL"); return(KINLS_ILL_INPUT); } kin_mem = (KINMem) kinmem; /* Test if solver is compatible with LS interface */ if ( (LS->ops->gettype == NULL) || (LS->ops->solve == NULL) ) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", "LS object is missing a required operation"); return(KINLS_ILL_INPUT); } /* Retrieve the LS type */ LSType = SUNLinSolGetType(LS); /* Return with error if LS has 'matrix-embedded' type */ if (LSType == SUNLINEARSOLVER_MATRIX_EMBEDDED) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", "KINSOL is incompatible with MATRIX_EMBEDDED LS objects"); return(KINLS_ILL_INPUT); } /* Set flags based on LS type */ iterative = (LSType != SUNLINEARSOLVER_DIRECT); matrixbased = (LSType != SUNLINEARSOLVER_ITERATIVE); /* check for required vector operations for KINLS interface */ if ( (kin_mem->kin_vtemp1->ops->nvconst == NULL) || (kin_mem->kin_vtemp1->ops->nvdotprod == NULL) ) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", MSG_LS_BAD_NVECTOR); return(KINLS_ILL_INPUT); } /* Check for compatible LS type, matrix and "atimes" support */ if (iterative) { if ((LS->ops->setscalingvectors == NULL) && (kin_mem->kin_vtemp1->ops->nvgetlength == NULL)) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", MSG_LS_BAD_NVECTOR); return(KINLS_ILL_INPUT); } if (!matrixbased && (LS->ops->setatimes == NULL)) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", "Incompatible inputs: iterative LS must support ATimes routine"); return(KINLS_ILL_INPUT); } if (matrixbased && A == NULL) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", 
"Incompatible inputs: matrix-iterative LS requires non-NULL matrix"); return(KINLS_ILL_INPUT); } } else if (A == NULL) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetLinearSolver", "Incompatible inputs: direct LS requires non-NULL matrix"); return(KINLS_ILL_INPUT); } /* free any existing system solver attached to KIN */ if (kin_mem->kin_lfree) kin_mem->kin_lfree(kin_mem); /* Determine if this is an iterative linear solver */ kin_mem->kin_inexact_ls = iterative; /* Set four main system linear solver function fields in kin_mem */ kin_mem->kin_linit = kinLsInitialize; kin_mem->kin_lsetup = kinLsSetup; kin_mem->kin_lsolve = kinLsSolve; kin_mem->kin_lfree = kinLsFree; /* Get memory for KINLsMemRec */ kinls_mem = NULL; kinls_mem = (KINLsMem) malloc(sizeof(struct KINLsMemRec)); if (kinls_mem == NULL) { KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINLS", "KINSetLinearSolver", MSG_LS_MEM_FAIL); return(KINLS_MEM_FAIL); } memset(kinls_mem, 0, sizeof(struct KINLsMemRec)); /* set SUNLinearSolver pointer */ kinls_mem->LS = LS; /* Set defaults for Jacobian-related fields */ if (A != NULL) { kinls_mem->jacDQ = SUNTRUE; kinls_mem->jac = kinLsDQJac; kinls_mem->J_data = kin_mem; } else { kinls_mem->jacDQ = SUNFALSE; kinls_mem->jac = NULL; kinls_mem->J_data = NULL; } kinls_mem->jtimesDQ = SUNTRUE; kinls_mem->jtimes = kinLsDQJtimes; kinls_mem->jt_func = kin_mem->kin_func; kinls_mem->jt_data = kin_mem; /* Set defaults for preconditioner-related fields */ kinls_mem->pset = NULL; kinls_mem->psolve = NULL; kinls_mem->pfree = NULL; kinls_mem->pdata = kin_mem->kin_user_data; /* Initialize counters */ kinLsInitializeCounters(kinls_mem); /* Set default values for the rest of the LS parameters */ kinls_mem->last_flag = KINLS_SUCCESS; /* If LS supports ATimes, attach KINLs routine */ if (LS->ops->setatimes) { retval = SUNLinSolSetATimes(LS, kin_mem, kinLsATimes); if (retval != SUNLS_SUCCESS) { KINProcessError(kin_mem, KINLS_SUNLS_FAIL, "KINLS", "KINSetLinearSolver", "Error in calling 
SUNLinSolSetATimes"); free(kinls_mem); kinls_mem = NULL; return(KINLS_SUNLS_FAIL); } } /* If LS supports preconditioning, initialize pset/psol to NULL */ if (LS->ops->setpreconditioner) { retval = SUNLinSolSetPreconditioner(LS, kin_mem, NULL, NULL); if (retval != SUNLS_SUCCESS) { KINProcessError(kin_mem, KINLS_SUNLS_FAIL, "KINLS", "KINSetLinearSolver", "Error in calling SUNLinSolSetPreconditioner"); free(kinls_mem); kinls_mem = NULL; return(KINLS_SUNLS_FAIL); } } /* initialize tolerance scaling factor */ kinls_mem->tol_fac = -ONE; /* set SUNMatrix pointer (can be NULL) */ kinls_mem->J = A; /* Attach linear solver memory to integrator memory */ kin_mem->kin_lmem = kinls_mem; return(KINLS_SUCCESS); } /*================================================================== Optional input/output routines ==================================================================*/ /*------------------------------------------------------------------ KINSetJacFn specifies the Jacobian function ------------------------------------------------------------------*/ int KINSetJacFn(void *kinmem, KINLsJacFn jac) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure */ retval = kinLs_AccessLMem(kinmem, "KINSetJacFn", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); /* return with failure if jac cannot be used */ if ((jac != NULL) && (kinls_mem->J == NULL)) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetJacFn", "Jacobian routine cannot be supplied for NULL SUNMatrix"); return(KINLS_ILL_INPUT); } if (jac != NULL) { kinls_mem->jacDQ = SUNFALSE; kinls_mem->jac = jac; kinls_mem->J_data = kin_mem->kin_user_data; } else { kinls_mem->jacDQ = SUNTRUE; kinls_mem->jac = kinLsDQJac; kinls_mem->J_data = kin_mem; } return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINSetPreconditioner sets the preconditioner setup and solve functions ------------------------------------------------------------------*/ int 
KINSetPreconditioner(void *kinmem,
                     KINLsPrecSetupFn psetup,
                     KINLsPrecSolveFn psolve)
{
  KINMem   kin_mem;
  KINLsMem kinls_mem;
  SUNPSetupFn kinls_psetup;
  SUNPSolveFn kinls_psolve;
  int      retval;

  /* access KINLsMem structure */
  retval = kinLs_AccessLMem(kinmem, "KINSetPreconditioner",
                            &kin_mem, &kinls_mem);
  if (retval != KIN_SUCCESS) return(retval);

  /* store function pointers for user-supplied routines in KINLS interface.
     NOTE(review): these are stored before the support check below, so on
     the error path the pointers remain set in kinls_mem even though the
     LS cannot use them -- confirm this is intentional. */
  kinls_mem->pset   = psetup;
  kinls_mem->psolve = psolve;

  /* issue error if LS object does not support user-supplied preconditioning */
  if (kinls_mem->LS->ops->setpreconditioner == NULL) {
    KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetPreconditioner",
                    "SUNLinearSolver object does not support user-supplied preconditioning");
    return(KINLS_ILL_INPUT);
  }

  /* notify iterative linear solver to call KINLs interface routines */
  kinls_psetup = (psetup == NULL) ? NULL : kinLsPSetup;
  kinls_psolve = (psolve == NULL) ? NULL : kinLsPSolve;
  retval = SUNLinSolSetPreconditioner(kinls_mem->LS, kin_mem,
                                      kinls_psetup, kinls_psolve);
  if (retval != SUNLS_SUCCESS) {
    KINProcessError(kin_mem, KINLS_SUNLS_FAIL, "KINLS", "KINSetPreconditioner",
                    "Error in calling SUNLinSolSetPreconditioner");
    return(KINLS_SUNLS_FAIL);
  }

  return(KINLS_SUCCESS);
}

/*------------------------------------------------------------------
  KINSetJacTimesVecFn sets the matrix-vector product function
  ------------------------------------------------------------------*/
int KINSetJacTimesVecFn(void *kinmem, KINLsJacTimesVecFn jtv)
{
  int      retval;
  KINMem   kin_mem;
  KINLsMem kinls_mem;

  /* access KINLsMem structure */
  retval = kinLs_AccessLMem(kinmem, "KINSetJacTimesVecFn",
                            &kin_mem, &kinls_mem);
  if (retval != KIN_SUCCESS) return(retval);

  /* issue error if LS object does not support user-supplied ATimes */
  if (kinls_mem->LS->ops->setatimes == NULL) {
    KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetJacTimesVecFn",
                    "SUNLinearSolver object does not support user-supplied ATimes routine");
return(KINLS_ILL_INPUT); } /* store function pointers for user-supplied routine in KINLs interface (NULL jtimes implies use of DQ default) */ if (jtv != NULL) { kinls_mem->jtimesDQ = SUNFALSE; kinls_mem->jtimes = jtv; kinls_mem->jt_data = kin_mem->kin_user_data; } else { kinls_mem->jtimesDQ = SUNTRUE; kinls_mem->jtimes = kinLsDQJtimes; kinls_mem->jt_func = kin_mem->kin_func; kinls_mem->jt_data = kin_mem; } return(KINLS_SUCCESS); } /* KINSetJacTimesVecSysFn specifies an alternative user-supplied system function to use in the internal finite difference Jacobian-vector product */ int KINSetJacTimesVecSysFn(void *kinmem, KINSysFn jtimesSysFn) { int retval; KINMem kin_mem = NULL; KINLsMem kinls_mem = NULL; /* access KINLsMem structure */ retval = kinLs_AccessLMem(kin_mem, "KINSetJacTimesVecSysFn", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); /* check if using internal finite difference approximation */ if (!(kinls_mem->jtimesDQ)) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "KINSetJacTimesVecSysFn", "Internal finite-difference Jacobian-vector product is disabled."); return(KINLS_ILL_INPUT); } /* store function pointers for system function (NULL implies use kin_func) */ if (jtimesSysFn != NULL) kinls_mem->jt_func = jtimesSysFn; else kinls_mem->jt_func = kin_mem->kin_func; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINGetLinWorkSpace returns the integer and real workspace size ------------------------------------------------------------------*/ int KINGetLinWorkSpace(void *kinmem, long int *lenrwLS, long int *leniwLS) { KINMem kin_mem; KINLsMem kinls_mem; sunindextype lrw1, liw1; long int lrw, liw; int retval; /* access KINLsMem structure */ retval = kinLs_AccessLMem(kinmem, "KINGetLinWorkSpace", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); /* start with fixed sizes plus vector/matrix pointers */ *lenrwLS = 1; *leniwLS = 21; /* add N_Vector sizes */ if 
 (kin_mem->kin_vtemp1->ops->nvspace) {
    N_VSpace(kin_mem->kin_vtemp1, &lrw1, &liw1);
    *lenrwLS += lrw1;
    *leniwLS += liw1;
  }

  /* add LS sizes */
  if (kinls_mem->LS->ops->space) {
    retval = SUNLinSolSpace(kinls_mem->LS, &lrw, &liw);
    if (retval == 0) {
      *lenrwLS += lrw;
      *leniwLS += liw;
    }
  }

  return(KINLS_SUCCESS);
}

/*------------------------------------------------------------------
  KINGetNumJacEvals returns the number of Jacobian evaluations
  ------------------------------------------------------------------*/
int KINGetNumJacEvals(void *kinmem, long int *njevals)
{
  KINMem   kin_mem;
  KINLsMem kinls_mem;
  int      retval;

  /* access KINLsMem structure; set output value and return */
  retval = kinLs_AccessLMem(kinmem, "KINGetNumJacEvals",
                            &kin_mem, &kinls_mem);
  if (retval != KIN_SUCCESS) return(retval);
  *njevals = kinls_mem->nje;
  return(KINLS_SUCCESS);
}

/*------------------------------------------------------------------
  KINGetNumPrecEvals returns the total number of preconditioner
  evaluations
  ------------------------------------------------------------------*/
int KINGetNumPrecEvals(void *kinmem, long int *npevals)
{
  KINMem   kin_mem;
  KINLsMem kinls_mem;
  int      retval;

  /* access KINLsMem structure; set output value and return */
  retval = kinLs_AccessLMem(kinmem, "KINGetNumPrecEvals",
                            &kin_mem, &kinls_mem);
  if (retval != KIN_SUCCESS) return(retval);
  *npevals = kinls_mem->npe;
  return(KINLS_SUCCESS);
}

/*------------------------------------------------------------------
  KINGetNumPrecSolves returns the total number of times the
  preconditioner was applied
  ------------------------------------------------------------------*/
int KINGetNumPrecSolves(void *kinmem, long int *npsolves)
{
  KINMem   kin_mem;
  KINLsMem kinls_mem;
  int      retval;

  /* access KINLsMem structure; set output value and return */
  retval = kinLs_AccessLMem(kinmem, "KINGetNumPrecSolves",
                            &kin_mem, &kinls_mem);
  if (retval != KIN_SUCCESS) return(retval);
  *npsolves = kinls_mem->nps;
  return(KINLS_SUCCESS);
}
/*------------------------------------------------------------------ KINGetNumLinIters returns the total number of linear iterations ------------------------------------------------------------------*/ int KINGetNumLinIters(void *kinmem, long int *nliters) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure; set output value and return */ retval = kinLs_AccessLMem(kinmem, "KINGetNumLinIters", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); *nliters = kinls_mem->nli; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINGetNumLinConvFails returns the total numbe of convergence failures ------------------------------------------------------------------*/ int KINGetNumLinConvFails(void *kinmem, long int *nlcfails) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure; set output value and return */ retval = kinLs_AccessLMem(kinmem, "KINGetNumLinConvFails", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); *nlcfails = kinls_mem->ncfl; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINGetNumJtimesEvals returns the number of times the matrix vector product was computed ------------------------------------------------------------------*/ int KINGetNumJtimesEvals(void *kinmem, long int *njvevals) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure; set output value and return */ retval = kinLs_AccessLMem(kinmem, "KINGetNumJtimesEvals", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); *njvevals = kinls_mem->njtimes; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINGetNumLinFuncEvals returns the number of calls to the user's F routine by the linear solver module ------------------------------------------------------------------*/ int KINGetNumLinFuncEvals(void *kinmem, long int *nfevals) { KINMem kin_mem; 
KINLsMem kinls_mem; int retval; /* access KINLsMem structure; set output value and return */ retval = kinLs_AccessLMem(kinmem, "KINGetNumLinFuncEvals", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); *nfevals = kinls_mem->nfeDQ; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINGetLastLinFlag returns the last flag set in the KINLS function ------------------------------------------------------------------*/ int KINGetLastLinFlag(void *kinmem, long int *flag) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure; set output value and return */ retval = kinLs_AccessLMem(kinmem, "KINGetLastLinFlag", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); *flag = kinls_mem->last_flag; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINGetLinReturnFlagName ------------------------------------------------------------------*/ char *KINGetLinReturnFlagName(long int flag) { char *name; name = (char *)malloc(30*sizeof(char)); switch(flag) { case KINLS_SUCCESS: sprintf(name, "KINLS_SUCCESS"); break; case KINLS_MEM_NULL: sprintf(name, "KINLS_MEM_NULL"); break; case KINLS_LMEM_NULL: sprintf(name, "KINLS_LMEM_NULL"); break; case KINLS_ILL_INPUT: sprintf(name, "KINLS_ILL_INPUT"); break; case KINLS_MEM_FAIL: sprintf(name, "KINLS_MEM_FAIL"); break; case KINLS_PMEM_NULL: sprintf(name, "KINLS_PMEM_NULL"); break; case KINLS_JACFUNC_ERR: sprintf(name,"KINLS_JACFUNC_ERR"); break; case KINLS_SUNMAT_FAIL: sprintf(name,"KINLS_SUNMAT_FAIL"); break; case KINLS_SUNLS_FAIL: sprintf(name,"KINLS_SUNLS_FAIL"); break; default: sprintf(name, "NONE"); } return(name); } /*================================================================== KINLS Private functions ==================================================================*/ /*------------------------------------------------------------------ kinLsATimes This routine coordinates the generation of the 
matrix-vector product z = J*v by calling either kinLsDQJtimes, which uses a difference quotient approximation for J*v, or by calling the user-supplied routine KINLsJacTimesVecFn if it is non-null. ------------------------------------------------------------------*/ int kinLsATimes(void *kinmem, N_Vector v, N_Vector z) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure */ retval = kinLs_AccessLMem(kinmem, "kinLsATimes", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); /* call Jacobian-times-vector product routine (either user-supplied or internal DQ) */ retval = kinls_mem->jtimes(v, z, kin_mem->kin_uu, &(kinls_mem->new_uu), kinls_mem->jt_data); kinls_mem->njtimes++; return(retval); } /*--------------------------------------------------------------- kinLsPSetup: This routine interfaces between the generic iterative linear solvers and the user's psetup routine. It passes to psetup all required state information from kin_mem. Its return value is the same as that returned by psetup. Note that the generic iterative linear solvers guarantee that kinLsPSetup will only be called in the case that the user's psetup routine is non-NULL. ---------------------------------------------------------------*/ int kinLsPSetup(void *kinmem) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure */ retval = kinLs_AccessLMem(kinmem, "kinLsPSetup", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); /* Call user pset routine to update preconditioner */ retval = kinls_mem->pset(kin_mem->kin_uu, kin_mem->kin_uscale, kin_mem->kin_fval, kin_mem->kin_fscale, kinls_mem->pdata); kinls_mem->npe++; return(retval); } /*------------------------------------------------------------------ kinLsPSolve This routine interfaces between the generic iterative linear solvers and the user's psolve routine. It passes to psolve all required state information from kinsol_mem. Its return value is the same as that returned by psolve. 
Note that the generic SUNLinSol solver guarantees that kinLsPSolve will not be called in the case in which preconditioning is not done. This is the only case in which the user's psolve routine is allowed to be NULL. ------------------------------------------------------------------*/ int kinLsPSolve(void *kinmem, N_Vector r, N_Vector z, realtype tol, int lr) { KINMem kin_mem; KINLsMem kinls_mem; int retval; /* access KINLsMem structure */ retval = kinLs_AccessLMem(kinmem, "kinLsPSolve", &kin_mem, &kinls_mem); if (retval != KIN_SUCCESS) return(retval); /* copy the rhs into z before the psolve call */ /* Note: z returns with the solution */ N_VScale(ONE, r, z); /* note: user-supplied preconditioning with KINSOL does not support either the 'tol' or 'lr' inputs */ retval = kinls_mem->psolve(kin_mem->kin_uu, kin_mem->kin_uscale, kin_mem->kin_fval, kin_mem->kin_fscale, z, kinls_mem->pdata); kinls_mem->nps++; return(retval); } /*------------------------------------------------------------------ kinLsDQJac This routine is a wrapper for the Dense and Band implementations of the difference quotient Jacobian approximation routines. 
------------------------------------------------------------------*/ int kinLsDQJac(N_Vector u, N_Vector fu, SUNMatrix Jac, void *kinmem, N_Vector tmp1, N_Vector tmp2) { KINMem kin_mem; int retval; /* access KINMem structure */ if (kinmem == NULL) { KINProcessError(NULL, KINLS_MEM_NULL, "KINLS", "kinLsDQJac", MSG_LS_KINMEM_NULL); return(KINLS_MEM_NULL); } kin_mem = (KINMem) kinmem; /* verify that Jac is non-NULL */ if (Jac == NULL) { KINProcessError(kin_mem, KINLS_LMEM_NULL, "KINLS", "kinLsDQJac", MSG_LS_LMEM_NULL); return(KINLS_LMEM_NULL); } /* Call the matrix-structure-specific DQ approximation routine */ if (SUNMatGetID(Jac) == SUNMATRIX_DENSE) { retval = kinLsDenseDQJac(u, fu, Jac, kin_mem, tmp1, tmp2); } else if (SUNMatGetID(Jac) == SUNMATRIX_BAND) { retval = kinLsBandDQJac(u, fu, Jac, kin_mem, tmp1, tmp2); } else { KINProcessError(kin_mem, KIN_ILL_INPUT, "KINLS", "kinLsDQJac", "unrecognized matrix type for kinLsDQJac"); retval = KIN_ILL_INPUT; } return(retval); } /*------------------------------------------------------------------ kinLsDenseDQJac This routine generates a dense difference quotient approximation to the Jacobian of F(u). It assumes a dense SUNMatrix input stored column-wise, and that elements within each column are contiguous. The address of the jth column of J is obtained via the function SUNDenseMatrix_Column() and this pointer is associated with an N_Vector using the N_VGetArrayPointer and N_VSetArrayPointer functions. Finally, the actual computation of the jth column of the Jacobian is done with a call to N_VLinearSum. The increment used in the finite-difference approximation J_ij = ( F_i(u+sigma_j * e_j) - F_i(u) ) / sigma_j is sigma_j = max{|u_j|, |1/uscale_j|} * sqrt(uround) Note: uscale_j = 1/typ(u_j) NOTE: Any type of failure of the system function here leads to an unrecoverable failure of the Jacobian function and thus of the linear solver setup function, stopping KINSOL. 
------------------------------------------------------------------*/ int kinLsDenseDQJac(N_Vector u, N_Vector fu, SUNMatrix Jac, KINMem kin_mem, N_Vector tmp1, N_Vector tmp2) { realtype inc, inc_inv, ujsaved, ujscale, sign; realtype *tmp2_data, *u_data, *uscale_data; N_Vector ftemp, jthCol; sunindextype j, N; KINLsMem kinls_mem; int retval = 0; /* access LsMem interface structure */ kinls_mem = (KINLsMem) kin_mem->kin_lmem; /* access matrix dimension */ N = SUNDenseMatrix_Columns(Jac); /* Save pointer to the array in tmp2 */ tmp2_data = N_VGetArrayPointer(tmp2); /* Rename work vectors for readibility */ ftemp = tmp1; jthCol = tmp2; /* Obtain pointers to the data for u and uscale */ u_data = N_VGetArrayPointer(u); uscale_data = N_VGetArrayPointer(kin_mem->kin_uscale); /* This is the only for loop for 0..N-1 in KINSOL */ for (j = 0; j < N; j++) { /* Generate the jth col of J(u) */ /* Set data address of jthCol, and save u_j values and scaling */ N_VSetArrayPointer(SUNDenseMatrix_Column(Jac,j), jthCol); ujsaved = u_data[j]; ujscale = ONE/uscale_data[j]; /* Compute increment */ sign = (ujsaved >= ZERO) ? ONE : -ONE; inc = kin_mem->kin_sqrt_relfunc*SUNMAX(SUNRabs(ujsaved), ujscale)*sign; /* Increment u_j, call F(u), and return if error occurs */ u_data[j] += inc; retval = kin_mem->kin_func(u, ftemp, kin_mem->kin_user_data); kinls_mem->nfeDQ++; if (retval != 0) break; /* reset u_j */ u_data[j] = ujsaved; /* Construct difference quotient in jthCol */ inc_inv = ONE/inc; N_VLinearSum(inc_inv, ftemp, -inc_inv, fu, jthCol); } /* Restore original array pointer in tmp2 */ N_VSetArrayPointer(tmp2_data, tmp2); return(retval); } /*------------------------------------------------------------------ kinLsBandDQJac This routine generates a banded difference quotient approximation to the Jacobian of F(u). It assumes a SUNBandMatrix input stored column-wise, and that elements within each column are contiguous. 
This makes it possible to get the address of a column of J via the
  function SUNBandMatrix_Column() and to write a simple for loop to
  set each of the elements of a column in succession.

  NOTE: Any type of failure of the system function here leads to an
  unrecoverable failure of the Jacobian function and thus of the
  linear solver setup function, stopping KINSOL.
  ------------------------------------------------------------------*/
int kinLsBandDQJac(N_Vector u, N_Vector fu, SUNMatrix Jac,
                   KINMem kin_mem, N_Vector tmp1, N_Vector tmp2)
{
  realtype inc, inc_inv;
  N_Vector futemp, utemp;
  sunindextype group, i, j, width, ngroups, i1, i2;
  sunindextype N, mupper, mlower;
  realtype *col_j, *fu_data, *futemp_data, *u_data, *utemp_data, *uscale_data;
  KINLsMem kinls_mem;
  int retval = 0;

  /* access LsMem interface structure */
  kinls_mem = (KINLsMem) kin_mem->kin_lmem;

  /* access matrix dimensions */
  N = SUNBandMatrix_Columns(Jac);
  mupper = SUNBandMatrix_UpperBandwidth(Jac);
  mlower = SUNBandMatrix_LowerBandwidth(Jac);

  /* Rename work vectors for use as temporary values of u and fu */
  futemp = tmp1;
  utemp = tmp2;

  /* Obtain pointers to the data for ewt, fy, futemp, y, ytemp */
  fu_data = N_VGetArrayPointer(fu);
  futemp_data = N_VGetArrayPointer(futemp);
  u_data = N_VGetArrayPointer(u);
  uscale_data = N_VGetArrayPointer(kin_mem->kin_uscale);
  utemp_data = N_VGetArrayPointer(utemp);

  /* Load utemp with u */
  N_VScale(ONE, u, utemp);

  /* Set bandwidth and number of column groups for band differencing;
     columns j, j+width, j+2*width, ... have disjoint band structure
     and so can be perturbed simultaneously with one F evaluation */
  width = mlower + mupper + 1;
  ngroups = SUNMIN(width, N);

  for (group=1; group <= ngroups; group++) {

    /* Increment all utemp components in group */
    for(j=group-1; j < N; j+=width) {
      inc = kin_mem->kin_sqrt_relfunc*SUNMAX(SUNRabs(u_data[j]),
                                             ONE/SUNRabs(uscale_data[j]));
      utemp_data[j] += inc;
    }

    /* Evaluate f with incremented u */
    retval = kin_mem->kin_func(utemp, futemp, kin_mem->kin_user_data);
    if (retval != 0) return(retval);

    /* Restore utemp components, then form and load difference quotients */
    for (j=group-1; j < N; j+=width) {
      utemp_data[j] = u_data[j];
      col_j = SUNBandMatrix_Column(Jac, j);
      inc = kin_mem->kin_sqrt_relfunc*SUNMAX(SUNRabs(u_data[j]),
                                             ONE/SUNRabs(uscale_data[j]));
      inc_inv = ONE/inc;
      i1 = SUNMAX(0, j-mupper);
      i2 = SUNMIN(j+mlower, N-1);
      for (i=i1; i <= i2; i++)
        SM_COLUMN_ELEMENT_B(col_j,i,j) = inc_inv * (futemp_data[i] - fu_data[i]);
    }
  }

  /* Increment counter nfeDQ (one F evaluation per column group) */
  kinls_mem->nfeDQ += ngroups;

  return(0);
}

/*------------------------------------------------------------------
  kinLsDQJtimes

  This routine generates the matrix-vector product z = J*v using a
  difference quotient approximation. The approximation is
  J*v = [func(uu + sigma*v) - func(uu)]/sigma. Here sigma is based
  on the dot products (uscale*uu, uscale*v) and
  (uscale*v, uscale*v), the L1Norm(uscale*v), and on sqrt_relfunc
  (the square root of the relative error in the function). Note
  that v in the argument list has already been both preconditioned
  and unscaled.

  NOTE: Unlike the DQ Jacobian functions for direct linear solvers
  (which are called from within the lsetup function), this function
  is called from within the lsolve function and thus a recovery may
  still be possible even if the system function fails (recoverably).
------------------------------------------------------------------*/
int kinLsDQJtimes(N_Vector v, N_Vector Jv, N_Vector u,
                  booleantype *new_u, void *kinmem)
{
  realtype sigma, sigma_inv, sutsv, sq1norm, sign, vtv;
  KINMem kin_mem;
  KINLsMem kinls_mem;
  int retval;

  /* access KINLsMem structure */
  retval = kinLs_AccessLMem(kinmem, "kinLsDQJtimes",
                            &kin_mem, &kinls_mem);
  if (retval != KIN_SUCCESS) return(retval);

  /* ensure that NVector supplies requisite routines */
  if ( (v->ops->nvprod == NULL) || (v->ops->nvdotprod == NULL) ||
       (v->ops->nvl1norm == NULL) || (v->ops->nvlinearsum == NULL) ){
    KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS",
                    "kinLsDQJtimes", MSG_LS_BAD_NVECTOR);
    return(KINLS_ILL_INPUT);
  }

  /* scale the vector v and put Du*v into vtemp1 */
  N_VProd(v, kin_mem->kin_uscale, kin_mem->kin_vtemp1);

  /* scale u and put into Jv (used as a temporary storage) */
  N_VProd(u, kin_mem->kin_uscale, Jv);

  /* compute dot product (Du*u).(Du*v) */
  sutsv = N_VDotProd(Jv, kin_mem->kin_vtemp1);

  /* compute dot product (Du*v).(Du*v) */
  vtv = N_VDotProd(kin_mem->kin_vtemp1, kin_mem->kin_vtemp1);

  /* compute differencing factor -- this is from p. 469, Brown
     and Saad paper */
  sq1norm = N_VL1Norm(kin_mem->kin_vtemp1);
  sign = (sutsv >= ZERO) ? ONE : -ONE;
  sigma = sign*(kin_mem->kin_sqrt_relfunc)*SUNMAX(SUNRabs(sutsv),sq1norm)/vtv;
  sigma_inv = ONE/sigma;

  /* compute the u-prime at which to evaluate the function func */
  N_VLinearSum(ONE, u, sigma, v, kin_mem->kin_vtemp1);

  /* call the system function to calculate func(u+sigma*v) */
  retval = kinls_mem->jt_func(kin_mem->kin_vtemp1, kin_mem->kin_vtemp2,
                              kin_mem->kin_user_data);
  kinls_mem->nfeDQ++;
  if (retval != 0) return(retval);

  /* finish the computation of the difference quotient */
  N_VLinearSum(sigma_inv, kin_mem->kin_vtemp2,
               -sigma_inv, kin_mem->kin_fval, Jv);

  return(0);
}

/*------------------------------------------------------------------
  kinLsInitialize performs remaining initializations specific
  to the iterative linear solver interface (and solver itself)
  ------------------------------------------------------------------*/
int kinLsInitialize(KINMem kin_mem)
{
  KINLsMem kinls_mem;
  int retval;

  /* Access KINLsMem structure */
  if (kin_mem->kin_lmem == NULL) {
    KINProcessError(kin_mem, KINLS_LMEM_NULL, "KINLS",
                    "kinLsInitialize", MSG_LS_LMEM_NULL);
    return(KINLS_LMEM_NULL);
  }
  kinls_mem = (KINLsMem) kin_mem->kin_lmem;

  /* Test for valid combinations of matrix & Jacobian routines: */
  if (kinls_mem->J == NULL) {

    /* If SUNMatrix A is NULL: ensure 'jac' function pointer is NULL */
    kinls_mem->jacDQ  = SUNFALSE;
    kinls_mem->jac    = NULL;
    kinls_mem->J_data = NULL;

  } else if (kinls_mem->jacDQ) {

    /* If J is non-NULL, and 'jac' is not user-supplied:
       - if A is dense or band, ensure that our DQ approx.
is used - otherwise => error */
    retval = 0;
    if (kinls_mem->J->ops->getid) {

      if ( (SUNMatGetID(kinls_mem->J) == SUNMATRIX_DENSE) ||
           (SUNMatGetID(kinls_mem->J) == SUNMATRIX_BAND) ) {
        kinls_mem->jac    = kinLsDQJac;
        kinls_mem->J_data = kin_mem;
      } else {
        retval++;
      }

    } else {
      retval++;
    }
    if (retval) {
      KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS", "kinLsInitialize",
                      "No Jacobian constructor available for SUNMatrix type");
      kinls_mem->last_flag = KINLS_ILL_INPUT;
      return(KINLS_ILL_INPUT);
    }

    /* check for required vector operations for kinLsDQJac routine */
    if ( (kin_mem->kin_vtemp1->ops->nvlinearsum == NULL) ||
         (kin_mem->kin_vtemp1->ops->nvscale == NULL) ||
         (kin_mem->kin_vtemp1->ops->nvgetarraypointer == NULL) ||
         (kin_mem->kin_vtemp1->ops->nvsetarraypointer == NULL) ) {
      KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS",
                      "kinLsInitialize", MSG_LS_BAD_NVECTOR);
      return(KINLS_ILL_INPUT);
    }

  } else {

    /* If J is non-NULL, and 'jac' is user-supplied,
       reset J_data pointer (just in case) */
    kinls_mem->J_data = kin_mem->kin_user_data;
  }

  /* Prohibit Picard iteration with DQ Jacobian approximation or
     difference-quotient J*v */
  if ( (kin_mem->kin_globalstrategy == KIN_PICARD) &&
       kinls_mem->jacDQ && kinls_mem->jtimesDQ ) {
    KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINLS",
                    "kinLsInitialize", MSG_NOL_FAIL);
    return(KINLS_ILL_INPUT);
  }

  /** error-checking is complete, begin initializations **/

  /* Initialize counters */
  kinLsInitializeCounters(kinls_mem);

  /* Set Jacobian-related fields, based on jtimesDQ */
  if (kinls_mem->jtimesDQ) {
    kinls_mem->jtimes  = kinLsDQJtimes;
    kinls_mem->jt_data = kin_mem;
  } else {
    kinls_mem->jt_data = kin_mem->kin_user_data;
  }

  /* if J is NULL and: NOT preconditioning or do NOT need to setup the
     preconditioner, then set the lsetup function to NULL */
  if (kinls_mem->J == NULL)
    if ((kinls_mem->psolve == NULL) || (kinls_mem->pset == NULL))
      kin_mem->kin_lsetup = NULL;

  /* Set scaling vectors assuming RIGHT preconditioning */
  /* NOTE: retval is non-zero only if LS == NULL */
  if (kinls_mem->LS->ops->setscalingvectors) {
    retval = SUNLinSolSetScalingVectors(kinls_mem->LS,
                                        kin_mem->kin_fscale,
                                        kin_mem->kin_fscale);
    if (retval != SUNLS_SUCCESS) {
      KINProcessError(kin_mem, KINLS_SUNLS_FAIL, "KINLS", "kinLsInitialize",
                      "Error in calling SUNLinSolSetScalingVectors");
      return(KINLS_SUNLS_FAIL);
    }
  }

  /* If the linear solver is iterative or matrix-iterative, and if
     left/right scaling are not supported, we must update linear solver
     tolerances in an attempt to account for the fscale vector. We make
     the following assumptions:
       1. fscale_i = fs_mean, for i=0,...,n-1 (i.e. the weights are
          homogeneous)
       2. the linear solver uses a basic 2-norm to measure convergence
     Hence (using the notation from sunlinsol_spgmr.h, with S = diag(fscale)),
           || bbar - Abar xbar ||_2 < tol
       <=> || S b - S A x ||_2 < tol
       <=> || S (b - A x) ||_2 < tol
       <=> \sum_{i=0}^{n-1} (fscale_i (b - A x)_i)^2 < tol^2
       <=> fs_mean^2 \sum_{i=0}^{n-1} (b - A x_i)^2 < tol^2
       <=> \sum_{i=0}^{n-1} (b - A x_i)^2 < tol^2 / fs_mean^2
       <=> || b - A x ||_2 < tol / fs_mean
       <=> || b - A x ||_2 < tol * tol_fac
     So we compute tol_fac = sqrt(N) / ||fscale||_L2 for scaling desired
     tolerances */
  if (kinls_mem->iterative &&
      kinls_mem->LS->ops->setscalingvectors == NULL) {
    N_VConst(ONE, kin_mem->kin_vtemp1);
    kinls_mem->tol_fac = SUNRsqrt(N_VGetLength(kin_mem->kin_vtemp1)) /
      N_VWL2Norm(kin_mem->kin_fscale, kin_mem->kin_vtemp1);
  } else {
    kinls_mem->tol_fac = ONE;
  }

  /* Call LS initialize routine, and return result */
  kinls_mem->last_flag = SUNLinSolInitialize(kinls_mem->LS);
  return(kinls_mem->last_flag);
}

/*------------------------------------------------------------------
  kinLsSetup call the LS setup routine
  ------------------------------------------------------------------*/
int kinLsSetup(KINMem kin_mem)
{
  KINLsMem kinls_mem;
  int retval;

  /* Access KINLsMem structure */
  if (kin_mem->kin_lmem == NULL) {
    KINProcessError(kin_mem, KINLS_LMEM_NULL, "KINLS",
                    "kinLsSetup", MSG_LS_LMEM_NULL);
    return(KINLS_LMEM_NULL);
  }
kinls_mem = (KINLsMem) kin_mem->kin_lmem;

  /* recompute J if it is non-NULL */
  if (kinls_mem->J) {

    /* Increment nje counter. */
    kinls_mem->nje++;

    /* Clear the linear system matrix if necessary (direct solvers
       factor J in place, so stale entries must be zeroed first) */
    if (SUNLinSolGetType(kinls_mem->LS) == SUNLINEARSOLVER_DIRECT) {
      retval = SUNMatZero(kinls_mem->J);
      if (retval != 0) {
        KINProcessError(kin_mem, KINLS_SUNMAT_FAIL, "KINLS",
                        "kinLsSetup", MSG_LS_MATZERO_FAILED);
        kinls_mem->last_flag = KINLS_SUNMAT_FAIL;
        return(kinls_mem->last_flag);
      }
    }

    /* Call Jacobian routine */
    retval = kinls_mem->jac(kin_mem->kin_uu, kin_mem->kin_fval,
                            kinls_mem->J, kinls_mem->J_data,
                            kin_mem->kin_vtemp1, kin_mem->kin_vtemp2);
    if (retval != 0) {
      KINProcessError(kin_mem, KINLS_JACFUNC_ERR, "KINLS",
                      "kinLsSetup", MSG_LS_JACFUNC_FAILED);
      kinls_mem->last_flag = KINLS_JACFUNC_ERR;
      return(kinls_mem->last_flag);
    }
  }

  /* Call LS setup routine -- the LS will call kinLsPSetup (if applicable) */
  kinls_mem->last_flag = SUNLinSolSetup(kinls_mem->LS, kinls_mem->J);

  /* save nni value from most recent lsetup call */
  kin_mem->kin_nnilset = kin_mem->kin_nni;

  return(kinls_mem->last_flag);
}

/*------------------------------------------------------------------
  kinLsSolve interfaces between KINSOL and the generic
  SUNLinearSolver object
  ------------------------------------------------------------------*/
int kinLsSolve(KINMem kin_mem, N_Vector xx, N_Vector bb,
               realtype *sJpnorm, realtype *sFdotJp)
{
  KINLsMem kinls_mem;
  int nli_inc, retval;
  realtype res_norm, tol;

  /* Access KINLsMem structure */
  if (kin_mem->kin_lmem == NULL) {
    KINProcessError(kin_mem, KINLS_LMEM_NULL, "KINLS",
                    "kinLsSolve", MSG_LS_LMEM_NULL);
    return(KINLS_LMEM_NULL);
  }
  kinls_mem = (KINLsMem) kin_mem->kin_lmem;

  /* Set linear solver tolerance as input value times scaling factor
     (to account for possible lack of support for left/right scaling
     vectors in SUNLinSol object) */
  tol = kin_mem->kin_eps * kinls_mem->tol_fac;

  /* Set initial guess x = 0 to LS */
  N_VConst(ZERO, xx);

  /* Set zero initial guess flag */
  retval = SUNLinSolSetZeroGuess(kinls_mem->LS, SUNTRUE);
  if (retval != SUNLS_SUCCESS) return(-1);

  /* set flag required for user-supplied J*v routine */
  kinls_mem->new_uu = SUNTRUE;

  /* Call solver */
  retval = SUNLinSolSolve(kinls_mem->LS, kinls_mem->J, xx, bb, tol);

  /* Retrieve solver statistics */
  res_norm = ZERO;
  if (kinls_mem->LS->ops->resnorm)
    res_norm = SUNLinSolResNorm(kinls_mem->LS);

  nli_inc = 0;
  if (kinls_mem->LS->ops->numiters)
    nli_inc = SUNLinSolNumIters(kinls_mem->LS);

  if (kinls_mem->iterative && kin_mem->kin_printfl > 2)
    KINPrintInfo(kin_mem, PRNT_NLI, "KINLS", "kinLsSolve",
                 INFO_NLI, nli_inc);

  /* Increment counters nli and ncfl */
  kinls_mem->nli += nli_inc;
  if (retval != SUNLS_SUCCESS) kinls_mem->ncfl++;

  /* Interpret solver return value */
  kinls_mem->last_flag = retval;

  if ( (retval != 0) && (retval != SUNLS_RES_REDUCED) ) {

    switch(retval) {
    case SUNLS_ATIMES_FAIL_REC:
    case SUNLS_PSOLVE_FAIL_REC:
      return(1);
      break;
    case SUNLS_MEM_NULL:
    case SUNLS_ILL_INPUT:
    case SUNLS_MEM_FAIL:
    case SUNLS_GS_FAIL:
    case SUNLS_CONV_FAIL:
    case SUNLS_QRFACT_FAIL:
    case SUNLS_LUFACT_FAIL:
    case SUNLS_QRSOL_FAIL:
      break;
    case SUNLS_PACKAGE_FAIL_REC:
      KINProcessError(kin_mem, SUNLS_PACKAGE_FAIL_REC, "KINLS",
                      "kinLsSolve",
                      "Failure in SUNLinSol external package");
      break;
    case SUNLS_PACKAGE_FAIL_UNREC:
      KINProcessError(kin_mem, SUNLS_PACKAGE_FAIL_UNREC, "KINLS",
                      "kinLsSolve",
                      "Failure in SUNLinSol external package");
      break;
    case SUNLS_ATIMES_FAIL_UNREC:
      KINProcessError(kin_mem, SUNLS_ATIMES_FAIL_UNREC, "KINLS",
                      "kinLsSolve", MSG_LS_JTIMES_FAILED);
      break;
    case SUNLS_PSOLVE_FAIL_UNREC:
      KINProcessError(kin_mem, SUNLS_PSOLVE_FAIL_UNREC, "KINLS",
                      "kinLsSolve", MSG_LS_PSOLVE_FAILED);
      break;
    }
    return(retval);
  }

  /* SUNLinSolSolve returned SUNLS_SUCCESS or SUNLS_RES_REDUCED */

  /* Compute auxiliary values for use in the linesearch and in
     KINForcingTerm. These will be subsequently corrected if the
     step is reduced by constraints or the linesearch.
*/ if (kin_mem->kin_globalstrategy != KIN_FP) { /* sJpnorm is the norm of the scaled product (scaled by fscale) of the current Jacobian matrix J and the step vector p (= solution vector xx) */ if (kin_mem->kin_inexact_ls && kin_mem->kin_etaflag == KIN_ETACHOICE1) { retval = kinLsATimes(kin_mem, xx, bb); if (retval > 0) { kinls_mem->last_flag = SUNLS_ATIMES_FAIL_REC; return(1); } else if (retval < 0) { kinls_mem->last_flag = SUNLS_ATIMES_FAIL_UNREC; return(-1); } *sJpnorm = N_VWL2Norm(bb, kin_mem->kin_fscale); } /* sFdotJp is the dot product of the scaled f vector and the scaled vector J*p, where the scaling uses fscale */ if ((kin_mem->kin_inexact_ls && kin_mem->kin_etaflag == KIN_ETACHOICE1) || kin_mem->kin_globalstrategy == KIN_LINESEARCH) { N_VProd(bb, kin_mem->kin_fscale, bb); N_VProd(bb, kin_mem->kin_fscale, bb); *sFdotJp = N_VDotProd(kin_mem->kin_fval, bb); } } if (kin_mem->kin_inexact_ls && kin_mem->kin_printfl > 2) KINPrintInfo(kin_mem, PRNT_EPS, "KINLS", "kinLsSolve", INFO_EPS, res_norm, kin_mem->kin_eps); return(0); } /*------------------------------------------------------------------ kinLsFree frees memory associated with the KINLs system solver interface ------------------------------------------------------------------*/ int kinLsFree(KINMem kin_mem) { KINLsMem kinls_mem; /* Return immediately if kin_mem or kin_mem->kin_lmem are NULL */ if (kin_mem == NULL) return (KINLS_SUCCESS); if (kin_mem->kin_lmem == NULL) return(KINLS_SUCCESS); kinls_mem = (KINLsMem) kin_mem->kin_lmem; /* Nullify SUNMatrix pointer */ kinls_mem->J = NULL; /* Free preconditioner memory (if applicable) */ if (kinls_mem->pfree) kinls_mem->pfree(kin_mem); /* free KINLs interface structure */ free(kin_mem->kin_lmem); return(KINLS_SUCCESS); } /*------------------------------------------------------------------ kinLsInitializeCounters resets counters for the LS interface ------------------------------------------------------------------*/ int kinLsInitializeCounters(KINLsMem kinls_mem) 
{ kinls_mem->nje = 0; kinls_mem->nfeDQ = 0; kinls_mem->npe = 0; kinls_mem->nli = 0; kinls_mem->nps = 0; kinls_mem->ncfl = 0; kinls_mem->njtimes = 0; return(0); } /*--------------------------------------------------------------- kinLs_AccessLMem This routine unpacks the kin_mem and ls_mem structures from void* pointer. If either is missing it returns KINLS_MEM_NULL or KINLS_LMEM_NULL. ---------------------------------------------------------------*/ int kinLs_AccessLMem(void* kinmem, const char *fname, KINMem *kin_mem, KINLsMem *kinls_mem) { if (kinmem==NULL) { KINProcessError(NULL, KINLS_MEM_NULL, "KINLS", fname, MSG_LS_KINMEM_NULL); return(KINLS_MEM_NULL); } *kin_mem = (KINMem) kinmem; if ((*kin_mem)->kin_lmem==NULL) { KINProcessError(*kin_mem, KINLS_LMEM_NULL, "KINLS", fname, MSG_LS_LMEM_NULL); return(KINLS_LMEM_NULL); } *kinls_mem = (KINLsMem) (*kin_mem)->kin_lmem; return(KINLS_SUCCESS); } /*--------------------------------------------------------------- EOF ---------------------------------------------------------------*/ StanHeaders/src/kinsol/kinsol_io.c0000644000176200001440000006764314645137106016722 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Allan Taylor, Alan Hindmarsh, Radu Serban, and * Aaron Collier, Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the optional input and output * functions for the KINSOL solver. 
* -----------------------------------------------------------------
 */

/* NOTE(review): the header names in the angle-bracket #include
   directives below appear to have been stripped by the text
   extraction (each reads just "#include") -- recover them from the
   upstream kinsol_io.c before compiling; only the quoted include
   survived intact. */
#include
#include

#include "kinsol_impl.h"
#include
#include

#define ZERO      RCONST(0.0)
#define POINT1    RCONST(0.1)
#define ONETHIRD  RCONST(0.3333333333333333)
#define HALF      RCONST(0.5)
#define TWOTHIRDS RCONST(0.6666666666666667)
#define POINT9    RCONST(0.9)
#define ONE       RCONST(1.0)
#define TWO       RCONST(2.0)
#define TWOPT5    RCONST(2.5)

/*
 * =================================================================
 * KINSOL optional input functions
 * =================================================================
 */

/*
 * -----------------------------------------------------------------
 * KINSetErrHandlerFn
 * -----------------------------------------------------------------
 */

int KINSetErrHandlerFn(void *kinmem, KINErrHandlerFn ehfun, void *eh_data)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetErrHandlerFn", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;
  /* store the error handler and its user data pointer */
  kin_mem->kin_ehfun = ehfun;
  kin_mem->kin_eh_data = eh_data;

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetErrFile
 * -----------------------------------------------------------------
 */

int KINSetErrFile(void *kinmem, FILE *errfp)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetErrFile", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;
  kin_mem->kin_errfp = errfp;

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetPrintLevel
 * -----------------------------------------------------------------
 */

int KINSetPrintLevel(void *kinmem, int printfl)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetPrintLevel", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;

  /* only levels 0..3 are valid */
  if ((printfl < 0) || (printfl > 3)) {
    KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetPrintLevel",
                    MSG_BAD_PRINTFL);
    return(KIN_ILL_INPUT);
  }

  kin_mem->kin_printfl = printfl;

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * KINSetInfoHandlerFn
 * -----------------------------------------------------------------
 */

int KINSetInfoHandlerFn(void *kinmem, KINInfoHandlerFn ihfun, void *ih_data)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetInfoHandlerFn", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;
  /* store the info handler and its user data pointer */
  kin_mem->kin_ihfun = ihfun;
  kin_mem->kin_ih_data = ih_data;

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetInfoFile
 * -----------------------------------------------------------------
 */

int KINSetInfoFile(void *kinmem, FILE *infofp)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetInfoFile", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;
  kin_mem->kin_infofp = infofp;

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetDebugFile
 * -----------------------------------------------------------------
 */

int KINSetDebugFile(void *kinmem, FILE *debugfp)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetDebugFile", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;

  /* debug output is only available in debug builds; a NULL file
     pointer falls back to stdout */
#if defined(SUNDIALS_DEBUG)
  if (debugfp)
    kin_mem->kin_debugfp = debugfp;
  else
    kin_mem->kin_debugfp = stdout;
  return(KIN_SUCCESS);
#else
  KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSetDebugFile",
                  "SUNDIALS was not built with debugging enabled");
  return(KIN_ILL_INPUT);
#endif
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetUserData
 * -----------------------------------------------------------------
 */

int KINSetUserData(void *kinmem, void *user_data)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetUserData", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;
  kin_mem->kin_user_data = user_data;

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetDamping
 * -----------------------------------------------------------------
 */

int KINSetDamping(void *kinmem, realtype beta)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetDamping", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;

  /* check for illegal input value */
  if (beta <= ZERO) {
    KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetDamping",
                    "beta <= 0 illegal");
    return(KIN_ILL_INPUT);
  }

  if (beta < ONE) {
    /* enable damping */
    kin_mem->kin_beta = beta;
    kin_mem->kin_damping = SUNTRUE;
  } else {
    /* disable damping */
    kin_mem->kin_beta = ONE;
    kin_mem->kin_damping = SUNFALSE;
  }

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetMAA
 * -----------------------------------------------------------------
 */

int KINSetMAA(void *kinmem, long int maa)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetMAA", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;

  if (maa < 0) {
    KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL",
                    "KINSetMAA", MSG_BAD_MAA);
    return(KIN_ILL_INPUT);
  }

  /* the Anderson acceleration depth is capped by the maximum
     number of nonlinear iterations */
  if (maa > kin_mem->kin_mxiter) maa = kin_mem->kin_mxiter;

  kin_mem->kin_m_aa = maa;

  return(KIN_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Function : KINSetDelayAA
 * -----------------------------------------------------------------
 */

int KINSetDelayAA(void *kinmem, long int delay)
{
  KINMem kin_mem;

  if (kinmem == NULL) {
    KINProcessError(NULL, KIN_MEM_NULL, "KINSOL",
                    "KINSetDelayAA", MSG_NO_MEM);
    return(KIN_MEM_NULL);
  }

  kin_mem = (KINMem) kinmem;

  /* check for illegal input value */
  if (delay < 0) {
    KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL",
"KINSetDelayAA", "delay < 0 illegal"); return(KIN_ILL_INPUT); } kin_mem->kin_delay_aa = delay; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetOrthAA * ----------------------------------------------------------------- */ int KINSetOrthAA(void *kinmem, int orthaa) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetOrthAA", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if ((orthaa < KIN_ORTH_MGS) || (orthaa > KIN_ORTH_DCGS2)) { KINProcessError(kin_mem, KIN_ILL_INPUT, "KINSOL", "KINSetOrthAA", MSG_BAD_ORTHAA); return(KIN_ILL_INPUT); } kin_mem->kin_orth_aa = orthaa; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetDampingAA * ----------------------------------------------------------------- */ int KINSetDampingAA(void *kinmem, realtype beta) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetDampingAA", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; /* check for illegal input value */ if (beta <= ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetDampingAA", "beta <= 0 illegal"); return(KIN_ILL_INPUT); } if (beta < ONE) { /* enable damping */ kin_mem->kin_beta_aa = beta; kin_mem->kin_damping_aa = SUNTRUE; } else { /* disable damping */ kin_mem->kin_beta_aa = ONE; kin_mem->kin_damping_aa = SUNFALSE; } return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetReturnNewest * ----------------------------------------------------------------- */ int KINSetReturnNewest(void *kinmem, booleantype ret_newest) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetReturnNewest", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; kin_mem->kin_ret_newest = ret_newest; return(KIN_SUCCESS); } /* * 
----------------------------------------------------------------- * Function : KINSetNumMaxIters * ----------------------------------------------------------------- */ int KINSetNumMaxIters(void *kinmem, long int mxiter) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetNumMaxIters", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (mxiter < 0) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetNumMaxIters", MSG_BAD_MXITER); return(KIN_ILL_INPUT); } if (mxiter == 0) kin_mem->kin_mxiter = MXITER_DEFAULT; else kin_mem->kin_mxiter = mxiter; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetNoInitSetup * ----------------------------------------------------------------- */ int KINSetNoInitSetup(void *kinmem, booleantype noInitSetup) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetNoInitSetup", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; kin_mem->kin_noInitSetup = noInitSetup; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetNoResMon * ----------------------------------------------------------------- */ int KINSetNoResMon(void *kinmem, booleantype noResMon) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetNoResMon", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; kin_mem->kin_noResMon = noResMon; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetMaxSetupCalls * ----------------------------------------------------------------- */ int KINSetMaxSetupCalls(void *kinmem, long int msbset) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetMaxSetupCalls", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (msbset < 0) { KINProcessError(NULL, 
KIN_ILL_INPUT, "KINSOL", "KINSetMaxSetupCalls", MSG_BAD_MSBSET); return(KIN_ILL_INPUT); } if (msbset == 0) kin_mem->kin_msbset = MSBSET_DEFAULT; else kin_mem->kin_msbset = msbset; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetMaxSubSetupCalls * ----------------------------------------------------------------- */ int KINSetMaxSubSetupCalls(void *kinmem, long int msbsetsub) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetMaxSubSetupCalls", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (msbsetsub < 0) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetMaxSubSetupCalls", MSG_BAD_MSBSETSUB); return(KIN_ILL_INPUT); } if (msbsetsub == 0) kin_mem->kin_msbset_sub = MSBSET_SUB_DEFAULT; else kin_mem->kin_msbset_sub = msbsetsub; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetEtaForm * ----------------------------------------------------------------- */ int KINSetEtaForm(void *kinmem, int etachoice) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetEtaForm", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if ((etachoice != KIN_ETACONSTANT) && (etachoice != KIN_ETACHOICE1) && (etachoice != KIN_ETACHOICE2)) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetEtaForm", MSG_BAD_ETACHOICE); return(KIN_ILL_INPUT); } kin_mem->kin_etaflag = etachoice; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetEtaConstValue * ----------------------------------------------------------------- */ int KINSetEtaConstValue(void *kinmem, realtype eta) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetEtaConstValue", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if ((eta < ZERO) || (eta > ONE)) { KINProcessError(NULL, 
KIN_ILL_INPUT, "KINSOL", "KINSetEtaConstValue", MSG_BAD_ETACONST); return(KIN_ILL_INPUT); } if (eta == ZERO) kin_mem->kin_eta = POINT1; else kin_mem->kin_eta = eta; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetEtaParams * ----------------------------------------------------------------- */ int KINSetEtaParams(void *kinmem, realtype egamma, realtype ealpha) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetEtaParams", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if ((ealpha <= ONE) || (ealpha > TWO)) if (ealpha != ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetEtaParams", MSG_BAD_ALPHA); return(KIN_ILL_INPUT); } if (ealpha == ZERO) kin_mem->kin_eta_alpha = TWO; else kin_mem->kin_eta_alpha = ealpha; if ((egamma <= ZERO) || (egamma > ONE)) if (egamma != ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetEtaParams", MSG_BAD_GAMMA); return(KIN_ILL_INPUT); } if (egamma == ZERO) kin_mem->kin_eta_gamma = POINT9; else kin_mem->kin_eta_gamma = egamma; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetResMonParams * ----------------------------------------------------------------- */ int KINSetResMonParams(void *kinmem, realtype omegamin, realtype omegamax) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetResMonParams", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; /* check omegamin */ if (omegamin < ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetResMonParams", MSG_BAD_OMEGA); return(KIN_ILL_INPUT); } if (omegamin == ZERO) kin_mem->kin_omega_min = OMEGA_MIN; else kin_mem->kin_omega_min = omegamin; /* check omegamax */ if (omegamax < ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetResMonParams", MSG_BAD_OMEGA); return(KIN_ILL_INPUT); } if (omegamax == ZERO) { if 
(kin_mem->kin_omega_min > OMEGA_MAX) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetResMonParams", MSG_BAD_OMEGA); return(KIN_ILL_INPUT); } else kin_mem->kin_omega_max = OMEGA_MAX; } else { if (kin_mem->kin_omega_min > omegamax) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetResMonParams", MSG_BAD_OMEGA); return(KIN_ILL_INPUT); } else kin_mem->kin_omega_max = omegamax; } return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetResMonConstValue * ----------------------------------------------------------------- */ int KINSetResMonConstValue(void *kinmem, realtype omegaconst) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetResMonConstValue", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; /* check omegaconst */ if (omegaconst < ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetResMonConstValue", MSG_BAD_OMEGA); return(KIN_ILL_INPUT); } /* Load omega value. 
A value of 0 will force using omega_min and omega_max */ kin_mem->kin_omega = omegaconst; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetNoMinEps * ----------------------------------------------------------------- */ int KINSetNoMinEps(void *kinmem, booleantype noMinEps) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetNoMinEps", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; kin_mem->kin_noMinEps = noMinEps; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetMaxNewtonStep * ----------------------------------------------------------------- */ int KINSetMaxNewtonStep(void *kinmem, realtype mxnewtstep) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetMaxNewtonStep", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (mxnewtstep < ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetMaxNewtonStep", MSG_BAD_MXNEWTSTEP); return(KIN_ILL_INPUT); } /* Note: passing a value of 0.0 will use the default value (computed in KINSolInit) */ kin_mem->kin_mxnstepin = mxnewtstep; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetMaxBetaFails * ----------------------------------------------------------------- */ int KINSetMaxBetaFails(void *kinmem, long int mxnbcf) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetMaxBetaFails", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (mxnbcf < 0) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetMaxBetaFails", MSG_BAD_MXNBCF); return(KIN_ILL_INPUT); } if (mxnbcf == 0) kin_mem->kin_mxnbcf = MXNBCF_DEFAULT; else kin_mem->kin_mxnbcf = mxnbcf; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : 
KINSetRelErrFunc * ----------------------------------------------------------------- */ int KINSetRelErrFunc(void *kinmem, realtype relfunc) { KINMem kin_mem; realtype uround; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetRelErrFunc", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (relfunc < ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetRelErrFunc", MSG_BAD_RELFUNC); return(KIN_ILL_INPUT); } if (relfunc == ZERO) { uround = kin_mem->kin_uround; kin_mem->kin_sqrt_relfunc = SUNRsqrt(uround); } else { kin_mem->kin_sqrt_relfunc = SUNRsqrt(relfunc); } return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetFuncNormTol * ----------------------------------------------------------------- */ int KINSetFuncNormTol(void *kinmem, realtype fnormtol) { KINMem kin_mem; realtype uround; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetFuncNormTol", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (fnormtol < ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetFuncNormTol", MSG_BAD_FNORMTOL); return(KIN_ILL_INPUT); } if (fnormtol == ZERO) { uround = kin_mem->kin_uround; kin_mem->kin_fnormtol = SUNRpowerR(uround,ONETHIRD); } else { kin_mem->kin_fnormtol = fnormtol; } return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetScaledStepTol * ----------------------------------------------------------------- */ int KINSetScaledStepTol(void *kinmem, realtype scsteptol) { KINMem kin_mem; realtype uround; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetScaledStepTol", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (scsteptol < ZERO) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetScaledStepTol", MSG_BAD_SCSTEPTOL); return(KIN_ILL_INPUT); } if (scsteptol == ZERO) { uround = kin_mem->kin_uround; 
kin_mem->kin_scsteptol = SUNRpowerR(uround,TWOTHIRDS); } else { kin_mem->kin_scsteptol = scsteptol; } return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetConstraints * ----------------------------------------------------------------- */ int KINSetConstraints(void *kinmem, N_Vector constraints) { KINMem kin_mem; realtype temptest; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetConstraints", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (constraints == NULL) { if (kin_mem->kin_constraintsSet) { N_VDestroy(kin_mem->kin_constraints); kin_mem->kin_lrw -= kin_mem->kin_lrw1; kin_mem->kin_liw -= kin_mem->kin_liw1; } kin_mem->kin_constraintsSet = SUNFALSE; return(KIN_SUCCESS); } /* Check the constraints vector */ temptest = N_VMaxNorm(constraints); if (temptest > TWOPT5){ KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetConstraints", MSG_BAD_CONSTRAINTS); return(KIN_ILL_INPUT); } if (!kin_mem->kin_constraintsSet) { kin_mem->kin_constraints = N_VClone(constraints); kin_mem->kin_lrw += kin_mem->kin_lrw1; kin_mem->kin_liw += kin_mem->kin_liw1; kin_mem->kin_constraintsSet = SUNTRUE; } /* Load the constraint vector */ N_VScale(ONE, constraints, kin_mem->kin_constraints); return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINSetSysFunc * ----------------------------------------------------------------- */ int KINSetSysFunc(void *kinmem, KINSysFn func) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINSetSysFunc", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; if (func == NULL) { KINProcessError(NULL, KIN_ILL_INPUT, "KINSOL", "KINSetSysFunc", MSG_FUNC_NULL); return(KIN_ILL_INPUT); } kin_mem->kin_func = func; return(KIN_SUCCESS); } /* * ================================================================= * KINSOL optional output functions * 
================================================================= */ /* * ----------------------------------------------------------------- * Function : KINGetWorkSpace * ----------------------------------------------------------------- */ int KINGetWorkSpace(void *kinmem, long int *lenrw, long int *leniw) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINGetWorkSpace", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; *lenrw = kin_mem->kin_lrw; *leniw = kin_mem->kin_liw; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINGetNumNonlinSolvIters * ----------------------------------------------------------------- */ int KINGetNumNonlinSolvIters(void *kinmem, long int *nniters) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINGetNumNonlinSolvIters", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; *nniters = kin_mem->kin_nni; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINGetNumFuncEvals * ----------------------------------------------------------------- */ int KINGetNumFuncEvals(void *kinmem, long int *nfevals) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINGetNumFuncEvals", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; *nfevals = kin_mem->kin_nfe; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINGetNumBetaCondFails * ----------------------------------------------------------------- */ int KINGetNumBetaCondFails(void *kinmem, long int *nbcfails) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINGetNumBetaCondFails", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; *nbcfails = kin_mem->kin_nbcf; return(KIN_SUCCESS); } /* * 
----------------------------------------------------------------- * Function : KINGetNumBacktrackOps * ----------------------------------------------------------------- */ int KINGetNumBacktrackOps(void *kinmem, long int *nbacktr) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINGetNumBacktrackOps", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; *nbacktr = kin_mem->kin_nbktrk; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINGetFuncNorm * ----------------------------------------------------------------- */ int KINGetFuncNorm(void *kinmem, realtype *funcnorm) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINGetFuncNorm", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; *funcnorm = kin_mem->kin_fnorm; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINGetStepLength * ----------------------------------------------------------------- */ int KINGetStepLength(void *kinmem, realtype *steplength) { KINMem kin_mem; if (kinmem == NULL) { KINProcessError(NULL, KIN_MEM_NULL, "KINSOL", "KINGetStepLength", MSG_NO_MEM); return(KIN_MEM_NULL); } kin_mem = (KINMem) kinmem; *steplength = kin_mem->kin_stepl; return(KIN_SUCCESS); } /* * ----------------------------------------------------------------- * Function : KINGetReturnFlagName * ----------------------------------------------------------------- */ char *KINGetReturnFlagName(long int flag) { char *name; name = (char *)malloc(24*sizeof(char)); switch(flag) { case KIN_SUCCESS: sprintf(name, "KIN_SUCCESS"); break; case KIN_INITIAL_GUESS_OK: sprintf(name, "KIN_INITIAL_GUESS_OK"); break; case KIN_STEP_LT_STPTOL: sprintf(name, "KIN_STEP_LT_STPTOL"); break; case KIN_WARNING: sprintf(name, "KIN_WARNING"); break; case KIN_MEM_NULL: sprintf(name, "KIN_MEM_NULL"); break; case KIN_ILL_INPUT: sprintf(name, 
"KIN_ILL_INPUT"); break; case KIN_NO_MALLOC: sprintf(name, "KIN_NO_MALLOC"); break; case KIN_MEM_FAIL: sprintf(name, "KIN_MEM_FAIL"); break; case KIN_LINESEARCH_NONCONV: sprintf(name, "KIN_LINESEARCH_NONCONV"); break; case KIN_MAXITER_REACHED: sprintf(name, "KIN_MAXITER_REACHED"); break; case KIN_MXNEWT_5X_EXCEEDED: sprintf(name, "KIN_MXNEWT_5X_EXCEEDED"); break; case KIN_LINESEARCH_BCFAIL: sprintf(name, "KIN_LINESEARCH_BCFAIL"); break; case KIN_LINSOLV_NO_RECOVERY: sprintf(name, "KIN_LINSOLV_NO_RECOVERY"); break; case KIN_LINIT_FAIL: sprintf(name, "KIN_LINIT_FAIL"); break; case KIN_LSETUP_FAIL: sprintf(name, "KIN_LSETUP_FAIL"); break; case KIN_LSOLVE_FAIL: sprintf(name, "KIN_LSOLVE_FAIL"); break; default: sprintf(name, "NONE"); } return(name); } StanHeaders/src/kinsol/LICENSE0000644000176200001440000000305014645137106015553 0ustar liggesusersBSD 3-Clause License Copyright (c) 2002-2022, Lawrence Livermore National Security and Southern Methodist University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. StanHeaders/src/kinsol/fmod/0000755000176200001440000000000014511135055015466 5ustar liggesusersStanHeaders/src/kinsol/fmod/fkinsol_mod.f900000644000176200001440000013476114645137106020335 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fkinsol_mod use, intrinsic :: ISO_C_BINDING use fsundials_types_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_linearsolver_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_nonlinearsolver_mod implicit none private ! 
DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: KIN_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: KIN_INITIAL_GUESS_OK = 1_C_INT integer(C_INT), parameter, public :: KIN_STEP_LT_STPTOL = 2_C_INT integer(C_INT), parameter, public :: KIN_WARNING = 99_C_INT integer(C_INT), parameter, public :: KIN_MEM_NULL = -1_C_INT integer(C_INT), parameter, public :: KIN_ILL_INPUT = -2_C_INT integer(C_INT), parameter, public :: KIN_NO_MALLOC = -3_C_INT integer(C_INT), parameter, public :: KIN_MEM_FAIL = -4_C_INT integer(C_INT), parameter, public :: KIN_LINESEARCH_NONCONV = -5_C_INT integer(C_INT), parameter, public :: KIN_MAXITER_REACHED = -6_C_INT integer(C_INT), parameter, public :: KIN_MXNEWT_5X_EXCEEDED = -7_C_INT integer(C_INT), parameter, public :: KIN_LINESEARCH_BCFAIL = -8_C_INT integer(C_INT), parameter, public :: KIN_LINSOLV_NO_RECOVERY = -9_C_INT integer(C_INT), parameter, public :: KIN_LINIT_FAIL = -10_C_INT integer(C_INT), parameter, public :: KIN_LSETUP_FAIL = -11_C_INT integer(C_INT), parameter, public :: KIN_LSOLVE_FAIL = -12_C_INT integer(C_INT), parameter, public :: KIN_SYSFUNC_FAIL = -13_C_INT integer(C_INT), parameter, public :: KIN_FIRST_SYSFUNC_ERR = -14_C_INT integer(C_INT), parameter, public :: KIN_REPTD_SYSFUNC_ERR = -15_C_INT integer(C_INT), parameter, public :: KIN_VECTOROP_ERR = -16_C_INT integer(C_INT), parameter, public :: KIN_CONTEXT_ERR = -17_C_INT integer(C_INT), parameter, public :: KIN_ORTH_MGS = 0_C_INT integer(C_INT), parameter, public :: KIN_ORTH_ICWY = 1_C_INT integer(C_INT), parameter, public :: KIN_ORTH_CGS2 = 2_C_INT integer(C_INT), parameter, public :: KIN_ORTH_DCGS2 = 3_C_INT integer(C_INT), parameter, public :: KIN_ETACHOICE1 = 1_C_INT integer(C_INT), parameter, public :: KIN_ETACHOICE2 = 2_C_INT integer(C_INT), parameter, public :: KIN_ETACONSTANT = 3_C_INT integer(C_INT), parameter, public :: KIN_NONE = 0_C_INT integer(C_INT), parameter, public :: KIN_LINESEARCH = 1_C_INT integer(C_INT), parameter, public :: 
KIN_PICARD = 2_C_INT integer(C_INT), parameter, public :: KIN_FP = 3_C_INT public :: FKINCreate public :: FKINInit public :: FKINSol public :: FKINSetErrHandlerFn public :: FKINSetErrFile public :: FKINSetInfoHandlerFn public :: FKINSetInfoFile public :: FKINSetUserData public :: FKINSetPrintLevel public :: FKINSetDamping public :: FKINSetMAA public :: FKINSetOrthAA public :: FKINSetDelayAA public :: FKINSetDampingAA public :: FKINSetReturnNewest public :: FKINSetNumMaxIters public :: FKINSetNoInitSetup public :: FKINSetNoResMon public :: FKINSetMaxSetupCalls public :: FKINSetMaxSubSetupCalls public :: FKINSetEtaForm public :: FKINSetEtaConstValue public :: FKINSetEtaParams public :: FKINSetResMonParams public :: FKINSetResMonConstValue public :: FKINSetNoMinEps public :: FKINSetMaxNewtonStep public :: FKINSetMaxBetaFails public :: FKINSetRelErrFunc public :: FKINSetFuncNormTol public :: FKINSetScaledStepTol public :: FKINSetConstraints public :: FKINSetSysFunc public :: FKINGetWorkSpace public :: FKINGetNumNonlinSolvIters public :: FKINGetNumFuncEvals public :: FKINGetNumBetaCondFails public :: FKINGetNumBacktrackOps public :: FKINGetFuncNorm public :: FKINGetStepLength type, bind(C) :: SwigArrayWrapper type(C_PTR), public :: data = C_NULL_PTR integer(C_SIZE_T), public :: size = 0 end type public :: FKINGetReturnFlagName public :: FKINFree public :: FKINSetJacTimesVecSysFn public :: FKINSetDebugFile integer(C_INT), parameter, public :: KINBBDPRE_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: KINBBDPRE_PDATA_NULL = -11_C_INT integer(C_INT), parameter, public :: KINBBDPRE_FUNC_UNRECVR = -12_C_INT public :: FKINBBDPrecInit public :: FKINBBDPrecGetWorkSpace public :: FKINBBDPrecGetNumGfnEvals integer(C_INT), parameter, public :: KINLS_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: KINLS_MEM_NULL = -1_C_INT integer(C_INT), parameter, public :: KINLS_LMEM_NULL = -2_C_INT integer(C_INT), parameter, public :: KINLS_ILL_INPUT = -3_C_INT integer(C_INT), 
! ---------------------------------------------------------------------------
! NOTE(review): SWIG-generated Fortran bindings for the SUNDIALS KINSOL
! nonlinear solver (FKIN* API).  Generated code -- do not hand-edit logic.
! The physical lines below appear whitespace-collapsed relative to valid
! free-form Fortran; the code text is kept byte-identical here.
! TODO(review): confirm the original line breaks against the upstream
! SWIG-generated fkinsol module before reformatting.
!
! KINLS_* parameters: KINSOL linear-solver interface return/error codes
! (negative C int values).  The public :: statements export the FKIN*
! wrapper procedures defined after 'contains' further below.
! The interface block declares bind(C) signatures for the _wrap_FKIN*
! entry points implemented in the SWIG-generated C wrapper layer; every
! argument is passed as a C-interoperable scalar, C_PTR, or C_FUNPTR.
! ---------------------------------------------------------------------------
parameter, public :: KINLS_MEM_FAIL = -4_C_INT integer(C_INT), parameter, public :: KINLS_PMEM_NULL = -5_C_INT integer(C_INT), parameter, public :: KINLS_JACFUNC_ERR = -6_C_INT integer(C_INT), parameter, public :: KINLS_SUNMAT_FAIL = -7_C_INT integer(C_INT), parameter, public :: KINLS_SUNLS_FAIL = -8_C_INT public :: FKINSetLinearSolver public :: FKINSetJacFn public :: FKINSetPreconditioner public :: FKINSetJacTimesVecFn public :: FKINGetLinWorkSpace public :: FKINGetNumJacEvals public :: FKINGetNumLinFuncEvals public :: FKINGetNumPrecEvals public :: FKINGetNumPrecSolves public :: FKINGetNumLinIters public :: FKINGetNumLinConvFails public :: FKINGetNumJtimesEvals public :: FKINGetLastLinFlag public :: FKINGetLinReturnFlagName ! WRAPPER DECLARATIONS interface function swigc_FKINCreate(farg1) & bind(C, name="_wrap_FKINCreate") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FKINInit(farg1, farg2, farg3) & bind(C, name="_wrap_FKINInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FKINSol(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FKINSol") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FKINSetErrHandlerFn(farg1, farg2, farg3) & bind(C, name="_wrap_FKINSetErrHandlerFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FKINSetErrFile(farg1, farg2) & bind(C, name="_wrap_FKINSetErrFile") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2
integer(C_INT) :: fresult end function function swigc_FKINSetInfoHandlerFn(farg1, farg2, farg3) & bind(C, name="_wrap_FKINSetInfoHandlerFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FKINSetInfoFile(farg1, farg2) & bind(C, name="_wrap_FKINSetInfoFile") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetUserData(farg1, farg2) & bind(C, name="_wrap_FKINSetUserData") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetPrintLevel(farg1, farg2) & bind(C, name="_wrap_FKINSetPrintLevel") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetDamping(farg1, farg2) & bind(C, name="_wrap_FKINSetDamping") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetMAA(farg1, farg2) & bind(C, name="_wrap_FKINSetMAA") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetOrthAA(farg1, farg2) & bind(C, name="_wrap_FKINSetOrthAA") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetDelayAA(farg1, farg2) & bind(C, name="_wrap_FKINSetDelayAA") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetDampingAA(farg1,
farg2) & bind(C, name="_wrap_FKINSetDampingAA") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetReturnNewest(farg1, farg2) & bind(C, name="_wrap_FKINSetReturnNewest") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetNumMaxIters(farg1, farg2) & bind(C, name="_wrap_FKINSetNumMaxIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetNoInitSetup(farg1, farg2) & bind(C, name="_wrap_FKINSetNoInitSetup") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetNoResMon(farg1, farg2) & bind(C, name="_wrap_FKINSetNoResMon") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetMaxSetupCalls(farg1, farg2) & bind(C, name="_wrap_FKINSetMaxSetupCalls") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetMaxSubSetupCalls(farg1, farg2) & bind(C, name="_wrap_FKINSetMaxSubSetupCalls") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetEtaForm(farg1, farg2) & bind(C, name="_wrap_FKINSetEtaForm") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetEtaConstValue(farg1, farg2) & bind(C,
name="_wrap_FKINSetEtaConstValue") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetEtaParams(farg1, farg2, farg3) & bind(C, name="_wrap_FKINSetEtaParams") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FKINSetResMonParams(farg1, farg2, farg3) & bind(C, name="_wrap_FKINSetResMonParams") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FKINSetResMonConstValue(farg1, farg2) & bind(C, name="_wrap_FKINSetResMonConstValue") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetNoMinEps(farg1, farg2) & bind(C, name="_wrap_FKINSetNoMinEps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetMaxNewtonStep(farg1, farg2) & bind(C, name="_wrap_FKINSetMaxNewtonStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetMaxBetaFails(farg1, farg2) & bind(C, name="_wrap_FKINSetMaxBetaFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetRelErrFunc(farg1, farg2) & bind(C, name="_wrap_FKINSetRelErrFunc") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function
swigc_FKINSetFuncNormTol(farg1, farg2) & bind(C, name="_wrap_FKINSetFuncNormTol") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetScaledStepTol(farg1, farg2) & bind(C, name="_wrap_FKINSetScaledStepTol") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetConstraints(farg1, farg2) & bind(C, name="_wrap_FKINSetConstraints") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetSysFunc(farg1, farg2) & bind(C, name="_wrap_FKINSetSysFunc") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FKINGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FKINGetNumNonlinSolvIters(farg1, farg2) & bind(C, name="_wrap_FKINGetNumNonlinSolvIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumFuncEvals(farg1, farg2) & bind(C, name="_wrap_FKINGetNumFuncEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumBetaCondFails(farg1, farg2) & bind(C, name="_wrap_FKINGetNumBetaCondFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumBacktrackOps(farg1,
farg2) & bind(C, name="_wrap_FKINGetNumBacktrackOps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetFuncNorm(farg1, farg2) & bind(C, name="_wrap_FKINGetFuncNorm") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetStepLength(farg1, farg2) & bind(C, name="_wrap_FKINGetStepLength") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function subroutine SWIG_free(cptr) & bind(C, name="free") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: cptr end subroutine function swigc_FKINGetReturnFlagName(farg1) & bind(C, name="_wrap_FKINGetReturnFlagName") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper integer(C_LONG), intent(in) :: farg1 type(SwigArrayWrapper) :: fresult end function subroutine swigc_FKINFree(farg1) & bind(C, name="_wrap_FKINFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FKINSetJacTimesVecSysFn(farg1, farg2) & bind(C, name="_wrap_FKINSetJacTimesVecSysFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetDebugFile(farg1, farg2) & bind(C, name="_wrap_FKINSetDebugFile") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINBBDPrecInit(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) & bind(C, name="_wrap_FKINBBDPrecInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4
integer(C_INT64_T), intent(in) :: farg5 integer(C_INT64_T), intent(in) :: farg6 real(C_DOUBLE), intent(in) :: farg7 type(C_FUNPTR), value :: farg8 type(C_FUNPTR), value :: farg9 integer(C_INT) :: fresult end function function swigc_FKINBBDPrecGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FKINBBDPrecGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FKINBBDPrecGetNumGfnEvals(farg1, farg2) & bind(C, name="_wrap_FKINBBDPrecGetNumGfnEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetLinearSolver(farg1, farg2, farg3) & bind(C, name="_wrap_FKINSetLinearSolver") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FKINSetJacFn(farg1, farg2) & bind(C, name="_wrap_FKINSetJacFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINSetPreconditioner(farg1, farg2, farg3) & bind(C, name="_wrap_FKINSetPreconditioner") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FKINSetJacTimesVecFn(farg1, farg2) & bind(C, name="_wrap_FKINSetJacTimesVecFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetLinWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FKINGetLinWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value
:: farg3 integer(C_INT) :: fresult end function function swigc_FKINGetNumJacEvals(farg1, farg2) & bind(C, name="_wrap_FKINGetNumJacEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumLinFuncEvals(farg1, farg2) & bind(C, name="_wrap_FKINGetNumLinFuncEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumPrecEvals(farg1, farg2) & bind(C, name="_wrap_FKINGetNumPrecEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumPrecSolves(farg1, farg2) & bind(C, name="_wrap_FKINGetNumPrecSolves") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumLinIters(farg1, farg2) & bind(C, name="_wrap_FKINGetNumLinIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumLinConvFails(farg1, farg2) & bind(C, name="_wrap_FKINGetNumLinConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetNumJtimesEvals(farg1, farg2) & bind(C, name="_wrap_FKINGetNumJtimesEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FKINGetLastLinFlag(farg1, farg2) & bind(C, name="_wrap_FKINGetLastLinFlag") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function
! ---------------------------------------------------------------------------
! NOTE(review): end of the bind(C) wrapper interface; 'contains' begins the
! public module subprograms (SWIG-generated -- do not hand-edit logic).
! Each FKIN* function marshals its Fortran arguments into C-interoperable
! locals (farg1, farg2, ...), calls the matching swigc_FKIN* wrapper, and
! returns the C result code unchanged.  Derived-type arguments declared
! target (N_Vector, SUNLinearSolver, SUNMatrix) are passed by address via
! c_loc(); C_FUNPTR callback arguments are forwarded by value.
! The physical lines below appear whitespace-collapsed; code text is kept
! byte-identical.  TODO(review): confirm line breaks against upstream.
! ---------------------------------------------------------------------------
swigc_FKINGetLinReturnFlagName(farg1) & bind(C, name="_wrap_FKINGetLinReturnFlagName") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper integer(C_LONG), intent(in) :: farg1 type(SwigArrayWrapper) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FKINCreate(sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = sunctx fresult = swigc_FKINCreate(farg1) swig_result = fresult end function function FKINInit(kinmem, func, tmpl) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: func type(N_Vector), target, intent(inout) :: tmpl integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = kinmem farg2 = func farg3 = c_loc(tmpl) fresult = swigc_FKINInit(farg1, farg2, farg3) swig_result = fresult end function function FKINSol(kinmem, uu, strategy, u_scale, f_scale) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(N_Vector), target, intent(inout) :: uu integer(C_INT), intent(in) :: strategy type(N_Vector), target, intent(inout) :: u_scale type(N_Vector), target, intent(inout) :: f_scale integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = kinmem farg2 = c_loc(uu) farg3 = strategy farg4 = c_loc(u_scale) farg5 = c_loc(f_scale) fresult = swigc_FKINSol(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FKINSetErrHandlerFn(kinmem, ehfun, eh_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: ehfun type(C_PTR) :: eh_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 =
kinmem farg2 = ehfun farg3 = eh_data fresult = swigc_FKINSetErrHandlerFn(farg1, farg2, farg3) swig_result = fresult end function function FKINSetErrFile(kinmem, errfp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_PTR) :: errfp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = errfp fresult = swigc_FKINSetErrFile(farg1, farg2) swig_result = fresult end function function FKINSetInfoHandlerFn(kinmem, ihfun, ih_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: ihfun type(C_PTR) :: ih_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = kinmem farg2 = ihfun farg3 = ih_data fresult = swigc_FKINSetInfoHandlerFn(farg1, farg2, farg3) swig_result = fresult end function function FKINSetInfoFile(kinmem, infofp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_PTR) :: infofp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = infofp fresult = swigc_FKINSetInfoFile(farg1, farg2) swig_result = fresult end function function FKINSetUserData(kinmem, user_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_PTR) :: user_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = user_data fresult = swigc_FKINSetUserData(farg1, farg2) swig_result = fresult end function function FKINSetPrintLevel(kinmem, printfl) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT), intent(in) :: printfl integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = kinmem farg2 = printfl fresult = swigc_FKINSetPrintLevel(farg1, farg2) swig_result = fresult end
function function FKINSetDamping(kinmem, beta) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: beta integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = beta fresult = swigc_FKINSetDamping(farg1, farg2) swig_result = fresult end function function FKINSetMAA(kinmem, maa) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), intent(in) :: maa integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = kinmem farg2 = maa fresult = swigc_FKINSetMAA(farg1, farg2) swig_result = fresult end function function FKINSetOrthAA(kinmem, orthaa) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT), intent(in) :: orthaa integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = kinmem farg2 = orthaa fresult = swigc_FKINSetOrthAA(farg1, farg2) swig_result = fresult end function function FKINSetDelayAA(kinmem, delay) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), intent(in) :: delay integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = kinmem farg2 = delay fresult = swigc_FKINSetDelayAA(farg1, farg2) swig_result = fresult end function function FKINSetDampingAA(kinmem, beta) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: beta integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = beta fresult = swigc_FKINSetDampingAA(farg1, farg2) swig_result = fresult end function function FKINSetReturnNewest(kinmem, ret_newest) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT), intent(in) :: ret_newest
integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = kinmem farg2 = ret_newest fresult = swigc_FKINSetReturnNewest(farg1, farg2) swig_result = fresult end function function FKINSetNumMaxIters(kinmem, mxiter) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), intent(in) :: mxiter integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = kinmem farg2 = mxiter fresult = swigc_FKINSetNumMaxIters(farg1, farg2) swig_result = fresult end function function FKINSetNoInitSetup(kinmem, noinitsetup) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT), intent(in) :: noinitsetup integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = kinmem farg2 = noinitsetup fresult = swigc_FKINSetNoInitSetup(farg1, farg2) swig_result = fresult end function function FKINSetNoResMon(kinmem, nonniresmon) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT), intent(in) :: nonniresmon integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = kinmem farg2 = nonniresmon fresult = swigc_FKINSetNoResMon(farg1, farg2) swig_result = fresult end function function FKINSetMaxSetupCalls(kinmem, msbset) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), intent(in) :: msbset integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = kinmem farg2 = msbset fresult = swigc_FKINSetMaxSetupCalls(farg1, farg2) swig_result = fresult end function function FKINSetMaxSubSetupCalls(kinmem, msbsetsub) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), intent(in) :: msbsetsub integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = kinmem
farg2 = msbsetsub fresult = swigc_FKINSetMaxSubSetupCalls(farg1, farg2) swig_result = fresult end function function FKINSetEtaForm(kinmem, etachoice) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT), intent(in) :: etachoice integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = kinmem farg2 = etachoice fresult = swigc_FKINSetEtaForm(farg1, farg2) swig_result = fresult end function function FKINSetEtaConstValue(kinmem, eta) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: eta integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = eta fresult = swigc_FKINSetEtaConstValue(farg1, farg2) swig_result = fresult end function function FKINSetEtaParams(kinmem, egamma, ealpha) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: egamma real(C_DOUBLE), intent(in) :: ealpha integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 real(C_DOUBLE) :: farg3 farg1 = kinmem farg2 = egamma farg3 = ealpha fresult = swigc_FKINSetEtaParams(farg1, farg2, farg3) swig_result = fresult end function function FKINSetResMonParams(kinmem, omegamin, omegamax) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: omegamin real(C_DOUBLE), intent(in) :: omegamax integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 real(C_DOUBLE) :: farg3 farg1 = kinmem farg2 = omegamin farg3 = omegamax fresult = swigc_FKINSetResMonParams(farg1, farg2, farg3) swig_result = fresult end function function FKINSetResMonConstValue(kinmem, omegaconst) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: omegaconst integer(C_INT)
:: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = omegaconst fresult = swigc_FKINSetResMonConstValue(farg1, farg2) swig_result = fresult end function function FKINSetNoMinEps(kinmem, nomineps) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT), intent(in) :: nomineps integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = kinmem farg2 = nomineps fresult = swigc_FKINSetNoMinEps(farg1, farg2) swig_result = fresult end function function FKINSetMaxNewtonStep(kinmem, mxnewtstep) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: mxnewtstep integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = mxnewtstep fresult = swigc_FKINSetMaxNewtonStep(farg1, farg2) swig_result = fresult end function function FKINSetMaxBetaFails(kinmem, mxnbcf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), intent(in) :: mxnbcf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = kinmem farg2 = mxnbcf fresult = swigc_FKINSetMaxBetaFails(farg1, farg2) swig_result = fresult end function function FKINSetRelErrFunc(kinmem, relfunc) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: relfunc integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = relfunc fresult = swigc_FKINSetRelErrFunc(farg1, farg2) swig_result = fresult end function function FKINSetFuncNormTol(kinmem, fnormtol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: fnormtol integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = fnormtol fresult =
! ---------------------------------------------------------------------------
! NOTE(review): continuation of the SWIG-generated FKIN* module subprograms
! (generated code -- do not hand-edit logic).  Setters forward scalar option
! values; getters declare their outputs as assumed-size target arrays and
! pass c_loc(arg(1)) so the C side writes results in place.
! SWIG_chararray_to_string copies a C char buffer described by a
! SwigArrayWrapper (data pointer + size) element-by-element into a newly
! allocated Fortran character string via c_f_pointer.
! In FKINGetReturnFlagName, 'if (.false.) call SWIG_free(fresult%data)' is
! dead code that never frees the returned C buffer -- presumably the C
! string is not heap-owned by the caller; verify against the C wrapper.
! The physical lines below appear whitespace-collapsed; code text is kept
! byte-identical.  TODO(review): confirm line breaks against upstream.
! ---------------------------------------------------------------------------
swigc_FKINSetFuncNormTol(farg1, farg2) swig_result = fresult end function function FKINSetScaledStepTol(kinmem, scsteptol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), intent(in) :: scsteptol integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = kinmem farg2 = scsteptol fresult = swigc_FKINSetScaledStepTol(farg1, farg2) swig_result = fresult end function function FKINSetConstraints(kinmem, constraints) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(N_Vector), target, intent(inout) :: constraints integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(constraints) fresult = swigc_FKINSetConstraints(farg1, farg2) swig_result = fresult end function function FKINSetSysFunc(kinmem, func) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: func integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = kinmem farg2 = func fresult = swigc_FKINSetSysFunc(farg1, farg2) swig_result = fresult end function function FKINGetWorkSpace(kinmem, lenrw, leniw) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: lenrw integer(C_LONG), dimension(*), target, intent(inout) :: leniw integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = kinmem farg2 = c_loc(lenrw(1)) farg3 = c_loc(leniw(1)) fresult = swigc_FKINGetWorkSpace(farg1, farg2, farg3) swig_result = fresult end function function FKINGetNumNonlinSolvIters(kinmem, nniters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: nniters integer(C_INT) :: fresult
type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(nniters(1)) fresult = swigc_FKINGetNumNonlinSolvIters(farg1, farg2) swig_result = fresult end function function FKINGetNumFuncEvals(kinmem, nfevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: nfevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(nfevals(1)) fresult = swigc_FKINGetNumFuncEvals(farg1, farg2) swig_result = fresult end function function FKINGetNumBetaCondFails(kinmem, nbcfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: nbcfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(nbcfails(1)) fresult = swigc_FKINGetNumBetaCondFails(farg1, farg2) swig_result = fresult end function function FKINGetNumBacktrackOps(kinmem, nbacktr) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: nbacktr integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(nbacktr(1)) fresult = swigc_FKINGetNumBacktrackOps(farg1, farg2) swig_result = fresult end function function FKINGetFuncNorm(kinmem, fnorm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), dimension(*), target, intent(inout) :: fnorm integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(fnorm(1)) fresult = swigc_FKINGetFuncNorm(farg1, farg2) swig_result = fresult end function function FKINGetStepLength(kinmem, steplength) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem real(C_DOUBLE), dimension(*),
target, intent(inout) :: steplength integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(steplength(1)) fresult = swigc_FKINGetStepLength(farg1, farg2) swig_result = fresult end function subroutine SWIG_chararray_to_string(wrap, string) use, intrinsic :: ISO_C_BINDING type(SwigArrayWrapper), intent(IN) :: wrap character(kind=C_CHAR, len=:), allocatable, intent(OUT) :: string character(kind=C_CHAR), dimension(:), pointer :: chars integer(kind=C_SIZE_T) :: i call c_f_pointer(wrap%data, chars, [wrap%size]) allocate(character(kind=C_CHAR, len=wrap%size) :: string) do i=1, wrap%size string(i:i) = chars(i) end do end subroutine function FKINGetReturnFlagName(flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=:), allocatable :: swig_result integer(C_LONG), intent(in) :: flag type(SwigArrayWrapper) :: fresult integer(C_LONG) :: farg1 farg1 = flag fresult = swigc_FKINGetReturnFlagName(farg1) call SWIG_chararray_to_string(fresult, swig_result) if (.false.)
call SWIG_free(fresult%data) end function subroutine FKINFree(kinmem) use, intrinsic :: ISO_C_BINDING type(C_PTR), target, intent(inout) :: kinmem type(C_PTR) :: farg1 farg1 = c_loc(kinmem) call swigc_FKINFree(farg1) end subroutine function FKINSetJacTimesVecSysFn(kinmem, jtimessysfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: jtimessysfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = kinmem farg2 = jtimessysfn fresult = swigc_FKINSetJacTimesVecSysFn(farg1, farg2) swig_result = fresult end function function FKINSetDebugFile(kinmem, debugfp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_PTR) :: debugfp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = debugfp fresult = swigc_FKINSetDebugFile(farg1, farg2) swig_result = fresult end function function FKINBBDPrecInit(kinmem, nlocal, mudq, mldq, mukeep, mlkeep, dq_rel_uu, gloc, gcomm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_INT64_T), intent(in) :: nlocal integer(C_INT64_T), intent(in) :: mudq integer(C_INT64_T), intent(in) :: mldq integer(C_INT64_T), intent(in) :: mukeep integer(C_INT64_T), intent(in) :: mlkeep real(C_DOUBLE), intent(in) :: dq_rel_uu type(C_FUNPTR), intent(in), value :: gloc type(C_FUNPTR), intent(in), value :: gcomm integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT64_T) :: farg2 integer(C_INT64_T) :: farg3 integer(C_INT64_T) :: farg4 integer(C_INT64_T) :: farg5 integer(C_INT64_T) :: farg6 real(C_DOUBLE) :: farg7 type(C_FUNPTR) :: farg8 type(C_FUNPTR) :: farg9 farg1 = kinmem farg2 = nlocal farg3 = mudq farg4 = mldq farg5 = mukeep farg6 = mlkeep farg7 = dq_rel_uu farg8 = gloc farg9 = gcomm fresult = swigc_FKINBBDPrecInit(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9)
swig_result = fresult end function function FKINBBDPrecGetWorkSpace(kinmem, lenrwbbdp, leniwbbdp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: lenrwbbdp integer(C_LONG), dimension(*), target, intent(inout) :: leniwbbdp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = kinmem farg2 = c_loc(lenrwbbdp(1)) farg3 = c_loc(leniwbbdp(1)) fresult = swigc_FKINBBDPrecGetWorkSpace(farg1, farg2, farg3) swig_result = fresult end function function FKINBBDPrecGetNumGfnEvals(kinmem, ngevalsbbdp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: ngevalsbbdp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(ngevalsbbdp(1)) fresult = swigc_FKINBBDPrecGetNumGfnEvals(farg1, farg2) swig_result = fresult end function function FKINSetLinearSolver(kinmem, ls, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(SUNLinearSolver), target, intent(inout) :: ls type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = kinmem farg2 = c_loc(ls) farg3 = c_loc(a) fresult = swigc_FKINSetLinearSolver(farg1, farg2, farg3) swig_result = fresult end function function FKINSetJacFn(kinmem, jac) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: jac integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = kinmem farg2 = jac fresult = swigc_FKINSetJacFn(farg1, farg2) swig_result = fresult end function function FKINSetPreconditioner(kinmem, psetup, psolve) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result
type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: psetup type(C_FUNPTR), intent(in), value :: psolve integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = kinmem farg2 = psetup farg3 = psolve fresult = swigc_FKINSetPreconditioner(farg1, farg2, farg3) swig_result = fresult end function function FKINSetJacTimesVecFn(kinmem, jtv) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem type(C_FUNPTR), intent(in), value :: jtv integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = kinmem farg2 = jtv fresult = swigc_FKINSetJacTimesVecFn(farg1, farg2) swig_result = fresult end function function FKINGetLinWorkSpace(kinmem, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = kinmem farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FKINGetLinWorkSpace(farg1, farg2, farg3) swig_result = fresult end function function FKINGetNumJacEvals(kinmem, njevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: njevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(njevals(1)) fresult = swigc_FKINGetNumJacEvals(farg1, farg2) swig_result = fresult end function function FKINGetNumLinFuncEvals(kinmem, nfevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: nfevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(nfevals(1)) fresult
= swigc_FKINGetNumLinFuncEvals(farg1, farg2) swig_result = fresult end function function FKINGetNumPrecEvals(kinmem, npevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: npevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(npevals(1)) fresult = swigc_FKINGetNumPrecEvals(farg1, farg2) swig_result = fresult end function function FKINGetNumPrecSolves(kinmem, npsolves) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: npsolves integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(npsolves(1)) fresult = swigc_FKINGetNumPrecSolves(farg1, farg2) swig_result = fresult end function function FKINGetNumLinIters(kinmem, nliters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: nliters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(nliters(1)) fresult = swigc_FKINGetNumLinIters(farg1, farg2) swig_result = fresult end function function FKINGetNumLinConvFails(kinmem, nlcfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: nlcfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(nlcfails(1)) fresult = swigc_FKINGetNumLinConvFails(farg1, farg2) swig_result = fresult end function function FKINGetNumJtimesEvals(kinmem, njvevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: njvevals integer(C_INT) :: fresult type(C_PTR) :: farg1 
type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(njvevals(1)) fresult = swigc_FKINGetNumJtimesEvals(farg1, farg2) swig_result = fresult end function function FKINGetLastLinFlag(kinmem, flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: kinmem integer(C_LONG), dimension(*), target, intent(inout) :: flag integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = kinmem farg2 = c_loc(flag(1)) fresult = swigc_FKINGetLastLinFlag(farg1, farg2) swig_result = fresult end function function FKINGetLinReturnFlagName(flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=:), allocatable :: swig_result integer(C_LONG), intent(in) :: flag type(SwigArrayWrapper) :: fresult integer(C_LONG) :: farg1 farg1 = flag fresult = swigc_FKINGetLinReturnFlagName(farg1) call SWIG_chararray_to_string(fresult, swig_result) if (.false.) call SWIG_free(fresult%data) end function end module StanHeaders/src/kinsol/fmod/fkinsol_mod.c0000644000176200001440000006444714645137106020164 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define 
SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "kinsol/kinsol.h" #include "kinsol/kinsol_bbdpre.h" #include "kinsol/kinsol_ls.h" #include #ifdef _MSC_VER # ifndef strtoull # define strtoull _strtoui64 # endif # ifndef strtoll # define strtoll _strtoi64 # endif #endif typedef struct { void* data; size_t size; } SwigArrayWrapper; SWIGINTERN SwigArrayWrapper SwigArrayWrapper_uninitialized() { SwigArrayWrapper result; result.data = NULL; result.size = 0; return result; } #include SWIGEXPORT void * _wrap_FKINCreate(void *farg1) { void * fresult ; SUNContext arg1 = (SUNContext) 0 ; void *result = 0 ; arg1 = (SUNContext)(farg1); result = (void *)KINCreate(arg1); fresult = result; return fresult; } SWIGEXPORT int _wrap_FKINInit(void *farg1, KINSysFn farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; KINSysFn arg2 = 
(KINSysFn) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (KINSysFn)(farg2); arg3 = (N_Vector)(farg3); result = (int)KINInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSol(void *farg1, N_Vector farg2, int const *farg3, N_Vector farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); result = (int)KINSol(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetErrHandlerFn(void *farg1, KINErrHandlerFn farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; KINErrHandlerFn arg2 = (KINErrHandlerFn) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (KINErrHandlerFn)(farg2); arg3 = (void *)(farg3); result = (int)KINSetErrHandlerFn(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetErrFile(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (FILE *)(farg2); result = (int)KINSetErrFile(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetInfoHandlerFn(void *farg1, KINInfoHandlerFn farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; KINInfoHandlerFn arg2 = (KINInfoHandlerFn) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (KINInfoHandlerFn)(farg2); arg3 = (void *)(farg3); result = (int)KINSetInfoHandlerFn(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetInfoFile(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (FILE *)(farg2); result = (int)KINSetInfoFile(arg1,arg2); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetUserData(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (void *)(farg2); result = (int)KINSetUserData(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetPrintLevel(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)KINSetPrintLevel(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetDamping(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)KINSetDamping(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetMAA(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)KINSetMAA(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetOrthAA(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)KINSetOrthAA(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetDelayAA(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)KINSetDelayAA(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetDampingAA(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)KINSetDampingAA(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetReturnNewest(void *farg1, int const *farg2) { int fresult ; void 
*arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)KINSetReturnNewest(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetNumMaxIters(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)KINSetNumMaxIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetNoInitSetup(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)KINSetNoInitSetup(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetNoResMon(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)KINSetNoResMon(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetMaxSetupCalls(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)KINSetMaxSetupCalls(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetMaxSubSetupCalls(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)KINSetMaxSubSetupCalls(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetEtaForm(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)KINSetEtaForm(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetEtaConstValue(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); 
result = (int)KINSetEtaConstValue(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetEtaParams(void *farg1, double const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype)(*farg3); result = (int)KINSetEtaParams(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetResMonParams(void *farg1, double const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype)(*farg3); result = (int)KINSetResMonParams(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetResMonConstValue(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)KINSetResMonConstValue(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetNoMinEps(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)KINSetNoMinEps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetMaxNewtonStep(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)KINSetMaxNewtonStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetMaxBetaFails(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)KINSetMaxBetaFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetRelErrFunc(void *farg1, double const *farg2) { int fresult ; void 
*arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)KINSetRelErrFunc(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetFuncNormTol(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)KINSetFuncNormTol(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetScaledStepTol(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)KINSetScaledStepTol(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetConstraints(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)KINSetConstraints(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetSysFunc(void *farg1, KINSysFn farg2) { int fresult ; void *arg1 = (void *) 0 ; KINSysFn arg2 = (KINSysFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (KINSysFn)(farg2); result = (int)KINSetSysFunc(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)KINGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumNonlinSolvIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumNonlinSolvIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FKINGetNumFuncEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumFuncEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumBetaCondFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumBetaCondFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumBacktrackOps(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumBacktrackOps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetFuncNorm(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)KINGetFuncNorm(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetStepLength(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)KINGetStepLength(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT SwigArrayWrapper _wrap_FKINGetReturnFlagName(long const *farg1) { SwigArrayWrapper fresult ; long arg1 ; char *result = 0 ; arg1 = (long)(*farg1); result = (char *)KINGetReturnFlagName(arg1); fresult.size = strlen((const char*)(result)); fresult.data = (char *)(result); return fresult; } SWIGEXPORT void _wrap_FKINFree(void *farg1) { void **arg1 = (void **) 0 ; arg1 = (void **)(farg1); KINFree(arg1); } SWIGEXPORT int _wrap_FKINSetJacTimesVecSysFn(void *farg1, KINSysFn farg2) { int fresult ; void *arg1 = (void *) 0 ; KINSysFn arg2 = (KINSysFn) 0 ; int 
result; arg1 = (void *)(farg1); arg2 = (KINSysFn)(farg2); result = (int)KINSetJacTimesVecSysFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetDebugFile(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (FILE *)(farg2); result = (int)KINSetDebugFile(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINBBDPrecInit(void *farg1, int64_t const *farg2, int64_t const *farg3, int64_t const *farg4, int64_t const *farg5, int64_t const *farg6, double const *farg7, KINBBDLocalFn farg8, KINBBDCommFn farg9) { int fresult ; void *arg1 = (void *) 0 ; sunindextype arg2 ; sunindextype arg3 ; sunindextype arg4 ; sunindextype arg5 ; sunindextype arg6 ; realtype arg7 ; KINBBDLocalFn arg8 = (KINBBDLocalFn) 0 ; KINBBDCommFn arg9 = (KINBBDCommFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (sunindextype)(*farg5); arg6 = (sunindextype)(*farg6); arg7 = (realtype)(*farg7); arg8 = (KINBBDLocalFn)(farg8); arg9 = (KINBBDCommFn)(farg9); result = (int)KINBBDPrecInit(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINBBDPrecGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)KINBBDPrecGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINBBDPrecGetNumGfnEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINBBDPrecGetNumGfnEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetLinearSolver(void *farg1, 
SUNLinearSolver farg2, SUNMatrix farg3) { int fresult ; void *arg1 = (void *) 0 ; SUNLinearSolver arg2 = (SUNLinearSolver) 0 ; SUNMatrix arg3 = (SUNMatrix) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNLinearSolver)(farg2); arg3 = (SUNMatrix)(farg3); result = (int)KINSetLinearSolver(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetJacFn(void *farg1, KINLsJacFn farg2) { int fresult ; void *arg1 = (void *) 0 ; KINLsJacFn arg2 = (KINLsJacFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (KINLsJacFn)(farg2); result = (int)KINSetJacFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetPreconditioner(void *farg1, KINLsPrecSetupFn farg2, KINLsPrecSolveFn farg3) { int fresult ; void *arg1 = (void *) 0 ; KINLsPrecSetupFn arg2 = (KINLsPrecSetupFn) 0 ; KINLsPrecSolveFn arg3 = (KINLsPrecSolveFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (KINLsPrecSetupFn)(farg2); arg3 = (KINLsPrecSolveFn)(farg3); result = (int)KINSetPreconditioner(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINSetJacTimesVecFn(void *farg1, KINLsJacTimesVecFn farg2) { int fresult ; void *arg1 = (void *) 0 ; KINLsJacTimesVecFn arg2 = (KINLsJacTimesVecFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (KINLsJacTimesVecFn)(farg2); result = (int)KINSetJacTimesVecFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetLinWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)KINGetLinWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumJacEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumJacEvals(arg1,arg2); 
fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumLinFuncEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumLinFuncEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumPrecEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumPrecEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumPrecSolves(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumPrecSolves(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumLinIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumLinIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumLinConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumLinConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetNumJtimesEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetNumJtimesEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FKINGetLastLinFlag(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)KINGetLastLinFlag(arg1,arg2); 
fresult = (int)(result); return fresult; } SWIGEXPORT SwigArrayWrapper _wrap_FKINGetLinReturnFlagName(long const *farg1) { SwigArrayWrapper fresult ; long arg1 ; char *result = 0 ; arg1 = (long)(*farg1); result = (char *)KINGetLinReturnFlagName(arg1); fresult.size = strlen((const char*)(result)); fresult.data = (char *)(result); return fresult; } StanHeaders/src/kinsol/kinsol_spils.c0000644000176200001440000000526514645137106017435 0ustar liggesusers/*----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Scott Cohen, Alan Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Header file for the deprecated Scaled Preconditioned Iterative * Linear Solver interface in KINSOL; these routines now just wrap * the updated KINSOL generic linear solver interface in kinsol_ls.h. 
*-----------------------------------------------------------------*/ #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= Exported Functions (wrappers for equivalent routines in kinsol_ls.h) =================================================================*/ int KINSpilsSetLinearSolver(void *kinmem, SUNLinearSolver LS) { return(KINSetLinearSolver(kinmem, LS, NULL)); } int KINSpilsSetPreconditioner(void *kinmem, KINSpilsPrecSetupFn psetup, KINSpilsPrecSolveFn psolve) { return(KINSetPreconditioner(kinmem, psetup, psolve)); } int KINSpilsSetJacTimesVecFn(void *kinmem, KINSpilsJacTimesVecFn jtv) { return(KINSetJacTimesVecFn(kinmem, jtv)); } int KINSpilsGetWorkSpace(void *kinmem, long int *lenrwLS, long int *leniwLS) { return(KINGetLinWorkSpace(kinmem, lenrwLS, leniwLS)); } int KINSpilsGetNumPrecEvals(void *kinmem, long int *npevals) { return(KINGetNumPrecEvals(kinmem, npevals)); } int KINSpilsGetNumPrecSolves(void *kinmem, long int *npsolves) { return(KINGetNumPrecSolves(kinmem, npsolves)); } int KINSpilsGetNumLinIters(void *kinmem, long int *nliters) { return(KINGetNumLinIters(kinmem, nliters)); } int KINSpilsGetNumConvFails(void *kinmem, long int *nlcfails) { return(KINGetNumLinConvFails(kinmem, nlcfails)); } int KINSpilsGetNumJtimesEvals(void *kinmem, long int *njvevals) { return(KINGetNumJtimesEvals(kinmem, njvevals)); } int KINSpilsGetNumFuncEvals(void *kinmem, long int *nfevals) { return(KINGetNumLinFuncEvals(kinmem, nfevals)); } int KINSpilsGetLastFlag(void *kinmem, long int *flag) { return(KINGetLastLinFlag(kinmem, flag)); } char *KINSpilsGetReturnFlagName(long int flag) { return(KINGetLinReturnFlagName(flag)); } #ifdef __cplusplus } #endif StanHeaders/src/kinsol/README.md0000644000176200001440000000543214645137106016033 0ustar liggesusers# KINSOL ### Version 6.1.1 (Feb 2022) **Alan C. Hindmarsh, Radu Serban, Cody J. Balos, David J. Gardner, and Carol S. 
Woodward, Center for Applied Scientific Computing, LLNL** **Daniel R. Reynolds, Department of Mathematics, Southern Methodist University** KINSOL is a package for the solution for nonlinear algebraic systems ``` F(u) = 0. ``` Nonlinear solver methods available include Newton-Krylov, Picard, and fixed-point. Both Picard and fixed point can be accelerated with Anderson acceleration. KINSOL is part of the SUNDIALS Suite of Nonlinear and Differential/Algebraic equation Solvers which consists of ARKode, CVODE, CVODES, IDA, IDAS and KINSOL. It is written in ANSI standard C, but is based on the previous Fortran package NKSOL, written by Peter Brown and Youcef Saad. KINSOL can be used in a variety of computing environments including serial, shared memory, distributed memory, and accelerator-based (e.g., GPU) systems. This flexibility is obtained from a modular design that leverages the shared vector, matrix, and linear solver APIs used across SUNDIALS packages. For use with Fortran applications, a set of Fortran/C interface routines, called FKINSOL, is also supplied. These are written in C, but assume that the user calling program and all user-supplied routines are in Fortran. ## Documentation See the [KINSOL User Guide](/doc/kinsol/kin_guide.pdf) and [KINSOL Examples](/doc/kinsol/kin_examples.pdf) document for more information about IDA usage and the provided example programs respectively. ## Installation For installation instructions see the [INSTALL_GUIDE](/INSTALL_GUIDE.pdf) or the "Installation Procedure" chapter in the KINSOL User Guide. ## Release History Information on recent changes to KINSOL can be found in the "Introduction" chapter of the KINSOL User Guide and a complete release history is available in the "SUNDIALS Release History" appendix of the KINSOL User Guide. ## References * A. C. Hindmarsh, R. Serban, C. J. Balos, D. J. Gardner, D. R. Reynolds and C. S. Woodward, "User Documentation for KINSOL v6.1.1," LLNL technical report UCRL-SM-208116, Feb 2022. 
* A. M. Collier and R. Serban, "Example Programs for KINSOL v6.1.1," LLNL technical report UCRL-SM-208114, Feb 2022. * A. C. Hindmarsh, P. N. Brown, K. E. Grant, S. L. Lee, R. Serban, D. E. Shumaker, and C. S. Woodward, "SUNDIALS, Suite of Nonlinear and Differential/Algebraic Equation Solvers," ACM Trans. Math. Softw., 31(3), pp. 363-396, 2005. * Peter N. Brown and Youcef Saad, "Hybrid Krylov Methods for Nonlinear Systems of Equations," SIAM J. Sci. Stat. Comput., Vol 11, no 3, pp. 450-481, May 1990. * A. G. Taylor and A. C. Hindmarsh, "User Documentation for KINSOL, A Nonlinear Solver for Sequential and Parallel Computers," LLNL technical report UCRL-ID-131185, July 1998. StanHeaders/src/kinsol/kinsol_direct.c0000644000176200001440000000364314645137106017553 0ustar liggesusers/*----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Radu Serban @ LLNL *----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *----------------------------------------------------------------- * Implementation file for the deprecated direct linear solver interface in * KINSOL; these routines now just wrap the updated KINSOL generic * linear solver interface in kinsol_ls.h. 
*-----------------------------------------------------------------*/ #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= Exported Functions (wrappers for equivalent routines in kinsol_ls.h) =================================================================*/ int KINDlsSetLinearSolver(void *kinmem, SUNLinearSolver LS, SUNMatrix A) { return(KINSetLinearSolver(kinmem, LS, A)); } int KINDlsSetJacFn(void *kinmem, KINDlsJacFn jac) { return(KINSetJacFn(kinmem, jac)); } int KINDlsGetWorkSpace(void *kinmem, long int *lenrw, long int *leniw) { return(KINGetLinWorkSpace(kinmem, lenrw, leniw)); } int KINDlsGetNumJacEvals(void *kinmem, long int *njevals) { return(KINGetNumJacEvals(kinmem, njevals)); } int KINDlsGetNumFuncEvals(void *kinmem, long int *nfevals) { return(KINGetNumLinFuncEvals(kinmem, nfevals)); } int KINDlsGetLastFlag(void *kinmem, long int *flag) { return(KINGetLastLinFlag(kinmem, flag)); } char *KINDlsGetReturnFlagName(long int flag) { return(KINGetLinReturnFlagName(flag)); } #ifdef __cplusplus } #endif StanHeaders/src/kinsol/kinsol_bbdpre.c0000644000176200001440000004460214645137106017537 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * Allan Taylor, Alan Hindmarsh, Radu Serban, and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This file contains implementations of routines for a * band-block-diagonal preconditioner, i.e. 
a block-diagonal * matrix with banded blocks, for use with KINSol and the * KINLS linear solver interface. * * Note: With only one process, a banded matrix results * rather than a b-b-d matrix with banded blocks. Diagonal * blocking occurs at the process level. * -----------------------------------------------------------------*/ #include #include #include "kinsol_impl.h" #include "kinsol_ls_impl.h" #include "kinsol_bbdpre_impl.h" #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* Prototypes of functions KINBBDPrecSetup and KINBBDPrecSolve */ static int KINBBDPrecSetup(N_Vector uu, N_Vector uscale, N_Vector fval, N_Vector fscale, void *pdata); static int KINBBDPrecSolve(N_Vector uu, N_Vector uscale, N_Vector fval, N_Vector fscale, N_Vector vv, void *pdata); /* Prototype for KINBBDPrecFree */ static int KINBBDPrecFree(KINMem kin_mem); /* Prototype for difference quotient jacobian calculation routine */ static int KBBDDQJac(KBBDPrecData pdata, N_Vector uu, N_Vector uscale, N_Vector gu, N_Vector gtemp, N_Vector utemp); /*------------------------------------------------------------------ user-callable functions ------------------------------------------------------------------*/ /*------------------------------------------------------------------ KINBBDPrecInit ------------------------------------------------------------------*/ int KINBBDPrecInit(void *kinmem, sunindextype Nlocal, sunindextype mudq, sunindextype mldq, sunindextype mukeep, sunindextype mlkeep, realtype dq_rel_uu, KINBBDLocalFn gloc, KINBBDCommFn gcomm) { KINMem kin_mem; KINLsMem kinls_mem; KBBDPrecData pdata; sunindextype muk, mlk, storage_mu, lrw1, liw1; long int lrw, liw; int flag; if (kinmem == NULL) { KINProcessError(NULL, KINLS_MEM_NULL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_NULL); return(KINLS_MEM_NULL); } kin_mem = (KINMem) kinmem; /* Test if the LS linear solver interface has been created */ if (kin_mem->kin_lmem == NULL) { KINProcessError(kin_mem, KINLS_LMEM_NULL, 
"KINBBDPRE", "KINBBDPrecInit", MSGBBD_LMEM_NULL); return(KINLS_LMEM_NULL); } kinls_mem = (KINLsMem) kin_mem->kin_lmem; /* Test compatibility of NVECTOR package with the BBD preconditioner */ /* Note: Do NOT need to check for N_VScale since has already been checked for in KINSOL */ if (kin_mem->kin_vtemp1->ops->nvgetarraypointer == NULL) { KINProcessError(kin_mem, KINLS_ILL_INPUT, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_BAD_NVECTOR); return(KINLS_ILL_INPUT); } /* Allocate data memory */ pdata = NULL; pdata = (KBBDPrecData) malloc(sizeof *pdata); if (pdata == NULL) { KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } /* Set pointers to gloc and gcomm; load half-bandwidths */ pdata->kin_mem = kinmem; pdata->gloc = gloc; pdata->gcomm = gcomm; pdata->mudq = SUNMIN(Nlocal-1, SUNMAX(0, mudq)); pdata->mldq = SUNMIN(Nlocal-1, SUNMAX(0, mldq)); muk = SUNMIN(Nlocal-1, SUNMAX(0, mukeep)); mlk = SUNMIN(Nlocal-1, SUNMAX(0, mlkeep)); pdata->mukeep = muk; pdata->mlkeep = mlk; /* Set extended upper half-bandwidth for PP (required for pivoting) */ storage_mu = SUNMIN(Nlocal-1, muk+mlk); /* Allocate memory for preconditioner matrix */ pdata->PP = NULL; pdata->PP = SUNBandMatrixStorage(Nlocal, muk, mlk, storage_mu, kin_mem->kin_sunctx); if (pdata->PP == NULL) { free(pdata); pdata = NULL; KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } /* Allocate memory for temporary N_Vectors */ pdata->zlocal = NULL; pdata->zlocal = N_VNew_Serial(Nlocal, kin_mem->kin_sunctx); if (pdata->zlocal == NULL) { SUNMatDestroy(pdata->PP); free(pdata); pdata = NULL; KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } pdata->rlocal = NULL; pdata->rlocal = N_VNewEmpty_Serial(Nlocal, kin_mem->kin_sunctx); /* empty vector */ if (pdata->rlocal == NULL) { N_VDestroy(pdata->zlocal); SUNMatDestroy(pdata->PP); free(pdata); 
pdata = NULL; KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } pdata->tempv1 = NULL; pdata->tempv1 = N_VClone(kin_mem->kin_vtemp1); if (pdata->tempv1 == NULL) { N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); SUNMatDestroy(pdata->PP); free(pdata); pdata = NULL; KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } pdata->tempv2 = NULL; pdata->tempv2 = N_VClone(kin_mem->kin_vtemp1); if (pdata->tempv2 == NULL) { N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); N_VDestroy(pdata->tempv1); SUNMatDestroy(pdata->PP); free(pdata); pdata = NULL; KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } pdata->tempv3 = NULL; pdata->tempv3 = N_VClone(kin_mem->kin_vtemp1); if (pdata->tempv3 == NULL) { N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); N_VDestroy(pdata->tempv1); N_VDestroy(pdata->tempv2); SUNMatDestroy(pdata->PP); free(pdata); pdata = NULL; KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } /* Allocate memory for banded linear solver */ pdata->LS = NULL; pdata->LS = SUNLinSol_Band(pdata->zlocal, pdata->PP, kin_mem->kin_sunctx); if (pdata->LS == NULL) { N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); N_VDestroy(pdata->tempv1); N_VDestroy(pdata->tempv2); N_VDestroy(pdata->tempv3); SUNMatDestroy(pdata->PP); free(pdata); pdata = NULL; KINProcessError(kin_mem, KINLS_MEM_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_MEM_FAIL); return(KINLS_MEM_FAIL); } /* initialize band linear solver object */ flag = SUNLinSolInitialize(pdata->LS); if (flag != SUNLS_SUCCESS) { N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); N_VDestroy(pdata->tempv1); N_VDestroy(pdata->tempv2); N_VDestroy(pdata->tempv3); SUNMatDestroy(pdata->PP); SUNLinSolFree(pdata->LS); free(pdata); pdata = NULL; KINProcessError(kin_mem, 
KINLS_SUNLS_FAIL, "KINBBDPRE", "KINBBDPrecInit", MSGBBD_SUNLS_FAIL); return(KINLS_SUNLS_FAIL); } /* Set rel_uu based on input value dq_rel_uu (0 implies default) */ pdata->rel_uu = (dq_rel_uu > ZERO) ? dq_rel_uu : SUNRsqrt(kin_mem->kin_uround); /* Store Nlocal to be used in KINBBDPrecSetup */ pdata->n_local = Nlocal; /* Set work space sizes and initialize nge */ pdata->rpwsize = 0; pdata->ipwsize = 0; if (kin_mem->kin_vtemp1->ops->nvspace) { N_VSpace(kin_mem->kin_vtemp1, &lrw1, &liw1); pdata->rpwsize += 3*lrw1; pdata->ipwsize += 3*liw1; } if (pdata->zlocal->ops->nvspace) { N_VSpace(pdata->zlocal, &lrw1, &liw1); pdata->rpwsize += lrw1; pdata->ipwsize += liw1; } if (pdata->rlocal->ops->nvspace) { N_VSpace(pdata->rlocal, &lrw1, &liw1); pdata->rpwsize += lrw1; pdata->ipwsize += liw1; } if (pdata->PP->ops->space) { flag = SUNMatSpace(pdata->PP, &lrw, &liw); pdata->rpwsize += lrw; pdata->ipwsize += liw; } if (pdata->LS->ops->space) { flag = SUNLinSolSpace(pdata->LS, &lrw, &liw); pdata->rpwsize += lrw; pdata->ipwsize += liw; } pdata->nge = 0; /* make sure pdata is free from any previous allocations */ if (kinls_mem->pfree != NULL) kinls_mem->pfree(kin_mem); /* Point to the new pdata field in the LS memory */ kinls_mem->pdata = pdata; /* Attach the pfree function */ kinls_mem->pfree = KINBBDPrecFree; /* Attach preconditioner solve and setup functions */ flag = KINSetPreconditioner(kinmem, KINBBDPrecSetup, KINBBDPrecSolve); return(flag); } /*------------------------------------------------------------------ KINBBDPrecGetWorkSpace ------------------------------------------------------------------*/ int KINBBDPrecGetWorkSpace(void *kinmem, long int *lenrwBBDP, long int *leniwBBDP) { KINMem kin_mem; KINLsMem kinls_mem; KBBDPrecData pdata; if (kinmem == NULL) { KINProcessError(NULL, KINLS_MEM_NULL, "KINBBDPRE", "KINBBDPrecGetWorkSpace", MSGBBD_MEM_NULL); return(KINLS_MEM_NULL); } kin_mem = (KINMem) kinmem; if (kin_mem->kin_lmem == NULL) { KINProcessError(kin_mem, 
KINLS_LMEM_NULL, "KINBBDPRE", "KINBBDPrecGetWorkSpace", MSGBBD_LMEM_NULL); return(KINLS_LMEM_NULL); } kinls_mem = (KINLsMem) kin_mem->kin_lmem; if (kinls_mem->pdata == NULL) { KINProcessError(kin_mem, KINLS_PMEM_NULL, "KINBBDPRE", "KINBBDPrecGetWorkSpace", MSGBBD_PMEM_NULL); return(KINLS_PMEM_NULL); } pdata = (KBBDPrecData) kinls_mem->pdata; *lenrwBBDP = pdata->rpwsize; *leniwBBDP = pdata->ipwsize; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINBBDPrecGetNumGfnEvals -------------------------------------------------------------------*/ int KINBBDPrecGetNumGfnEvals(void *kinmem, long int *ngevalsBBDP) { KINMem kin_mem; KINLsMem kinls_mem; KBBDPrecData pdata; if (kinmem == NULL) { KINProcessError(NULL, KINLS_MEM_NULL, "KINBBDPRE", "KINBBDPrecGetNumGfnEvals", MSGBBD_MEM_NULL); return(KINLS_MEM_NULL); } kin_mem = (KINMem) kinmem; if (kin_mem->kin_lmem == NULL) { KINProcessError(kin_mem, KINLS_LMEM_NULL, "KINBBDPRE", "KINBBDPrecGetNumGfnEvals", MSGBBD_LMEM_NULL); return(KINLS_LMEM_NULL); } kinls_mem = (KINLsMem) kin_mem->kin_lmem; if (kinls_mem->pdata == NULL) { KINProcessError(kin_mem, KINLS_PMEM_NULL, "KINBBDPRE", "KINBBDPrecGetNumGfnEvals", MSGBBD_PMEM_NULL); return(KINLS_PMEM_NULL); } pdata = (KBBDPrecData) kinls_mem->pdata; *ngevalsBBDP = pdata->nge; return(KINLS_SUCCESS); } /*------------------------------------------------------------------ KINBBDPrecSetup KINBBDPrecSetup generates and factors a banded block of the preconditioner matrix on each processor, via calls to the user-supplied gloc and gcomm functions. It uses difference quotient approximations to the Jacobian elements. KINBBDPrecSetup calculates a new Jacobian, stored in banded matrix PP and does an LU factorization of P in place in PP. The parameters of KINBBDPrecSetup are as follows: uu is the current value of the dependent variable vector, namely the solutin to func(uu)=0 uscale is the dependent variable scaling vector (i.e. 
uu) fval is the vector f(u) fscale is the function scaling vector bbd_data is the pointer to BBD data set by KINBBDInit. Note: The value to be returned by the KINBBDPrecSetup function is a flag indicating whether it was successful. This value is: 0 if successful, > 0 for a recoverable error - step will be retried. ------------------------------------------------------------------*/ static int KINBBDPrecSetup(N_Vector uu, N_Vector uscale, N_Vector fval, N_Vector fscale, void *bbd_data) { KBBDPrecData pdata; KINMem kin_mem; int retval; pdata = (KBBDPrecData) bbd_data; kin_mem = (KINMem) pdata->kin_mem; /* Call KBBDDQJac for a new Jacobian calculation and store in PP */ retval = SUNMatZero(pdata->PP); if (retval != 0) { KINProcessError(kin_mem, -1, "KINBBDPRE", "KINBBDPrecSetup", MSGBBD_SUNMAT_FAIL); return(-1); } retval = KBBDDQJac(pdata, uu, uscale, pdata->tempv1, pdata->tempv2, pdata->tempv3); if (retval != 0) { KINProcessError(kin_mem, -1, "KINBBDPRE", "KINBBDPrecSetup", MSGBBD_FUNC_FAILED); return(-1); } /* Do LU factorization of P and return error flag */ retval = SUNLinSolSetup_Band(pdata->LS, pdata->PP); return(retval); } /*------------------------------------------------------------------ INBBDPrecSolve KINBBDPrecSolve solves a linear system P z = r, with the banded blocked preconditioner matrix P generated and factored by KINBBDPrecSetup. Here, r comes in as vv and z is returned in vv as well. The parameters for KINBBDPrecSolve are as follows: uu an N_Vector giving the current iterate for the system uscale an N_Vector giving the diagonal entries of the uu scaling matrix fval an N_Vector giving the current function value fscale an N_Vector giving the diagonal entries of the function scaling matrix vv vector initially set to the right-hand side vector r, but which upon return contains a solution of the linear system P*z = r bbd_data is the pointer to BBD data set by KINBBDInit. 
Note: The value returned by the KINBBDPrecSolve function is a flag returned from the lienar solver object. ------------------------------------------------------------------*/ static int KINBBDPrecSolve(N_Vector uu, N_Vector uscale, N_Vector fval, N_Vector fscale, N_Vector vv, void *bbd_data) { KBBDPrecData pdata; realtype *vd; realtype *zd; int i, retval; pdata = (KBBDPrecData) bbd_data; /* Get data pointers */ vd = N_VGetArrayPointer(vv); zd = N_VGetArrayPointer(pdata->zlocal); /* Attach local data array for vv to rlocal */ N_VSetArrayPointer(vd, pdata->rlocal); /* Call banded solver object to do the work */ retval = SUNLinSolSolve(pdata->LS, pdata->PP, pdata->zlocal, pdata->rlocal, ZERO); /* Copy result into vv */ for (i=0; in_local; i++) vd[i] = zd[i]; return(retval); } /*------------------------------------------------------------------ KINBBDPrecFree ------------------------------------------------------------------*/ static int KINBBDPrecFree(KINMem kin_mem) { KINLsMem kinls_mem; KBBDPrecData pdata; if (kin_mem->kin_lmem == NULL) return(0); kinls_mem = (KINLsMem) kin_mem->kin_lmem; if (kinls_mem->pdata == NULL) return(0); pdata = (KBBDPrecData) kinls_mem->pdata; SUNLinSolFree(pdata->LS); N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); N_VDestroy(pdata->tempv1); N_VDestroy(pdata->tempv2); N_VDestroy(pdata->tempv3); SUNMatDestroy(pdata->PP); free(pdata); pdata = NULL; return(0); } /*------------------------------------------------------------------ KBBDDQJac This routine generates a banded difference quotient approximation to the Jacobian of f(u). It assumes that a band matrix of type SUNMatrix is stored column-wise, and that elements within each column are contiguous. All matrix elements are generated as difference quotients, by way of calls to the user routine gloc. By virtue of the band structure, the number of these calls is bandwidth + 1, where bandwidth = ml + mu + 1. 
This routine also assumes that the local elements of a vector are stored contiguously. ------------------------------------------------------------------*/ static int KBBDDQJac(KBBDPrecData pdata, N_Vector uu, N_Vector uscale, N_Vector gu, N_Vector gtemp, N_Vector utemp) { KINMem kin_mem; realtype inc, inc_inv; int retval; sunindextype group, i, j, width, ngroups, i1, i2; realtype *udata, *uscdata, *gudata, *gtempdata, *utempdata, *col_j; kin_mem = (KINMem) pdata->kin_mem; /* load utemp with uu = predicted solution vector */ N_VScale(ONE, uu, utemp); /* set pointers to the data for all vectors */ udata = N_VGetArrayPointer(uu); uscdata = N_VGetArrayPointer(uscale); gudata = N_VGetArrayPointer(gu); gtempdata = N_VGetArrayPointer(gtemp); utempdata = N_VGetArrayPointer(utemp); /* Call gcomm and gloc to get base value of g(uu) */ if (pdata->gcomm != NULL) { retval = pdata->gcomm(pdata->n_local, uu, kin_mem->kin_user_data); if (retval != 0) return(retval); } retval = pdata->gloc(pdata->n_local, uu, gu, kin_mem->kin_user_data); pdata->nge++; if (retval != 0) return(retval); /* Set bandwidth and number of column groups for band differencing */ width = pdata->mldq + pdata->mudq + 1; ngroups = SUNMIN(width, pdata->n_local); /* Loop over groups */ for(group = 1; group <= ngroups; group++) { /* increment all u_j in group */ for(j = group - 1; j < pdata->n_local; j += width) { inc = pdata->rel_uu * SUNMAX(SUNRabs(udata[j]), (ONE / uscdata[j])); utempdata[j] += inc; } /* Evaluate g with incremented u */ retval = pdata->gloc(pdata->n_local, utemp, gtemp, kin_mem->kin_user_data); pdata->nge++; if (retval != 0) return(retval); /* restore utemp, then form and load difference quotients */ for (j = group - 1; j < pdata->n_local; j += width) { utempdata[j] = udata[j]; col_j = SUNBandMatrix_Column(pdata->PP,j); inc = pdata->rel_uu * SUNMAX(SUNRabs(udata[j]) , (ONE / uscdata[j])); inc_inv = ONE / inc; i1 = SUNMAX(0, (j - pdata->mukeep)); i2 = SUNMIN((j + pdata->mlkeep), (pdata->n_local 
- 1)); for (i = i1; i <= i2; i++) SM_COLUMN_ELEMENT_B(col_j, i, j) = inc_inv * (gtempdata[i] - gudata[i]); } } return(0); } StanHeaders/src/sundials/0000755000176200001440000000000014645137104015071 5ustar liggesusersStanHeaders/src/sundials/sundials_sycl.h0000644000176200001440000000577514645137106020136 0ustar liggesusers/* --------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * --------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * --------------------------------------------------------------------------- * This header files defines internal utility functions and macros for working * with SYCL. * ---------------------------------------------------------------------------*/ #include #include #ifndef _SUNDIALS_SYCL_H #define _SUNDIALS_SYCL_H #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Get the maximum work group size (block size) for a queue */ #define SYCL_BLOCKDIM(q) (q->get_device().get_info<::sycl::info::device::max_work_group_size>()) /* Grid (work group) stride loop */ #define GRID_STRIDE_XLOOP(item, iter, max) \ for (sunindextype iter = item.get_global_id(0); \ iter < max; \ iter += item.get_global_range(0)) /* Sycl parallel for loop */ #define SYCL_FOR(q, total, block, item, loop) \ q->submit([&](::sycl::handler& h) { \ h.parallel_for(::sycl::nd_range<1>{total,block}, \ [=](::sycl::nd_item<1> item) \ { loop }); }); /* Sycl parallel for loop with stream for ouput */ #define SYCL_FOR_DEBUG(q, total, block, item, loop) \ q->submit([&](::sycl::handler& h) { \ ::sycl::stream out(1024, 256, h); \ h.parallel_for(::sycl::nd_range<1>{total,block}, \ [=](::sycl::nd_item<1> 
item) \ { loop }); }); /* Sycl parallel for loop with reduction */ #define SYCL_FOR_REDUCE(q, total, block, item, rvar, rop, loop) \ q->submit([&](::sycl::handler& h) { \ h.parallel_for(::sycl::nd_range<1>{total,block}, \ ::sycl::reduction(rvar, rop), \ [=](::sycl::nd_item<1> item, auto& rvar) \ { loop }); }); /* Sycl parallel for loop with reduction and stream for ouput */ #define SYCL_FOR_REDUCE_DEBUG(q, total, block, item, rvar, rop, loop) \ q->submit([&](::sycl::handler& h) { \ ::sycl::stream out(1024, 256, h); \ h.parallel_for(::sycl::nd_range<1>{total,block}, \ ::sycl::reduction(rvar, rop), \ [=](::sycl::nd_item<1> item, auto& rvar) \ { loop }); }); #ifdef __cplusplus /* wrapper to enable C++ usage */ } #endif #endif /* _SUNDIALS_SYCL_H */ StanHeaders/src/sundials/sundials_cuda_kernels.cuh0000644000176200001440000003260314645137106022141 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- */ #ifndef _SUNDIALS_CUDA_KERNELS_CUH #define _SUNDIALS_CUDA_KERNELS_CUH #define SUNDIALS_HOST_DEVICE __host__ __device__ #define SUNDIALS_DEVICE_INLINE __forceinline__ #include "sundials_reductions.hpp" #define GRID_STRIDE_XLOOP(type, iter, max) \ for (type iter = blockDim.x * blockIdx.x + threadIdx.x; \ iter < max; \ iter += blockDim.x * gridDim.x) #include "sundials_cuda.h" namespace sundials { namespace cuda { namespace impl { template __forceinline__ __device__ T shfl_xor_sync(T var, int laneMask); template __forceinline__ __device__ T shfl_sync(T var, int srcLane); template __forceinline__ __device__ T shfl_down_sync(T var, int srcLane); template <> __forceinline__ __device__ float shfl_xor_sync(float var, int laneMask) { return ::__shfl_xor_sync(0xFFFFFFFF, var, laneMask); } template <> __forceinline__ __device__ double shfl_xor_sync(double var, int laneMask) { return ::__shfl_xor_sync(0xFFFFFFFF, var, laneMask); } template <> __forceinline__ __device__ float shfl_sync(float var, int srcLane) { return ::__shfl_sync(0xFFFFFFFF, var, srcLane); } template <> __forceinline__ __device__ double shfl_sync(double var, int srcLane) { return ::__shfl_sync(0xFFFFFFFF, var, srcLane); } template<> __forceinline__ __device__ float shfl_down_sync(float val, int srcLane) { return ::__shfl_down_sync(0xFFFFFFFF, val, srcLane); } template<> __forceinline__ __device__ double shfl_down_sync(double val, int srcLane) { return ::__shfl_down_sync(0xFFFFFFFF, val, srcLane); } /* The atomic functions below are implemented using the atomic compare and swap function atomicCAS which performs an atomic version of (*address == assumed) ? (assumed + val) : *address. Since *address could change between when the value is loaded and the atomicCAS call the operation is repeated until *address does not change between the read and the compare and swap operation. 
*/ __forceinline__ __device__ double atomicAdd(double* address, double val) { #if __CUDA_ARCH__ < 600 unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); #else return ::atomicAdd(address, val); #endif } __forceinline__ __device__ float atomicAdd(float* address, float val) { #if __CUDA_ARCH__ < 600 unsigned int* address_as_ull = (unsigned int*)address; unsigned int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val + __int_as_float(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __int_as_float(old); #else return ::atomicAdd(address, val); #endif } /* * Compute the maximum of 2 double-precision floating point values using an atomic operation * "address" is the address of the reference value which might get updated with the maximum * "value" is the value that is compared to the reference in order to determine the maximum */ __forceinline__ __device__ void atomicMax(double* const address, const double value) { if (*address >= value) { return; } unsigned long long * const address_as_i = (unsigned long long *)address; unsigned long long old = * address_as_i, assumed; do { assumed = old; if (__longlong_as_double(assumed) >= value) { break; } old = atomicCAS(address_as_i, assumed, __double_as_longlong(value)); } while (assumed != old); } /* * Compute the maximum of 2 single-precision floating point values using an atomic operation * "address" is the address of the reference value which might get updated with the maximum * "value" is the value that is compared to the reference in order to determine the 
maximum */ __forceinline__ __device__ void atomicMax(float* const address, const float value) { if (*address >= value) { return; } unsigned int* const address_as_i = (unsigned int *)address; unsigned int old = *address_as_i, assumed; do { assumed = old; if (__int_as_float(assumed) >= value) { break; } old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while (assumed != old); } /* * Compute the minimum of 2 double-precision floating point values using an atomic operation * "address" is the address of the reference value which might get updated with the minimum * "value" is the value that is compared to the reference in order to determine the minimum */ __forceinline__ __device__ void atomicMin(double* const address, const double value) { if (*address <= value) { return; } unsigned long long* const address_as_i = (unsigned long long *)address; unsigned long long old = *address_as_i, assumed; do { assumed = old; if (__longlong_as_double(assumed) <= value) { break; } old = atomicCAS(address_as_i, assumed, __double_as_longlong(value)); } while (assumed != old); } /* * Compute the minimum of 2 single-precision floating point values using an atomic operation * "address" is the address of the reference value which might get updated with the minimum * "value" is the value that is compared to the reference in order to determine the minimum */ __forceinline__ __device__ void atomicMin(float* const address, const float value) { if (*address <= value) { return; } unsigned int* const address_as_i = (unsigned int *)address; unsigned int old = *address_as_i, assumed; do { assumed = old; if (__int_as_float(assumed) <= value) { break; } old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while (assumed != old); } // // Atomic specializations of sundials::reduction operators // template struct atomic; template struct atomic> { __device__ __forceinline__ void operator()(T* out, const T val) { atomicAdd(out, val); } }; template struct atomic> { __device__ 
__forceinline__ void operator()(T* out, const T val) { atomicMax(out, val); } }; template struct atomic> { __device__ __forceinline__ void operator()(T* out, const T val) { atomicMin(out, val); } }; /* * Perform a reduce on the warp to get the operation result. */ template __inline__ __device__ T warpReduceShflDown(T val) { for (int offset = warpSize/2; offset > 0; offset /= 2) { T rhs = shfl_down_sync(val, offset); val = BinaryReductionOp{}(val, rhs); } return val; } /* * Reduce value across the thread block. */ template __inline__ __device__ T blockReduceShflDown(T val, T identity) { // Shared memory for the partial sums static __shared__ T shared[MAX_WARPS]; int numThreads = blockDim.x * blockDim.y * blockDim.z; int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; int warpId = threadId / WARP_SIZE; int warpLane = threadId % WARP_SIZE; // Each warp performs partial reduction val = warpReduceShflDown(val); // Write reduced value from each warp to shared memory if (warpLane == 0) shared[warpId] = val; // Wait for all partial reductions to complete __syncthreads(); // Read per warp values from shared memory only if that warp existed val = (threadId < numThreads / warpSize) ? shared[warpLane] : identity; // Final reduce within first warp if (warpId == 0) val = warpReduceShflDown(val); return val; } /* * Warp reduce + block reduce using shfl instead of shfl_down. 
*/ template __inline__ __device__ T blockReduceShfl(T val, T identity) { int numThreads = blockDim.x * blockDim.y * blockDim.z; int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; int warpId = threadId / WARP_SIZE; int warpLane = threadId % WARP_SIZE; T temp = val; // Reduce each warp if (numThreads % WARP_SIZE == 0) { for (int i = 1; i < WARP_SIZE; i *= 2) { T rhs = shfl_xor_sync(temp, i); temp = BinaryReductionOp{}(temp, rhs); } } else { for (int i = 1; i < WARP_SIZE; i *= 2) { int srcLane = threadId ^ i; T rhs = shfl_sync(temp, srcLane); // Only add from threads that exist to avoid double counting if (srcLane < numThreads) temp = BinaryReductionOp{}(temp, rhs); } } // Reduce per warp values if (numThreads > WARP_SIZE) { static_assert(MAX_WARPS <= WARP_SIZE, "max warps must be <= warp size for this algorithm to work"); __shared__ T shared[MAX_WARPS]; // Write per warp values to shared memory if (warpLane == 0) shared[warpId] = temp; __syncthreads(); if (warpId == 0) { // Read per warp values only if the warp existed temp = (warpLane * WARP_SIZE < numThreads) ? shared[warpLane] : identity; // Final reduction for (int i = 1; i < MAX_WARPS; i *= 2) { T rhs = shfl_xor_sync(temp, i); temp = BinaryReductionOp{}(temp, rhs); } } __syncthreads(); } return temp; } /* * Reduce values into thread 0 of the last running thread block. * Output value is device_mem[0]. 
*/ template __device__ __forceinline__ void gridReduce(T val, T identity, T* device_mem, unsigned int* device_count) { int numBlocks = gridDim.x * gridDim.y * gridDim.z; int numThreads = blockDim.x * blockDim.y * blockDim.z; unsigned int wrap_around = numBlocks - 1; int blockId = blockIdx.x + gridDim.x * blockIdx.y + (gridDim.x * gridDim.y) * blockIdx.z; int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; // Each block reduces a subset of the input T temp = blockReduceShfl(val, identity); __shared__ bool isLastBlockDone; if (threadId == 0) { // One thread per block stores the partial reductions to global memory device_mem[blockId] = temp; // Ensure write visible to all threads __threadfence(); // Increment counter, (wraps back to zero if old count == wrap_around) unsigned int old_count = atomicInc(device_count, wrap_around); isLastBlockDone = (old_count == wrap_around) ? 1 : 0; } // Synchronize to ensure that each thread reads the // correct value of isLastBlockDone. 
__syncthreads(); // The last block reduces values in device_mem if (isLastBlockDone) { // Reduce thread_i in each block into temp temp = identity; for (int i = threadId; i < numBlocks; i += numThreads) temp = BinaryReductionOp{}(temp, device_mem[i]); // Compute the final block partial reductions temp = blockReduceShfl(temp, identity); // One thread returns the final value if (threadId == 0) device_mem[0] = temp; } } template __device__ __forceinline__ void gridReduceAtomic(T val, T identity, T* device_mem) { int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; val = blockReduceShflDown(val, identity); // Final reduction of all block values into the output device_mem if (threadId == 0) atomic{}(device_mem, val); } template struct GridReducerLDS { __device__ __forceinline__ void operator()(T val, T identity, T* device_mem, unsigned int* device_count) { return gridReduce(val, identity, device_mem, device_count); } }; template struct GridReducerAtomic { __device__ __forceinline__ void operator()(T val, T identity, T* device_mem, unsigned int* device_count) { return gridReduceAtomic(val, identity, device_mem); } }; } // namespace impl } // namespace cuda } // namespace sundials #endif // _SUNDIALS_CUDA_KERNELS_CUHStanHeaders/src/sundials/sundials_nonlinearsolver.c0000644000176200001440000001572314645137106022371 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation file for a generic SUNNonlinerSolver package. It * contains the implementation of the SUNNonlinearSolver operations listed in * the 'ops' structure in sundials_nonlinearsolver.h * ---------------------------------------------------------------------------*/ #include #include #include "sundials_context_impl.h" #if defined(SUNDIALS_BUILD_WITH_PROFILING) static SUNProfiler getSUNProfiler(SUNNonlinearSolver NLS) { return(NLS->sunctx->profiler); } #endif /* ----------------------------------------------------------------------------- * Create a new empty SUNLinearSolver object * ---------------------------------------------------------------------------*/ SUNNonlinearSolver SUNNonlinSolNewEmpty(SUNContext sunctx) { SUNNonlinearSolver NLS; SUNNonlinearSolver_Ops ops; /* check input */ if (!sunctx) return(NULL); /* create nonlinear solver object */ NLS = NULL; NLS = (SUNNonlinearSolver) malloc(sizeof *NLS); if (NLS == NULL) return(NULL); /* create nonlinear solver ops structure */ ops = NULL; ops = (SUNNonlinearSolver_Ops) malloc(sizeof *ops); if (ops == NULL) { free(NLS); return(NULL); } /* initialize operations to NULL */ ops->gettype = NULL; ops->initialize = NULL; ops->setup = NULL; ops->solve = NULL; ops->free = NULL; ops->setsysfn = NULL; ops->setlsetupfn = NULL; ops->setlsolvefn = NULL; ops->setctestfn = NULL; ops->setmaxiters = NULL; ops->getnumiters = NULL; ops->getcuriter = NULL; ops->getnumconvfails = NULL; /* attach context and ops, initialize content to NULL */ NLS->sunctx = sunctx; NLS->ops = ops; NLS->content = NULL; return(NLS); } /* ----------------------------------------------------------------------------- * Free a generic SUNNonlinearSolver (assumes content is already empty) * ---------------------------------------------------------------------------*/ void 
SUNNonlinSolFreeEmpty(SUNNonlinearSolver NLS) { if (NLS == NULL) return; /* free non-NULL ops structure */ if (NLS->ops) free(NLS->ops); NLS->ops = NULL; /* free overall N_Vector object and return */ free(NLS); return; } /* ----------------------------------------------------------------------------- * core functions * ---------------------------------------------------------------------------*/ SUNNonlinearSolver_Type SUNNonlinSolGetType(SUNNonlinearSolver NLS) { return(NLS->ops->gettype(NLS)); } int SUNNonlinSolInitialize(SUNNonlinearSolver NLS) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(NLS)); if (NLS->ops->initialize) ier = NLS->ops->initialize(NLS); else ier = SUN_NLS_SUCCESS; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(NLS)); return(ier); } int SUNNonlinSolSetup(SUNNonlinearSolver NLS, N_Vector y, void* mem) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(NLS)); if (NLS->ops->setup) ier = NLS->ops->setup(NLS, y, mem); else ier = SUN_NLS_SUCCESS; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(NLS)); return(ier); } int SUNNonlinSolSolve(SUNNonlinearSolver NLS, N_Vector y0, N_Vector y, N_Vector w, realtype tol, booleantype callLSetup, void* mem) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(NLS)); ier = NLS->ops->solve(NLS, y0, y, w, tol, callLSetup, mem); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(NLS)); return(ier); } int SUNNonlinSolFree(SUNNonlinearSolver NLS) { if (NLS == NULL) return(SUN_NLS_SUCCESS); /* if the free operation exists use it */ if (NLS->ops) if (NLS->ops->free) return(NLS->ops->free(NLS)); /* if we reach this point, either ops == NULL or free == NULL, try to cleanup by freeing the content, ops, and solver */ if (NLS->content) { free(NLS->content); NLS->content = NULL; } if (NLS->ops) { free(NLS->ops); NLS->ops = NULL; } free(NLS); NLS = NULL; return(SUN_NLS_SUCCESS); } /* ----------------------------------------------------------------------------- * set functions * 
---------------------------------------------------------------------------*/ /* set the nonlinear system function (required) */ int SUNNonlinSolSetSysFn(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn) { return((int) NLS->ops->setsysfn(NLS, SysFn)); } /* set the linear solver setup function (optional) */ int SUNNonlinSolSetLSetupFn(SUNNonlinearSolver NLS, SUNNonlinSolLSetupFn LSetupFn) { if (NLS->ops->setlsetupfn) return((int) NLS->ops->setlsetupfn(NLS, LSetupFn)); else return(SUN_NLS_SUCCESS); } /* set the linear solver solve function (optional) */ int SUNNonlinSolSetLSolveFn(SUNNonlinearSolver NLS, SUNNonlinSolLSolveFn LSolveFn) { if (NLS->ops->setlsolvefn) return((int) NLS->ops->setlsolvefn(NLS, LSolveFn)); else return(SUN_NLS_SUCCESS); } /* set the convergence test function (optional) */ int SUNNonlinSolSetConvTestFn(SUNNonlinearSolver NLS, SUNNonlinSolConvTestFn CTestFn, void* ctest_data) { if (NLS->ops->setctestfn) return((int) NLS->ops->setctestfn(NLS, CTestFn, ctest_data)); else return(SUN_NLS_SUCCESS); } int SUNNonlinSolSetMaxIters(SUNNonlinearSolver NLS, int maxiters) { if (NLS->ops->setmaxiters) return((int) NLS->ops->setmaxiters(NLS, maxiters)); else return(SUN_NLS_SUCCESS); } /* ----------------------------------------------------------------------------- * get functions * ---------------------------------------------------------------------------*/ /* get the total number on nonlinear iterations (optional) */ int SUNNonlinSolGetNumIters(SUNNonlinearSolver NLS, long int *niters) { if (NLS->ops->getnumiters) { return((int) NLS->ops->getnumiters(NLS, niters)); } else { *niters = 0; return(SUN_NLS_SUCCESS); } } /* get the iteration count for the current nonlinear solve */ int SUNNonlinSolGetCurIter(SUNNonlinearSolver NLS, int *iter) { if (NLS->ops->getcuriter) { return((int) NLS->ops->getcuriter(NLS, iter)); } else { *iter = -1; return(SUN_NLS_SUCCESS); } } /* get the total number on nonlinear solve convergence failures (optional) */ int 
SUNNonlinSolGetNumConvFails(SUNNonlinearSolver NLS, long int *nconvfails) { if (NLS->ops->getnumconvfails) { return((int) NLS->ops->getnumconvfails(NLS, nconvfails)); } else { *nconvfails = 0; return(SUN_NLS_SUCCESS); } } StanHeaders/src/sundials/sundials_context.c0000644000176200001440000000636614645137106020640 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS context class. A context object holds data that all * SUNDIALS objects in a simulation share. * ----------------------------------------------------------------*/ #include #include #include #include #include #include "sundials_context_impl.h" #include "sundials_debug.h" int SUNContext_Create(void* comm, SUNContext* sunctx) { SUNProfiler profiler = NULL; #if defined(SUNDIALS_BUILD_WITH_PROFILING) && !defined(SUNDIALS_CALIPER_ENABLED) if (SUNProfiler_Create(comm, "SUNContext Default", &profiler)) return(-1); #endif *sunctx = NULL; *sunctx = (SUNContext) malloc(sizeof(struct _SUNContext)); if (*sunctx == NULL) { #if defined(SUNDIALS_BUILD_WITH_PROFILING) && !defined(SUNDIALS_CALIPER_ENABLED) SUNProfiler_Free(&profiler); #endif return(-1); } (*sunctx)->profiler = profiler; (*sunctx)->own_profiler = SUNTRUE; return(0); } int SUNContext_GetProfiler(SUNContext sunctx, SUNProfiler* profiler) { if (sunctx == NULL) return(-1); #ifdef SUNDIALS_BUILD_WITH_PROFILING /* get profiler */ *profiler = sunctx->profiler; #else *profiler = NULL; #endif return(0); } int SUNContext_SetProfiler(SUNContext sunctx, SUNProfiler profiler) { if 
(sunctx == NULL) return(-1); #ifdef SUNDIALS_BUILD_WITH_PROFILING /* free any existing profiler */ if (sunctx->profiler && sunctx->own_profiler) { if (SUNProfiler_Free(&(sunctx->profiler))) return(-1); sunctx->profiler = NULL; } /* set profiler */ sunctx->profiler = profiler; sunctx->own_profiler = SUNFALSE; #endif return(0); } int SUNContext_Free(SUNContext* sunctx) { #if defined(SUNDIALS_BUILD_WITH_PROFILING) && !defined(SUNDIALS_CALIPER_ENABLED) FILE *fp; char* sunprofiler_print_env; #endif if (!sunctx) return(0); if (!(*sunctx)) return(0); #if defined(SUNDIALS_BUILD_WITH_PROFILING) && !defined(SUNDIALS_CALIPER_ENABLED) /* Find out where we are printing to */ sunprofiler_print_env = getenv("SUNPROFILER_PRINT"); fp = NULL; if (sunprofiler_print_env) { if (!strcmp(sunprofiler_print_env, "0")) fp = NULL; else if (!strcmp(sunprofiler_print_env, "1") || !strcmp(sunprofiler_print_env, "TRUE") || !strcmp(sunprofiler_print_env, "stdout")) fp = stdout; else fp = fopen(sunprofiler_print_env, "a"); } /* Enforce that the profiler is freed before finalizing, if it is not owned by the sunctx. */ if ((*sunctx)->profiler) { if (fp) SUNProfiler_Print((*sunctx)->profiler, fp); if (fp) fclose(fp); if ((*sunctx)->own_profiler && SUNProfiler_Free(&(*sunctx)->profiler)) return(-1); } #endif free(*sunctx); *sunctx = NULL; return(0); } StanHeaders/src/sundials/sundials_nvector.c0000644000176200001440000006641114645137106020631 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Radu Serban, Aaron Collier, and * David J. Gardner @ LLNL * Daniel R. Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a generic NVECTOR package. * It contains the implementation of the N_Vector operations listed * in nvector.h. * -----------------------------------------------------------------*/ #include #include #include #include "sundials_context_impl.h" #if defined(SUNDIALS_BUILD_WITH_PROFILING) static SUNProfiler getSUNProfiler(N_Vector v) { return(v->sunctx->profiler); } #endif /* ----------------------------------------------------------------- * Methods that are not ops (i.e., non-virtual and not overridable). * -----------------------------------------------------------------*/ /* Create an empty NVector object */ N_Vector N_VNewEmpty(SUNContext sunctx) { N_Vector v; N_Vector_Ops ops; if (sunctx == NULL) return(NULL); /* create vector object */ v = NULL; v = (N_Vector) malloc(sizeof *v); if (v == NULL) return(NULL); /* create vector ops structure */ ops = NULL; ops = (N_Vector_Ops) malloc(sizeof *ops); if (ops == NULL) { free(v); return(NULL); } /* initialize operations to NULL */ /* * REQUIRED operations. * * These must be implemented by derivations of the generic N_Vector. 
*/ /* constructors, destructors, and utility operations */ ops->nvgetvectorid = NULL; ops->nvclone = NULL; ops->nvcloneempty = NULL; ops->nvdestroy = NULL; ops->nvspace = NULL; ops->nvgetarraypointer = NULL; ops->nvgetdevicearraypointer = NULL; ops->nvsetarraypointer = NULL; ops->nvgetcommunicator = NULL; ops->nvgetlength = NULL; /* standard vector operations */ ops->nvlinearsum = NULL; ops->nvconst = NULL; ops->nvprod = NULL; ops->nvdiv = NULL; ops->nvscale = NULL; ops->nvabs = NULL; ops->nvinv = NULL; ops->nvaddconst = NULL; ops->nvdotprod = NULL; ops->nvmaxnorm = NULL; ops->nvwrmsnorm = NULL; ops->nvwrmsnormmask = NULL; ops->nvmin = NULL; ops->nvwl2norm = NULL; ops->nvl1norm = NULL; ops->nvcompare = NULL; ops->nvinvtest = NULL; ops->nvconstrmask = NULL; ops->nvminquotient = NULL; /* * OPTIONAL operations. * * These operations provide default implementations that may be overriden. */ /* fused vector operations (optional) */ ops->nvlinearcombination = NULL; ops->nvscaleaddmulti = NULL; ops->nvdotprodmulti = NULL; /* vector array operations (optional) */ ops->nvlinearsumvectorarray = NULL; ops->nvscalevectorarray = NULL; ops->nvconstvectorarray = NULL; ops->nvwrmsnormvectorarray = NULL; ops->nvwrmsnormmaskvectorarray = NULL; ops->nvscaleaddmultivectorarray = NULL; ops->nvlinearcombinationvectorarray = NULL; /* * OPTIONAL operations with no default implementation. 
*/ /* local reduction operations (optional) */ ops->nvdotprodlocal = NULL; ops->nvmaxnormlocal = NULL; ops->nvminlocal = NULL; ops->nvl1normlocal = NULL; ops->nvinvtestlocal = NULL; ops->nvconstrmasklocal = NULL; ops->nvminquotientlocal = NULL; ops->nvwsqrsumlocal = NULL; ops->nvwsqrsummasklocal = NULL; /* single buffer reduction operations */ ops->nvdotprodmultilocal = NULL; ops->nvdotprodmultiallreduce = NULL; /* XBraid interface operations */ ops->nvbufsize = NULL; ops->nvbufpack = NULL; ops->nvbufunpack = NULL; /* debugging functions (called when SUNDIALS_DEBUG_PRINTVEC is defined) */ ops->nvprint = NULL; ops->nvprintfile = NULL; /* attach ops */ v->ops = ops; /* initialize content and sunctx to NULL */ v->content = NULL; v->sunctx = sunctx; return(v); } /* Free a generic N_Vector (assumes content is already empty) */ void N_VFreeEmpty(N_Vector v) { if (v == NULL) return; /* free non-NULL ops structure */ if (v->ops) free(v->ops); v->ops = NULL; /* free overall N_Vector object and return */ free(v); return; } /* Copy a vector 'ops' structure */ int N_VCopyOps(N_Vector w, N_Vector v) { /* Check that ops structures exist */ if (w == NULL || v == NULL) return(-1); if (w->ops == NULL || v->ops == NULL) return(-1); /* Copy ops from w to v */ /* * REQUIRED operations. * * These must be implemented by derivations of the generic N_Vector. 
*/ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = w->ops->nvgetvectorid; v->ops->nvclone = w->ops->nvclone; v->ops->nvcloneempty = w->ops->nvcloneempty; v->ops->nvdestroy = w->ops->nvdestroy; v->ops->nvspace = w->ops->nvspace; v->ops->nvgetarraypointer = w->ops->nvgetarraypointer; v->ops->nvgetdevicearraypointer = w->ops->nvgetdevicearraypointer; v->ops->nvsetarraypointer = w->ops->nvsetarraypointer; v->ops->nvgetcommunicator = w->ops->nvgetcommunicator; v->ops->nvgetlength = w->ops->nvgetlength; /* standard vector operations */ v->ops->nvlinearsum = w->ops->nvlinearsum; v->ops->nvconst = w->ops->nvconst; v->ops->nvprod = w->ops->nvprod; v->ops->nvdiv = w->ops->nvdiv; v->ops->nvscale = w->ops->nvscale; v->ops->nvabs = w->ops->nvabs; v->ops->nvinv = w->ops->nvinv; v->ops->nvaddconst = w->ops->nvaddconst; v->ops->nvdotprod = w->ops->nvdotprod; v->ops->nvmaxnorm = w->ops->nvmaxnorm; v->ops->nvwrmsnorm = w->ops->nvwrmsnorm; v->ops->nvwrmsnormmask = w->ops->nvwrmsnormmask; v->ops->nvmin = w->ops->nvmin; v->ops->nvwl2norm = w->ops->nvwl2norm; v->ops->nvl1norm = w->ops->nvl1norm; v->ops->nvcompare = w->ops->nvcompare; v->ops->nvinvtest = w->ops->nvinvtest; v->ops->nvconstrmask = w->ops->nvconstrmask; v->ops->nvminquotient = w->ops->nvminquotient; /* * OPTIONAL operations. * * These operations provide default implementations that may be overriden. 
*/ /* fused vector operations */ v->ops->nvlinearcombination = w->ops->nvlinearcombination; v->ops->nvscaleaddmulti = w->ops->nvscaleaddmulti; v->ops->nvdotprodmulti = w->ops->nvdotprodmulti; /* vector array operations */ v->ops->nvlinearsumvectorarray = w->ops->nvlinearsumvectorarray; v->ops->nvscalevectorarray = w->ops->nvscalevectorarray; v->ops->nvconstvectorarray = w->ops->nvconstvectorarray; v->ops->nvwrmsnormvectorarray = w->ops->nvwrmsnormvectorarray; v->ops->nvwrmsnormmaskvectorarray = w->ops->nvwrmsnormmaskvectorarray; v->ops->nvscaleaddmultivectorarray = w->ops->nvscaleaddmultivectorarray; v->ops->nvlinearcombinationvectorarray = w->ops->nvlinearcombinationvectorarray; /* * OPTIONAL operations with no default implementation. */ /* local reduction operations */ v->ops->nvdotprodlocal = w->ops->nvdotprodlocal; v->ops->nvmaxnormlocal = w->ops->nvmaxnormlocal; v->ops->nvminlocal = w->ops->nvminlocal; v->ops->nvl1normlocal = w->ops->nvl1normlocal; v->ops->nvinvtestlocal = w->ops->nvinvtestlocal; v->ops->nvconstrmasklocal = w->ops->nvconstrmasklocal; v->ops->nvminquotientlocal = w->ops->nvminquotientlocal; v->ops->nvwsqrsumlocal = w->ops->nvwsqrsumlocal; v->ops->nvwsqrsummasklocal = w->ops->nvwsqrsummasklocal; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = w->ops->nvdotprodmultilocal; v->ops->nvdotprodmultiallreduce = w->ops->nvdotprodmultiallreduce; /* XBraid interface operations */ v->ops->nvbufsize = w->ops->nvbufsize; v->ops->nvbufpack = w->ops->nvbufpack; v->ops->nvbufunpack = w->ops->nvbufunpack; /* debugging functions (called when SUNDIALS_DEBUG_PRINTVEC is defined) */ v->ops->nvprint = w->ops->nvprint; v->ops->nvprintfile = w->ops->nvprintfile; return(0); } /* ----------------------------------------------------------------- * Functions in the 'ops' structure * -----------------------------------------------------------------*/ N_Vector_ID N_VGetVectorID(N_Vector w) { return(w->ops->nvgetvectorid(w)); } N_Vector 
N_VClone(N_Vector w) { N_Vector result = NULL; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(w)); result = w->ops->nvclone(w); result->sunctx = w->sunctx; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(w)); return result; } N_Vector N_VCloneEmpty(N_Vector w) { N_Vector result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(w)); result = w->ops->nvcloneempty(w); result->sunctx = w->sunctx; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(w)); return result; } void N_VDestroy(N_Vector v) { if (v == NULL) return; /* if the destroy operation exists use it */ if (v->ops) if (v->ops->nvdestroy) { v->ops->nvdestroy(v); return; } /* if we reach this point, either ops == NULL or nvdestroy == NULL, try to cleanup by freeing the content, ops, and vector */ if (v->content) { free(v->content); v->content = NULL; } if (v->ops) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace(N_Vector v, sunindextype *lrw, sunindextype *liw) { v->ops->nvspace(v, lrw, liw); return; } realtype *N_VGetArrayPointer(N_Vector v) { return((realtype *) v->ops->nvgetarraypointer(v)); } realtype *N_VGetDeviceArrayPointer(N_Vector v) { if (v->ops->nvgetdevicearraypointer) return((realtype *) v->ops->nvgetdevicearraypointer(v)); else return(NULL); } void N_VSetArrayPointer(realtype *v_data, N_Vector v) { v->ops->nvsetarraypointer(v_data, v); return; } void *N_VGetCommunicator(N_Vector v) { if (v->ops->nvgetcommunicator) return(v->ops->nvgetcommunicator(v)); else return(NULL); } sunindextype N_VGetLength(N_Vector v) { return((sunindextype) v->ops->nvgetlength(v)); } /* ----------------------------------------------------------------- * standard vector operations * -----------------------------------------------------------------*/ void N_VLinearSum(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvlinearsum(a, x, b, y, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } void N_VConst(realtype c, N_Vector z) { 
SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(z)); z->ops->nvconst(c, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(z)); return; } void N_VProd(N_Vector x, N_Vector y, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvprod(x, y, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } void N_VDiv(N_Vector x, N_Vector y, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvdiv(x, y, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } void N_VScale(realtype c, N_Vector x, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvscale(c, x, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } void N_VAbs(N_Vector x, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvabs(x, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } void N_VInv(N_Vector x, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvinv(x, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } void N_VAddConst(N_Vector x, realtype b, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvaddconst(x, b, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } realtype N_VDotProd(N_Vector x, N_Vector y) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) y->ops->nvdotprod(x, y)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VMaxNorm(N_Vector x) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvmaxnorm(x)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VWrmsNorm(N_Vector x, N_Vector w) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvwrmsnorm(x, w)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VWrmsNormMask(N_Vector x, N_Vector w, N_Vector id) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = 
((realtype) x->ops->nvwrmsnormmask(x, w, id)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VMin(N_Vector x) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvmin(x)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VWL2Norm(N_Vector x, N_Vector w) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvwl2norm(x, w)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VL1Norm(N_Vector x) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvl1norm(x)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } void N_VCompare(realtype c, N_Vector x, N_Vector z) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); z->ops->nvcompare(c, x, z); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return; } booleantype N_VInvTest(N_Vector x, N_Vector z) { booleantype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((booleantype) z->ops->nvinvtest(x, z)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } booleantype N_VConstrMask(N_Vector c, N_Vector x, N_Vector m) { booleantype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(c)); result = ((booleantype) x->ops->nvconstrmask(c, x, m)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(c)); return(result); } realtype N_VMinQuotient(N_Vector num, N_Vector denom) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(num)); result = ((realtype) num->ops->nvminquotient(num, denom)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(num)); return(result); } /* ----------------------------------------------------------------- * OPTIONAL fused vector operations * -----------------------------------------------------------------*/ int N_VLinearCombination(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(X[0])); if 
(z->ops->nvlinearcombination != NULL) { ier = z->ops->nvlinearcombination(nvec, c, X, z); } else { z->ops->nvscale(c[0], X[0], z); for (i=1; iops->nvlinearsum(c[i], X[i], RCONST(1.0), z, z); } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(X[0])); return(ier); } int N_VScaleAddMulti(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); if (x->ops->nvscaleaddmulti != NULL) { ier = x->ops->nvscaleaddmulti(nvec, a, x, Y, Z); } else { for (i=0; iops->nvlinearsum(a[i], x, RCONST(1.0), Y[i], Z[i]); } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(ier); } int N_VDotProdMulti(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); if (x->ops->nvdotprodmulti != NULL) { ier = x->ops->nvdotprodmulti(nvec, x, Y, dotprods); } else { for (i=0; iops->nvdotprod(x, Y[i]); } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(ier); } /* ----------------------------------------------------------------- * OPTIONAL vector array operations * -----------------------------------------------------------------*/ int N_VLinearSumVectorArray(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(X[0])); if (Z[0]->ops->nvlinearsumvectorarray != NULL) { ier = Z[0]->ops->nvlinearsumvectorarray(nvec, a, X, b, Y, Z); } else { for (i=0; iops->nvlinearsum(a, X[i], b, Y[i], Z[i]); } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(X[0])); return(ier); } int N_VScaleVectorArray(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(X[0])); if (Z[0]->ops->nvscalevectorarray != NULL) { ier = Z[0]->ops->nvscalevectorarray(nvec, c, X, Z); } else { for (i=0; iops->nvscale(c[i], X[i], Z[i]); } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(X[0])); return(ier); } int N_VConstVectorArray(int nvec, realtype 
c, N_Vector* Z) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(Z[0])); if (Z[0]->ops->nvconstvectorarray != NULL) { ier = Z[0]->ops->nvconstvectorarray(nvec, c, Z); } else { for (i=0; iops->nvconst(c, Z[i]); } ier = 0; } SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(Z[0])); return(ier); } int N_VWrmsNormVectorArray(int nvec, N_Vector* X, N_Vector* W, realtype* nrm) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(X[0])); if (X[0]->ops->nvwrmsnormvectorarray != NULL) { ier = X[0]->ops->nvwrmsnormvectorarray(nvec, X, W, nrm); } else { for (i=0; iops->nvwrmsnorm(X[i], W[i]); } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(X[0])); return(ier); } int N_VWrmsNormMaskVectorArray(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm) { int i, ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(X[0])); if (id->ops->nvwrmsnormmaskvectorarray != NULL) { ier = id->ops->nvwrmsnormmaskvectorarray(nvec, X, W, id, nrm); } else { for (i=0; iops->nvwrmsnormmask(X[i], W[i], id); } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(X[0])); return(ier); } int N_VScaleAddMultiVectorArray(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j, ier; N_Vector* YY = NULL; N_Vector* ZZ = NULL; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(X[0])); if (X[0]->ops->nvscaleaddmultivectorarray != NULL) { ier = X[0]->ops->nvscaleaddmultivectorarray(nvec, nsum, a, X, Y, Z); } else if (X[0]->ops->nvscaleaddmulti != NULL ) { /* allocate arrays of vectors */ YY = (N_Vector*) malloc(nsum * sizeof(N_Vector)); ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector)); for (i=0; iops->nvscaleaddmulti(nsum, a, X[i], YY, ZZ); if (ier != 0) break; } /* free array of vectors */ free(YY); free(ZZ); } else { for (i=0; iops->nvlinearsum(a[j], X[i], RCONST(1.0), Y[j][i], Z[j][i]); } } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(X[0])); return(ier); } int N_VLinearCombinationVectorArray(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { int i, 
j, ier; N_Vector* Y = NULL; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(X[0][0])); if (Z[0]->ops->nvlinearcombinationvectorarray != NULL) { ier = Z[0]->ops->nvlinearcombinationvectorarray(nvec, nsum, c, X, Z); } else if (Z[0]->ops->nvlinearcombination != NULL ) { /* allocate array of vectors */ Y = (N_Vector* ) malloc(nsum * sizeof(N_Vector)); for (i=0; iops->nvlinearcombination(nsum, c, Y, Z[i]); if (ier != 0) break; } /* free array of vectors */ free(Y); } else { for (i=0; iops->nvscale(c[0], X[0][i], Z[i]); for (j=1; jops->nvlinearsum(c[j], X[j][i], RCONST(1.0), Z[i], Z[i]); } } ier = 0; } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(X[0][0])); return(ier); } /* ----------------------------------------------------------------- * OPTIONAL local reduction kernels (no parallel communication) * -----------------------------------------------------------------*/ realtype N_VDotProdLocal(N_Vector x, N_Vector y) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) y->ops->nvdotprodlocal(x, y)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VMaxNormLocal(N_Vector x) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvmaxnormlocal(x)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VMinLocal(N_Vector x) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvminlocal(x)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VL1NormLocal(N_Vector x) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvl1normlocal(x)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VWSqrSumLocal(N_Vector x, N_Vector w) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvwsqrsumlocal(x,w)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype 
N_VWSqrSumMaskLocal(N_Vector x, N_Vector w, N_Vector id) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((realtype) x->ops->nvwsqrsummasklocal(x,w,id)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } booleantype N_VInvTestLocal(N_Vector x, N_Vector z) { booleantype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((booleantype) z->ops->nvinvtestlocal(x,z)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } booleantype N_VConstrMaskLocal(N_Vector c, N_Vector x, N_Vector m) { booleantype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); result = ((booleantype) x->ops->nvconstrmasklocal(c,x,m)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(result); } realtype N_VMinQuotientLocal(N_Vector num, N_Vector denom) { realtype result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(num)); result = ((realtype) num->ops->nvminquotientlocal(num,denom)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(num)); return(result); } /* ------------------------------------------- * OPTIONAL single buffer reduction operations * -------------------------------------------*/ int N_VDotProdMultiLocal(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods) { int i; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); if (x->ops->nvdotprodmultilocal) return((int) x->ops->nvdotprodmultilocal(nvec, x, Y, dotprods)); if (x->ops->nvdotprodlocal) { for (i = 0; i < nvec; i++) { dotprods[i] = x->ops->nvdotprodlocal(x, Y[i]); } return(0); } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(-1); } int N_VDotProdMultiAllReduce(int nvec, N_Vector x, realtype* sum) { SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); if (x->ops->nvdotprodmultiallreduce) return(x->ops->nvdotprodmultiallreduce(nvec, x, sum)); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(-1); } /* ------------------------------------ * OPTIONAL XBraid interface operations * ------------------------------------*/ int N_VBufSize(N_Vector x, sunindextype 
*size) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); if (x->ops->nvbufsize == NULL) ier = -1; else ier = x->ops->nvbufsize(x, size); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(ier); } int N_VBufPack(N_Vector x, void *buf) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); if (x->ops->nvbufpack == NULL) ier = -1; else ier = x->ops->nvbufpack(x, buf); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(ier); } int N_VBufUnpack(N_Vector x, void *buf) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(x)); if (x->ops->nvbufunpack == NULL) ier = -1; else ier = x->ops->nvbufunpack(x, buf); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(x)); return(ier); } /* ----------------------------------------------------------------- * Additional functions exported by the generic NVECTOR: * N_VNewVectorArray * N_VCloneEmptyVectorArray * N_VCloneVectorArray * N_VDestroyVectorArray * -----------------------------------------------------------------*/ N_Vector* N_VNewVectorArray(int count) { N_Vector* vs = NULL; if (count <= 0) return(NULL); vs = (N_Vector* ) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); return(vs); } N_Vector* N_VCloneEmptyVectorArray(int count, N_Vector w) { N_Vector* vs = NULL; int j; if (count <= 0) return(NULL); vs = (N_Vector* ) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = N_VCloneEmpty(w); if (vs[j] == NULL) { N_VDestroyVectorArray(vs, j-1); return(NULL); } } return(vs); } N_Vector* N_VCloneVectorArray(int count, N_Vector w) { N_Vector* vs = NULL; int j; if (count <= 0) return(NULL); vs = (N_Vector* ) malloc(count * sizeof(N_Vector)); if (vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = N_VClone(w); if (vs[j] == NULL) { N_VDestroyVectorArray(vs, j-1); return(NULL); } } return(vs); } void N_VDestroyVectorArray(N_Vector* vs, int count) { int j; if (vs == NULL) return; for (j = 0; j < count; j++) { N_VDestroy(vs[j]); vs[j] = NULL; } free(vs); 
vs = NULL; return; } /* These function are really only for users of the Fortran interface */ N_Vector N_VGetVecAtIndexVectorArray(N_Vector* vs, int index) { if (vs==NULL) return NULL; else if (index < 0) return NULL; else return vs[index]; } void N_VSetVecAtIndexVectorArray(N_Vector* vs, int index, N_Vector w) { if (vs==NULL) return; else if (index < 0) return; else vs[index] = w; } /* ----------------------------------------------------------------- * Debugging functions * ----------------------------------------------------------------- */ void N_VPrint(N_Vector v) { if (v == NULL) { STAN_SUNDIALS_PRINTF("NULL Vector\n"); return; } if (v->ops->nvprint == NULL) { STAN_SUNDIALS_PRINTF("NULL Print Op\n"); return; } v->ops->nvprint(v); return; } void N_VPrintFile(N_Vector v, FILE* outfile) { if (v == NULL) { STAN_SUNDIALS_FPRINTF(outfile, "NULL Vector\n"); return; } if (v->ops->nvprintfile == NULL) { STAN_SUNDIALS_FPRINTF(outfile, "NULL PrintFile Op\n"); return; } v->ops->nvprintfile(v, outfile); return; } StanHeaders/src/sundials/sundials_nvector_senswrapper.c0000644000176200001440000003101214645137106023247 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation file for a vector wrapper for an array of NVECTORS * ---------------------------------------------------------------------------*/ #include #include #include #include #include #include #define ZERO RCONST(0.0) /*============================================================================== Constructors ============================================================================*/ /*------------------------------------------------------------------------------ create a new empty vector wrapper with space for vectors ----------------------------------------------------------------------------*/ N_Vector N_VNewEmpty_SensWrapper(int nvecs, SUNContext sunctx) { int i; N_Vector v; N_VectorContent_SensWrapper content; /* return if wrapper is empty */ if (nvecs < 1) return(NULL); /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ v->ops->nvclone = N_VClone_SensWrapper; v->ops->nvcloneempty = N_VCloneEmpty_SensWrapper; v->ops->nvdestroy = N_VDestroy_SensWrapper; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_SensWrapper; v->ops->nvconst = N_VConst_SensWrapper; v->ops->nvprod = N_VProd_SensWrapper; v->ops->nvdiv = N_VDiv_SensWrapper; v->ops->nvscale = N_VScale_SensWrapper; v->ops->nvabs = N_VAbs_SensWrapper; v->ops->nvinv = N_VInv_SensWrapper; v->ops->nvaddconst = N_VAddConst_SensWrapper; v->ops->nvdotprod = N_VDotProd_SensWrapper; v->ops->nvmaxnorm = N_VMaxNorm_SensWrapper; v->ops->nvwrmsnormmask = N_VWrmsNormMask_SensWrapper; v->ops->nvwrmsnorm = N_VWrmsNorm_SensWrapper; v->ops->nvmin = N_VMin_SensWrapper; v->ops->nvwl2norm = N_VWL2Norm_SensWrapper; v->ops->nvl1norm = N_VL1Norm_SensWrapper; v->ops->nvcompare = N_VCompare_SensWrapper; v->ops->nvinvtest = N_VInvTest_SensWrapper; v->ops->nvconstrmask = 
N_VConstrMask_SensWrapper; v->ops->nvminquotient = N_VMinQuotient_SensWrapper; /* create content */ content = NULL; content = (N_VectorContent_SensWrapper) malloc(sizeof *content); if (content == NULL) { N_VFreeEmpty(v); return(NULL); } content->nvecs = nvecs; content->own_vecs = SUNFALSE; content->vecs = NULL; content->vecs = (N_Vector*) malloc(nvecs * sizeof(N_Vector)); if (content->vecs == NULL) { free(content); N_VFreeEmpty(v); return(NULL); } /* initialize vector array to null */ for (i=0; i < nvecs; i++) content->vecs[i] = NULL; /* attach content */ v->content = content; return(v); } N_Vector N_VNew_SensWrapper(int count, N_Vector w) { N_Vector v; int i; v = NULL; v = N_VNewEmpty_SensWrapper(count, w->sunctx); if (v == NULL) return(NULL); for (i=0; i < NV_NVECS_SW(v); i++) { NV_VEC_SW(v,i) = N_VClone(w); if (NV_VEC_SW(v,i) == NULL) { N_VDestroy(v); return(NULL); } } /* update own vectors status */ NV_OWN_VECS_SW(v) = SUNTRUE; /* set context */ v->sunctx = w->sunctx; return(v); } /*============================================================================== Clone operations ============================================================================*/ /*------------------------------------------------------------------------------ create an empty clone of the vector wrapper w ----------------------------------------------------------------------------*/ N_Vector N_VCloneEmpty_SensWrapper(N_Vector w) { int i; N_Vector v; N_Vector_Ops ops; N_VectorContent_SensWrapper content; if (w == NULL) return(NULL); if (NV_NVECS_SW(w) < 1) return(NULL); /* create vector */ v = NULL; v = (N_Vector) malloc(sizeof *v); if (v == NULL) return(NULL); /* create vector operation structure */ ops = NULL; ops = (N_Vector_Ops) malloc(sizeof *ops); if (ops == NULL) { free(v); return(NULL); } ops->nvgetvectorid = w->ops->nvgetvectorid; ops->nvclone = w->ops->nvclone; ops->nvcloneempty = w->ops->nvcloneempty; ops->nvdestroy = w->ops->nvdestroy; ops->nvspace = w->ops->nvspace; 
ops->nvgetarraypointer = w->ops->nvgetarraypointer; ops->nvsetarraypointer = w->ops->nvsetarraypointer; /* standard vector operations */ ops->nvlinearsum = w->ops->nvlinearsum; ops->nvconst = w->ops->nvconst; ops->nvprod = w->ops->nvprod; ops->nvdiv = w->ops->nvdiv; ops->nvscale = w->ops->nvscale; ops->nvabs = w->ops->nvabs; ops->nvinv = w->ops->nvinv; ops->nvaddconst = w->ops->nvaddconst; ops->nvdotprod = w->ops->nvdotprod; ops->nvmaxnorm = w->ops->nvmaxnorm; ops->nvwrmsnormmask = w->ops->nvwrmsnormmask; ops->nvwrmsnorm = w->ops->nvwrmsnorm; ops->nvmin = w->ops->nvmin; ops->nvwl2norm = w->ops->nvwl2norm; ops->nvl1norm = w->ops->nvl1norm; ops->nvcompare = w->ops->nvcompare; ops->nvinvtest = w->ops->nvinvtest; ops->nvconstrmask = w->ops->nvconstrmask; ops->nvminquotient = w->ops->nvminquotient; /* fused vector operations */ ops->nvlinearcombination = w->ops->nvlinearcombination; ops->nvscaleaddmulti = w->ops->nvscaleaddmulti; ops->nvdotprodmulti = w->ops->nvdotprodmulti; /* vector array operations */ ops->nvlinearsumvectorarray = w->ops->nvlinearsumvectorarray; ops->nvscalevectorarray = w->ops->nvscalevectorarray; ops->nvconstvectorarray = w->ops->nvconstvectorarray; ops->nvwrmsnormvectorarray = w->ops->nvwrmsnormvectorarray; ops->nvwrmsnormmaskvectorarray = w->ops->nvwrmsnormmaskvectorarray; ops->nvscaleaddmultivectorarray = w->ops->nvscaleaddmultivectorarray; ops->nvlinearcombinationvectorarray = w->ops->nvlinearcombinationvectorarray; /* Create content */ content = NULL; content = (N_VectorContent_SensWrapper) malloc(sizeof *content); if (content == NULL) { free(ops); free(v); return(NULL); } content->nvecs = NV_NVECS_SW(w); content->own_vecs = SUNFALSE; content->vecs = NULL; content->vecs = (N_Vector*) malloc(NV_NVECS_SW(w) * sizeof(N_Vector)); if (content->vecs == NULL) {free(ops); free(v); free(content); return(NULL);} /* initialize vector array to null */ for (i=0; i < NV_NVECS_SW(w); i++) content->vecs[i] = NULL; /* Attach content and ops */ v->content = 
content; v->ops = ops; return(v); } /*------------------------------------------------------------------------------ create a clone of the vector wrapper w ----------------------------------------------------------------------------*/ N_Vector N_VClone_SensWrapper(N_Vector w) { N_Vector v; int i; /* create empty wrapper */ v = NULL; v = N_VCloneEmpty_SensWrapper(w); if (v == NULL) return(NULL); /* update own vectors status */ NV_OWN_VECS_SW(v) = SUNTRUE; /* allocate arrays */ for (i=0; i < NV_NVECS_SW(v); i++) { NV_VEC_SW(v,i) = N_VClone(NV_VEC_SW(w,i)); if (NV_VEC_SW(v,i) == NULL) { N_VDestroy(v); return(NULL); } } return(v); } /*============================================================================== Destructor ============================================================================*/ void N_VDestroy_SensWrapper(N_Vector v) { int i; if (NV_OWN_VECS_SW(v) == SUNTRUE) { for (i=0; i < NV_NVECS_SW(v); i++) { if (NV_VEC_SW(v,i)) N_VDestroy(NV_VEC_SW(v,i)); NV_VEC_SW(v,i) = NULL; } } free(NV_VECS_SW(v)); NV_VECS_SW(v) = NULL; free(v->content); v->content = NULL; free(v->ops); v->ops = NULL; free(v); v = NULL; return; } /*============================================================================== Standard vector operations ============================================================================*/ void N_VLinearSum_SensWrapper(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VLinearSum(a, NV_VEC_SW(x,i), b, NV_VEC_SW(y,i), NV_VEC_SW(z,i)); return; } void N_VConst_SensWrapper(realtype c, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(z); i++) N_VConst(c, NV_VEC_SW(z,i)); return; } void N_VProd_SensWrapper(N_Vector x, N_Vector y, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VProd(NV_VEC_SW(x,i), NV_VEC_SW(y,i), NV_VEC_SW(z,i)); return; } void N_VDiv_SensWrapper(N_Vector x, N_Vector y, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VDiv(NV_VEC_SW(x,i), NV_VEC_SW(y,i), 
NV_VEC_SW(z,i)); return; } void N_VScale_SensWrapper(realtype c, N_Vector x, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VScale(c, NV_VEC_SW(x,i), NV_VEC_SW(z,i)); return; } void N_VAbs_SensWrapper(N_Vector x, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VAbs(NV_VEC_SW(x,i), NV_VEC_SW(z,i)); return; } void N_VInv_SensWrapper(N_Vector x, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VInv(NV_VEC_SW(x,i), NV_VEC_SW(z,i)); return; } void N_VAddConst_SensWrapper(N_Vector x, realtype b, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VAddConst(NV_VEC_SW(x,i), b, NV_VEC_SW(z,i)); return; } realtype N_VDotProd_SensWrapper(N_Vector x, N_Vector y) { int i; realtype sum; sum = ZERO; for (i=0; i < NV_NVECS_SW(x); i++) sum += N_VDotProd(NV_VEC_SW(x,i), NV_VEC_SW(y,i)); return(sum); } realtype N_VMaxNorm_SensWrapper(N_Vector x) { int i; realtype max, tmp; max = ZERO; for (i=0; i < NV_NVECS_SW(x); i++) { tmp = N_VMaxNorm(NV_VEC_SW(x,i)); if (tmp > max) max = tmp; } return(max); } realtype N_VWrmsNorm_SensWrapper(N_Vector x, N_Vector w) { int i; realtype nrm, tmp; nrm = ZERO; for (i=0; i < NV_NVECS_SW(x); i++) { tmp = N_VWrmsNorm(NV_VEC_SW(x,i), NV_VEC_SW(w,i)); if (tmp > nrm) nrm = tmp; } return(nrm); } realtype N_VWrmsNormMask_SensWrapper(N_Vector x, N_Vector w, N_Vector id) { int i; realtype nrm, tmp; nrm = ZERO; for (i=0; i < NV_NVECS_SW(x); i++) { tmp = N_VWrmsNormMask(NV_VEC_SW(x,i), NV_VEC_SW(w,i), NV_VEC_SW(id,i)); if (tmp > nrm) nrm = tmp; } return(nrm); } realtype N_VMin_SensWrapper(N_Vector x) { int i; realtype min, tmp; min = N_VMin(NV_VEC_SW(x,0)); for (i=1; i < NV_NVECS_SW(x); i++) { tmp = N_VMin(NV_VEC_SW(x,i)); if (tmp < min) min = tmp; } return(min); } realtype N_VWL2Norm_SensWrapper(N_Vector x, N_Vector w) { int i; realtype nrm, tmp; nrm = ZERO; for (i=0; i < NV_NVECS_SW(x); i++) { tmp = N_VWL2Norm(NV_VEC_SW(x,i), NV_VEC_SW(w,i)); if (tmp > nrm) nrm = tmp; } return(nrm); } realtype N_VL1Norm_SensWrapper(N_Vector x) 
{ int i; realtype nrm, tmp; nrm = ZERO; for (i=0; i < NV_NVECS_SW(x); i++) { tmp = N_VL1Norm(NV_VEC_SW(x,i)); if (tmp > nrm) nrm = tmp; } return(nrm); } void N_VCompare_SensWrapper(realtype c, N_Vector x, N_Vector z) { int i; for (i=0; i < NV_NVECS_SW(x); i++) N_VCompare(c, NV_VEC_SW(x,i), NV_VEC_SW(z,i)); return; } booleantype N_VInvTest_SensWrapper(N_Vector x, N_Vector z) { int i; booleantype no_zero_found, tmp; no_zero_found = SUNTRUE; for (i=0; i < NV_NVECS_SW(x); i++) { tmp = N_VInvTest(NV_VEC_SW(x,i), NV_VEC_SW(z,i)); if (tmp != SUNTRUE) no_zero_found = SUNFALSE; } return(no_zero_found); } booleantype N_VConstrMask_SensWrapper(N_Vector c, N_Vector x, N_Vector m) { int i; booleantype test, tmp; test = SUNTRUE; for (i=0; i < NV_NVECS_SW(x); i++) { tmp = N_VConstrMask(c, NV_VEC_SW(x,i), NV_VEC_SW(m,i)); if (tmp != SUNTRUE) test = SUNFALSE; } return(test); } realtype N_VMinQuotient_SensWrapper(N_Vector num, N_Vector denom) { int i; realtype min, tmp; min = N_VMinQuotient(NV_VEC_SW(num,0), NV_VEC_SW(denom,0)); for (i=1; i < NV_NVECS_SW(num); i++) { tmp = N_VMinQuotient(NV_VEC_SW(num,i), NV_VEC_SW(denom,i)); if (tmp < min) min = tmp; } return(min); } StanHeaders/src/sundials/sundials_iterative_impl.h0000644000176200001440000000260114645137106022162 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the implementation header file for SUNDIALS functions used by * different iterative solvers. 
* ---------------------------------------------------------------------------*/ #include /* ----------------------------------------------------------------------------- * Type: SUNQRData * ----------------------------------------------------------------------------- * A SUNQRData struct holds temporary workspace vectors and realtype arrays for * a SUNQRAddFn. The N_Vectors and realtype arrays it contains are created by * the routine calling a SUNQRAdd function. * ---------------------------------------------------------------------------*/ typedef struct _SUNQRData *SUNQRData; struct _SUNQRData { N_Vector vtemp; N_Vector vtemp2; realtype *temp_array; }; StanHeaders/src/sundials/sundials_hip.h0000644000176200001440000000436614645137106017737 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos, and Daniel McGreer @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header files defines internal utility functions and macros * for working with HIP. * ----------------------------------------------------------------- */ #include #include #include #ifndef _SUNDIALS_HIP_H #define _SUNDIALS_HIP_H #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* --------------------------------------------------------------------------- * Utility macros * ---------------------------------------------------------------------------*/ #define SUNDIALS_HIP_VERIFY(hiperr) SUNDIALS_HIP_Assert(hiperr, __FILE__, __LINE__) #define SUNDIALS_KERNEL_NAME(...) 
__VA_ARGS__ #ifndef SUNDIALS_DEBUG_HIP_LASTERROR #define SUNDIALS_LAUNCH_KERNEL(kernel, gridDim, blockDim, shMem, stream, ...) \ { kernel<<>>(__VA_ARGS__); } #else #define SUNDIALS_LAUNCH_KERNEL(kernel, gridDim, blockDim, shMem, stream, ...) \ { \ kernel<<>>(__VA_ARGS__); \ hipDeviceSynchronize(); \ SUNDIALS_HIP_VERIFY(hipGetLastError()); \ } #endif /* --------------------------------------------------------------------------- * Utility functions * ---------------------------------------------------------------------------*/ inline booleantype SUNDIALS_HIP_Assert(hipError_t hiperr, const char *file, int line) { if (hiperr != hipSuccess) { #ifdef SUNDIALS_DEBUG fprintf(stderr, "ERROR in HIP runtime operation: %s %s:%d\n", hipGetErrorString(hiperr), file, line); #endif return SUNFALSE; /* Assert failed */ } return SUNTRUE; /* Assert OK */ } #ifdef __cplusplus /* wrapper to enable C++ usage */ } #endif #endif /* _SUNDIALS_HIP_H */ StanHeaders/src/sundials/sundials_direct.c0000644000176200001440000002003514645137106020413 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for operations to be used by a * generic direct linear solver. 
* -----------------------------------------------------------------*/ #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) SUNDlsMat NewDenseMat(sunindextype M, sunindextype N) { return(SUNDlsMat_NewDenseMat(M, N)); } SUNDlsMat SUNDlsMat_NewDenseMat(sunindextype M, sunindextype N) { SUNDlsMat A; sunindextype j; if ( (M <= 0) || (N <= 0) ) return(NULL); A = NULL; A = (SUNDlsMat) malloc(sizeof *A); if (A==NULL) return (NULL); A->data = (realtype *) malloc(M * N * sizeof(realtype)); if (A->data == NULL) { free(A); A = NULL; return(NULL); } A->cols = (realtype **) malloc(N * sizeof(realtype *)); if (A->cols == NULL) { free(A->data); A->data = NULL; free(A); A = NULL; return(NULL); } for (j=0; j < N; j++) A->cols[j] = A->data + j * M; A->M = M; A->N = N; A->ldim = M; A->ldata = M*N; A->type = SUNDIALS_DENSE; return(A); } realtype** newDenseMat(sunindextype m, sunindextype n) { return(SUNDlsMat_newDenseMat(m, n)); } realtype** SUNDlsMat_newDenseMat(sunindextype m, sunindextype n) { sunindextype j; realtype **a; if ( (n <= 0) || (m <= 0) ) return(NULL); a = NULL; a = (realtype **) malloc(n * sizeof(realtype *)); if (a == NULL) return(NULL); a[0] = NULL; a[0] = (realtype *) malloc(m * n * sizeof(realtype)); if (a[0] == NULL) { free(a); a = NULL; return(NULL); } for (j=1; j < n; j++) a[j] = a[0] + j * m; return(a); } SUNDlsMat NewBandMat(sunindextype N, sunindextype mu, sunindextype ml, sunindextype smu) { return(SUNDlsMat_NewBandMat(N, mu, ml, smu)); } SUNDlsMat SUNDlsMat_NewBandMat(sunindextype N, sunindextype mu, sunindextype ml, sunindextype smu) { SUNDlsMat A; sunindextype j, colSize; if (N <= 0) return(NULL); A = NULL; A = (SUNDlsMat) malloc(sizeof *A); if (A == NULL) return (NULL); colSize = smu + ml + 1; A->data = NULL; A->data = (realtype *) malloc(N * colSize * sizeof(realtype)); if (A->data == NULL) { free(A); A = NULL; return(NULL); } A->cols = NULL; A->cols = (realtype **) malloc(N * sizeof(realtype *)); if (A->cols == NULL) { 
free(A->data); free(A); A = NULL; return(NULL); } for (j=0; j < N; j++) A->cols[j] = A->data + j * colSize; A->M = N; A->N = N; A->mu = mu; A->ml = ml; A->s_mu = smu; A->ldim = colSize; A->ldata = N * colSize; A->type = SUNDIALS_BAND; return(A); } realtype** newBandMat(sunindextype n, sunindextype smu, sunindextype ml) { return(SUNDlsMat_newBandMat(n, smu, ml)); } realtype** SUNDlsMat_newBandMat(sunindextype n, sunindextype smu, sunindextype ml) { realtype **a; sunindextype j, colSize; if (n <= 0) return(NULL); a = NULL; a = (realtype **) malloc(n * sizeof(realtype *)); if (a == NULL) return(NULL); colSize = smu + ml + 1; a[0] = NULL; a[0] = (realtype *) malloc(n * colSize * sizeof(realtype)); if (a[0] == NULL) { free(a); a = NULL; return(NULL); } for (j=1; j < n; j++) a[j] = a[0] + j * colSize; return(a); } void DestroyMat(SUNDlsMat A) { SUNDlsMat_DestroyMat(A); } void SUNDlsMat_DestroyMat(SUNDlsMat A) { free(A->data); A->data = NULL; free(A->cols); free(A); A = NULL; } void destroyMat(realtype **a) { SUNDlsMat_destroyMat(a); } void SUNDlsMat_destroyMat(realtype **a) { free(a[0]); a[0] = NULL; free(a); a = NULL; } int* NewIntArray(int N) { return(SUNDlsMat_NewIntArray(N)); } int* SUNDlsMat_NewIntArray(int N) { int *vec; if (N <= 0) return(NULL); vec = NULL; vec = (int *) malloc(N * sizeof(int)); return(vec); } int* newIntArray(int N) { return(SUNDlsMat_newIntArray(N)); } int* SUNDlsMat_newIntArray(int n) { int *v; if (n <= 0) return(NULL); v = NULL; v = (int *) malloc(n * sizeof(int)); return(v); } sunindextype* NewIndexArray(sunindextype N) { return(SUNDlsMat_NewIndexArray(N)); } sunindextype* SUNDlsMat_NewIndexArray(sunindextype N) { sunindextype *vec; if (N <= 0) return(NULL); vec = NULL; vec = (sunindextype *) malloc(N * sizeof(sunindextype)); return(vec); } sunindextype* newIndexArray(sunindextype n) { return(SUNDlsMat_newIndexArray(n)); } sunindextype* SUNDlsMat_newIndexArray(sunindextype n) { sunindextype *v; if (n <= 0) return(NULL); v = NULL; v = 
(sunindextype *) malloc(n * sizeof(sunindextype)); return(v); } realtype* NewRealArray(sunindextype N) { return(SUNDlsMat_NewRealArray(N)); } realtype* SUNDlsMat_NewRealArray(sunindextype N) { realtype *vec; if (N <= 0) return(NULL); vec = NULL; vec = (realtype *) malloc(N * sizeof(realtype)); return(vec); } realtype* newRealArray(sunindextype N) { return(SUNDlsMat_newRealArray(N)); } realtype* SUNDlsMat_newRealArray(sunindextype m) { realtype *v; if (m <= 0) return(NULL); v = NULL; v = (realtype *) malloc(m * sizeof(realtype)); return(v); } void DestroyArray(void *p) { SUNDlsMat_DestroyArray(p); } void SUNDlsMat_DestroyArray(void *V) { free(V); V = NULL; } void destroyArray(void *p) { SUNDlsMat_destroyArray(p); } void SUNDlsMat_destroyArray(void *v) { free(v); v = NULL; } void AddIdentity(SUNDlsMat A) { SUNDlsMat_AddIdentity(A); } void SUNDlsMat_AddIdentity(SUNDlsMat A) { sunindextype i; switch (A->type) { case SUNDIALS_DENSE: for (i=0; iN; i++) A->cols[i][i] += ONE; break; case SUNDIALS_BAND: for (i=0; iM; i++) A->cols[i][A->s_mu] += ONE; break; } } void SetToZero(SUNDlsMat A) { SUNDlsMat_SetToZero(A); } void SUNDlsMat_SetToZero(SUNDlsMat A) { sunindextype i, j, colSize; realtype *col_j; switch (A->type) { case SUNDIALS_DENSE: for (j=0; jN; j++) { col_j = A->cols[j]; for (i=0; iM; i++) col_j[i] = ZERO; } break; case SUNDIALS_BAND: colSize = A->mu + A->ml + 1; for (j=0; jM; j++) { col_j = A->cols[j] + A->s_mu - A->mu; for (i=0; itype) { case SUNDIALS_DENSE: STAN_SUNDIALS_FPRINTF(outfile, "\n"); for (i=0; i < A->M; i++) { for (j=0; j < A->N; j++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%12Lg ", SUNDLS_DENSE_ELEM(A,i,j)); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%12g ", SUNDLS_DENSE_ELEM(A,i,j)); #else STAN_SUNDIALS_FPRINTF(outfile, "%12g ", SUNDLS_DENSE_ELEM(A,i,j)); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); } STAN_SUNDIALS_FPRINTF(outfile, "\n"); break; case SUNDIALS_BAND: a = A->cols; 
STAN_SUNDIALS_FPRINTF(outfile, "\n"); for (i=0; i < A->N; i++) { start = SUNMAX(0,i-A->ml); finish = SUNMIN(A->N-1,i+A->mu); for (j=0; j < start; j++) STAN_SUNDIALS_FPRINTF(outfile, "%12s ",""); for (j=start; j <= finish; j++) { #if defined(SUNDIALS_EXTENDED_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%12Lg ", a[j][i-j+A->s_mu]); #elif defined(SUNDIALS_DOUBLE_PRECISION) STAN_SUNDIALS_FPRINTF(outfile, "%12g ", a[j][i-j+A->s_mu]); #else STAN_SUNDIALS_FPRINTF(outfile, "%12g ", a[j][i-j+A->s_mu]); #endif } STAN_SUNDIALS_FPRINTF(outfile, "\n"); } STAN_SUNDIALS_FPRINTF(outfile, "\n"); break; } } StanHeaders/src/sundials/sundials_debug.h0000644000176200001440000000257314645137106020243 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header files defines internal utility functions and macros * for SUNDIALS debugging. 
* -----------------------------------------------------------------*/ #ifndef _SUNDIALS_DEBUG_H #define _SUNDIALS_DEBUG_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * Macro which prints to stderr when in debug mode */ #ifdef SUNDIALS_DEBUG #define SUNDIALS_DEBUG_PRINT(str) fprintf(stderr, str) #else #define SUNDIALS_DEBUG_PRINT(str) #endif /* * Macro which prints error messages in debug mode */ #ifdef SUNDIALS_DEBUG #define SUNDIALS_DEBUG_ERROR(msg) \ fprintf(stderr, "ERROR in %s (%s line %d): %s", \ __func__, __FILE__, __LINE__, msg); #else #define SUNDIALS_DEBUG_ERROR(msg) #endif #ifdef __cplusplus /* wrapper to enable C++ usage */ } #endif #endif /* _SUNDIALS_DEBUG_H */ StanHeaders/src/sundials/sundials_iterative.c0000644000176200001440000004365514645137106021152 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. Hindmarsh and * Radu Serban @ LLNL * Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the iterative.h header * file. It contains the implementation of functions that may be * useful for many different iterative solvers of A x = b. 
* -----------------------------------------------------------------*/ #include #include "sundials_iterative_impl.h" #include #define FACTOR RCONST(1000.0) #define ZERO RCONST(0.0) #define ONE RCONST(1.0) /* * ----------------------------------------------------------------- * Function : SUNModifiedGS * ----------------------------------------------------------------- * This implementation of SUNModifiedGS is a slight modification of * a previous modified Gram-Schmidt routine (called mgs) written by * Milo Dorr. * ----------------------------------------------------------------- */ int ModifiedGS(N_Vector *v, realtype **h, int k, int p, realtype *new_vk_norm) { return(SUNModifiedGS(v, h, k, p, new_vk_norm)); } int SUNModifiedGS(N_Vector *v, realtype **h, int k, int p, realtype *new_vk_norm) { int i, k_minus_1, i0; realtype new_norm_2, new_product, vk_norm, temp; vk_norm = SUNRsqrt(N_VDotProd(v[k],v[k])); k_minus_1 = k - 1; i0 = SUNMAX(k-p, 0); /* Perform modified Gram-Schmidt */ for (i=i0; i < k; i++) { h[i][k_minus_1] = N_VDotProd(v[i], v[k]); N_VLinearSum(ONE, v[k], -h[i][k_minus_1], v[i], v[k]); } /* Compute the norm of the new vector at v[k] */ *new_vk_norm = SUNRsqrt(N_VDotProd(v[k], v[k])); /* If the norm of the new vector at v[k] is less than FACTOR (== 1000) times unit roundoff times the norm of the input vector v[k], then the vector will be reorthogonalized in order to ensure that nonorthogonality is not being masked by a very small vector length. */ temp = FACTOR * vk_norm; if ((temp + (*new_vk_norm)) != temp) return(0); new_norm_2 = ZERO; for (i=i0; i < k; i++) { new_product = N_VDotProd(v[i], v[k]); temp = FACTOR * h[i][k_minus_1]; if ((temp + new_product) == temp) continue; h[i][k_minus_1] += new_product; N_VLinearSum(ONE, v[k],-new_product, v[i], v[k]); new_norm_2 += SUNSQR(new_product); } if (new_norm_2 != ZERO) { new_product = SUNSQR(*new_vk_norm) - new_norm_2; *new_vk_norm = (new_product > ZERO) ? 
SUNRsqrt(new_product) : ZERO; } return(0); } /* * ----------------------------------------------------------------- * Function : SUNClassicalGS * ----------------------------------------------------------------- * This implementation of SUNClassicalGS was contributed by Homer * Walker and Peter Brown. * ----------------------------------------------------------------- */ int ClassicalGS(N_Vector *v, realtype **h, int k, int p, realtype *new_vk_norm, realtype *stemp, N_Vector *vtemp) { return(SUNClassicalGS(v, h, k, p, new_vk_norm, stemp, vtemp)); } int SUNClassicalGS(N_Vector *v, realtype **h, int k, int p, realtype *new_vk_norm, realtype *stemp, N_Vector *vtemp) { int i, i0, k_minus_1, retval; realtype vk_norm; k_minus_1 = k - 1; i0 = SUNMAX(k-p,0); /* Perform Classical Gram-Schmidt */ retval = N_VDotProdMulti(k-i0+1, v[k], v+i0, stemp); if (retval != 0) return(-1); vk_norm = SUNRsqrt(stemp[k-i0]); for (i=k-i0-1; i >= 0; i--) { h[i][k_minus_1] = stemp[i]; stemp[i+1] = -stemp[i]; vtemp[i+1] = v[i]; } stemp[0] = ONE; vtemp[0] = v[k]; retval = N_VLinearCombination(k-i0+1, stemp, vtemp, v[k]); if (retval != 0) return(-1); /* Compute the norm of the new vector at v[k] */ *new_vk_norm = SUNRsqrt(N_VDotProd(v[k], v[k])); /* Reorthogonalize if necessary */ if ((FACTOR * (*new_vk_norm)) < vk_norm) { retval = N_VDotProdMulti(k-i0, v[k], v+i0, stemp+1); if (retval != 0) return(-1); stemp[0] = ONE; vtemp[0] = v[k]; for (i=i0; i < k; i++) { h[i][k_minus_1] += stemp[i-i0+1]; stemp[i-i0+1] = -stemp[i-i0+1]; vtemp[i-i0+1] = v[i-i0]; } retval = N_VLinearCombination(k+1, stemp, vtemp, v[k]); if (retval != 0) return(-1); *new_vk_norm = SUNRsqrt(N_VDotProd(v[k],v[k])); } return(0); } /* * ----------------------------------------------------------------- * Function : SUNQRfact * ----------------------------------------------------------------- * This implementation of SUNQRfact is a slight modification of a * previous routine (called qrfact) written by Milo Dorr. 
* ----------------------------------------------------------------- */ int QRfact(int n, realtype **h, realtype *q, int job) { return(SUNQRfact(n, h, q, job)); } int SUNQRfact(int n, realtype **h, realtype *q, int job) { realtype c, s, temp1, temp2, temp3; int i, j, k, q_ptr, n_minus_1, code=0; switch (job) { case 0: /* Compute a new factorization of H */ code = 0; for (k=0; k < n; k++) { /* Multiply column k by the previous k-1 Givens rotations */ for (j=0; j < k-1; j++) { i = 2*j; temp1 = h[j][k]; temp2 = h[j+1][k]; c = q[i]; s = q[i+1]; h[j][k] = c*temp1 - s*temp2; h[j+1][k] = s*temp1 + c*temp2; } /* Compute the Givens rotation components c and s */ q_ptr = 2*k; temp1 = h[k][k]; temp2 = h[k+1][k]; if( temp2 == ZERO) { c = ONE; s = ZERO; } else if (SUNRabs(temp2) >= SUNRabs(temp1)) { temp3 = temp1/temp2; s = -ONE/SUNRsqrt(ONE+SUNSQR(temp3)); c = -s*temp3; } else { temp3 = temp2/temp1; c = ONE/SUNRsqrt(ONE+SUNSQR(temp3)); s = -c*temp3; } q[q_ptr] = c; q[q_ptr+1] = s; if( (h[k][k] = c*temp1 - s*temp2) == ZERO) code = k+1; } break; default: /* Update the factored H to which a new column has been added */ n_minus_1 = n - 1; code = 0; /* Multiply the new column by the previous n-1 Givens rotations */ for (k=0; k < n_minus_1; k++) { i = 2*k; temp1 = h[k][n_minus_1]; temp2 = h[k+1][n_minus_1]; c = q[i]; s = q[i+1]; h[k][n_minus_1] = c*temp1 - s*temp2; h[k+1][n_minus_1] = s*temp1 + c*temp2; } /* Compute new Givens rotation and multiply it times the last two entries in the new column of H. Note that the second entry of this product will be 0, so it is not necessary to compute it. 
*/ temp1 = h[n_minus_1][n_minus_1]; temp2 = h[n][n_minus_1]; if (temp2 == ZERO) { c = ONE; s = ZERO; } else if (SUNRabs(temp2) >= SUNRabs(temp1)) { temp3 = temp1/temp2; s = -ONE/SUNRsqrt(ONE+SUNSQR(temp3)); c = -s*temp3; } else { temp3 = temp2/temp1; c = ONE/SUNRsqrt(ONE+SUNSQR(temp3)); s = -c*temp3; } q_ptr = 2*n_minus_1; q[q_ptr] = c; q[q_ptr+1] = s; if ((h[n_minus_1][n_minus_1] = c*temp1 - s*temp2) == ZERO) code = n; } return (code); } /* * ----------------------------------------------------------------- * Function : SUNQRsol * ----------------------------------------------------------------- * This implementation of SUNQRsol is a slight modification of a * previous routine (called qrsol) written by Milo Dorr. * ----------------------------------------------------------------- */ int QRsol(int n, realtype **h, realtype *q, realtype *b) { return(SUNQRsol(n, h, q, b)); } int SUNQRsol(int n, realtype **h, realtype *q, realtype *b) { realtype c, s, temp1, temp2; int i, k, q_ptr, code=0; /* Compute Q*b */ for (k=0; k < n; k++) { q_ptr = 2*k; c = q[q_ptr]; s = q[q_ptr+1]; temp1 = b[k]; temp2 = b[k+1]; b[k] = c*temp1 - s*temp2; b[k+1] = s*temp1 + c*temp2; } /* Solve R*x = Q*b */ for (k=n-1; k >= 0; k--) { if (h[k][k] == ZERO) { code = k + 1; break; } b[k] /= h[k][k]; for (i=0; i < k; i++) b[i] -= b[k]*h[i][k]; } return (code); } /* * ----------------------------------------------------------------- * Function : SUNQRAdd_MGS * ----------------------------------------------------------------- * Implementation of QRAdd to be called in Anderson Acceleration * ----------------------------------------------------------------- */ int SUNQRAdd_MGS(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata) { sunindextype j; SUNQRData qrdata = (SUNQRData) QRdata; N_VScale(ONE, df, qrdata->vtemp); for (j=0; j < m; j++) { R[m * mMax + j] = N_VDotProd(Q[j], qrdata->vtemp); N_VLinearSum(ONE, qrdata->vtemp, -R[m * mMax + j], Q[j], qrdata->vtemp); } R[m * mMax + m] = 
SUNRsqrt(N_VDotProd(qrdata->vtemp, qrdata->vtemp)); N_VScale((1/R[m * mMax + m]), qrdata->vtemp, Q[m]); /* Return success */ return 0; } /* * ----------------------------------------------------------------- * Function : SUNQRAdd_ICWY * ----------------------------------------------------------------- * Low synchronous implementation of QRAdd to be called in * Anderson Acceleration. * ----------------------------------------------------------------- */ int SUNQRAdd_ICWY(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata) { sunindextype j, k; SUNQRData qrdata = (SUNQRData) QRdata; N_VScale(ONE, df, qrdata->vtemp); /* stores d_fi in temp */ if (m > 0) { /* T(1:k-1,k-1)^T = Q(:,1:k-1)^T * Q(:,k-1) */ N_VDotProdMulti(m, Q[m-1], Q, qrdata->temp_array + (m-1) * mMax); /* T(k-1,k-1) = 1.0 */ qrdata->temp_array[(m-1) * mMax + (m-1)] = ONE; /* R(1:k-1,k) = Q_k-1^T * df */ N_VDotProdMulti(m, qrdata->vtemp, Q, R + m * mMax ); /* Solve T^T * R(1:k-1,k) = R(1:k-1,k) */ for (k = 0; k < m; k++) { /* Skip setting the diagonal element because it doesn't change */ for (j = k+1; j < m; j++) { R[m * mMax + j] -= R[m * mMax + k] * qrdata->temp_array[j * mMax + k]; } } /* end */ /* Q(:,k-1) = df - Q_k-1 R(1:k-1,k) */ N_VLinearCombination(m, R + m * mMax, Q, qrdata->vtemp2); N_VLinearSum(ONE, qrdata->vtemp, -ONE, qrdata->vtemp2, qrdata->vtemp); } /* R(k,k) = \| df \| */ R[m * mMax + m] = SUNRsqrt(N_VDotProd(qrdata->vtemp, qrdata->vtemp)); /* Q(:,k) = df / \| df \| */ N_VScale((1/R[m * mMax + m]), qrdata->vtemp, Q[m]); /* Return success */ return 0; } /* * ----------------------------------------------------------------- * Function : SUNQRAdd_ICWY_SB * ----------------------------------------------------------------- * Low synchronous implementation of QRAdd to be called in * Anderson Acceleration which utilizes a single buffer reduction. 
* ----------------------------------------------------------------- */ int SUNQRAdd_ICWY_SB(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata) { sunindextype j, k; SUNQRData qrdata = (SUNQRData) QRdata; N_VScale(ONE, df, qrdata->vtemp); /* stores d_fi in temp */ if (m > 0) { /* T(1:k-1,k-1)^T = Q(:,1:k-1)^T * Q(:,k-1) */ N_VDotProdMultiLocal(m, Q[m-1], Q, qrdata->temp_array + (m-1) * mMax); /* R(1:k-1,k) = Q_k-1^T * df */ /* Put R values at end of temp_array */ N_VDotProdMultiLocal(m, qrdata->vtemp, Q, qrdata->temp_array + (m-1) * mMax + m ); N_VDotProdMultiAllReduce(m+m, qrdata->vtemp, qrdata->temp_array + (m-1) * mMax); /* Move the last values from temp array into R */ for (k = 0; k < m; k++) { R[m*mMax + k] = qrdata->temp_array[(m-1)*mMax + m + k]; } /* T(k-1,k-1) = 1.0 */ qrdata->temp_array[(m-1) * mMax + (m-1)] = ONE; /* Solve T^T * R(1:k-1,k) = R(1:k-1,k) */ for (k = 0; k < m; k++) { /* Skip setting the diagonal element because it doesn't change */ for (j = k+1; j < m; j++) { R[m * mMax + j] -= R[m * mMax + k] * qrdata->temp_array[j * mMax + k]; } } /* end */ /* Q(:,k-1) = df - Q_k-1 R(1:k-1,k) */ N_VLinearCombination(m, R + m * mMax, Q, qrdata->vtemp2); N_VLinearSum(ONE, qrdata->vtemp, -ONE, qrdata->vtemp2, qrdata->vtemp); } /* R(k,k) = \| df \| */ R[m * mMax + m] = SUNRsqrt(N_VDotProd(qrdata->vtemp, qrdata->vtemp)); /* Q(:,k) = df / \| df \| */ N_VScale((1/R[m * mMax + m]), qrdata->vtemp, Q[m]); /* Return success */ return 0; } /* * ----------------------------------------------------------------- * Function : SUNQRAdd_CGS2 * ----------------------------------------------------------------- * Low synchronous Implementation of QRAdd to be called in * Anderson Acceleration. 
* ----------------------------------------------------------------- */ int SUNQRAdd_CGS2(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata) { sunindextype j; SUNQRData qrdata = (SUNQRData) QRdata; N_VScale(ONE, df, qrdata->vtemp); /* temp = df */ if (m > 0) { /* s_k = Q_k-1^T df_aa -- update with sdata as a realtype* array */ N_VDotProdMulti(m, qrdata->vtemp, Q, R + m * mMax); /* y = df - Q_k-1 s_k */ N_VLinearCombination(m, R + m * mMax, Q, qrdata->vtemp2); N_VLinearSum(ONE, qrdata->vtemp, -ONE, qrdata->vtemp2, qrdata->vtemp2); /* z_k = Q_k-1^T y */ N_VDotProdMulti(m, qrdata->vtemp2, Q, qrdata->temp_array); /* df = y - Q_k-1 z_k -- update using N_VLinearCombination */ N_VLinearCombination(m, qrdata->temp_array, Q, Q[m]); N_VLinearSum(ONE, qrdata->vtemp2, -ONE, Q[m], qrdata->vtemp); /* R(1:k-1,k) = s_k + z_k */ for (j = 0; j < m; j++) { R[m * mMax + j] = R[m * mMax + j] + qrdata->temp_array[j]; } } /* R(k,k) = \| df \| */ R[m * mMax + m] = SUNRsqrt(N_VDotProd(qrdata->vtemp, qrdata->vtemp)); /* Q(:,k) = df / R(k,k) */ N_VScale((1/R[m * mMax + m]), qrdata->vtemp, Q[m]); /* Return success */ return 0; } /* * ----------------------------------------------------------------- * Function : SUNQRAdd_DCGS2 * ----------------------------------------------------------------- * Low synchronous Implementation of QRAdd to be called in * Anderson Acceleration. 
* ----------------------------------------------------------------- */ int SUNQRAdd_DCGS2(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata) { sunindextype j; SUNQRData qrdata = (SUNQRData) QRdata; N_VScale(ONE, df, qrdata->vtemp); /* temp = df */ if (m > 0) { /* R(1:k-1,k) = Q_k-1^T df_aa */ N_VDotProdMulti(m, qrdata->vtemp, Q, R + m*mMax); /* Delayed reorthogonalization */ if (m > 1) { /* s = Q_k-2^T Q(:,k-1) */ N_VDotProdMulti(m-1, Q[m-1], Q, qrdata->temp_array); /* Q(:,k-1) = Q(:,k-1) - Q_k-2 s */ N_VLinearCombination(m-1, qrdata->temp_array, Q, qrdata->vtemp2); N_VLinearSum(ONE, Q[m-1], -ONE, qrdata->vtemp2, Q[m-1]); /* R(1:k-2,k-1) = R(1:k-2,k-1) + s */ for (j = 0; j < m-1; j++) { R[(m-1) * mMax + j] = R[(m-1) * mMax + j] + qrdata->temp_array[j]; } } /* df = df - Q(:,k-1) R(1:k-1,k) */ N_VLinearCombination(m, R + m * mMax, Q, qrdata->vtemp2); N_VLinearSum(ONE, qrdata->vtemp, -ONE, qrdata->vtemp2, qrdata->vtemp); } /* R(k,k) = \| df \| */ R[m * mMax + m] = SUNRsqrt(N_VDotProd(qrdata->vtemp, qrdata->vtemp)); /* Q(:,k) = df / R(k,k) */ N_VScale((1/R[m * mMax + m]), qrdata->vtemp, Q[m]); /* Return success */ return 0; } /* * ----------------------------------------------------------------- * Function : SUNQRAdd_DCGS2_SB * ----------------------------------------------------------------- * Low synchronous Implementation of QRAdd to be called in * Anderson Acceleration which utilizes a single buffer reduction. 
* ----------------------------------------------------------------- */ int SUNQRAdd_DCGS2_SB(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata) { sunindextype j; SUNQRData qrdata = (SUNQRData) QRdata; N_VScale(ONE, df, qrdata->vtemp); /* temp = df */ if (m > 0) { if (m == 1) { /* R(1:k-1,k) = Q_k-1^T df_aa */ N_VDotProdMulti(m, qrdata->vtemp, Q, R + m*mMax); } /* Delayed reorthogonalization */ else if (m > 1) { /* R(1:k-1,k) = Q_k-1^T df_aa */ /* Put R values at beginning of temp array */ N_VDotProdMultiLocal(m, qrdata->vtemp, Q, qrdata->temp_array); /* s = Q_k-2^T Q(:,k-1) */ N_VDotProdMultiLocal(m-1, Q[m-1], Q, qrdata->temp_array + m); N_VDotProdMultiAllReduce(m + m-1, qrdata->vtemp, qrdata->temp_array); /* Move R values to R */ for (j = 0; j < m; j++) { R[m*mMax + j] = qrdata->temp_array[j]; } /* Q(:,k-1) = Q(:,k-1) - Q_k-2 s */ N_VLinearCombination(m-1, qrdata->temp_array + m, Q, qrdata->vtemp2); N_VLinearSum(ONE, Q[m-1], -ONE, qrdata->vtemp2, Q[m-1]); /* R(1:k-2,k-1) = R(1:k-2,k-1) + s */ for (j = 0; j < m-1; j++) { R[(m-1) * mMax + j] = R[(m-1) * mMax + j] + qrdata->temp_array[m + j]; } } /* df = df - Q(:,k-1) R(1:k-1,k) */ N_VLinearCombination(m, R + m * mMax, Q, qrdata->vtemp2); N_VLinearSum(ONE, qrdata->vtemp, -ONE, qrdata->vtemp2, qrdata->vtemp); } /* R(k,k) = \| df \| */ R[m * mMax + m] = SUNRsqrt(N_VDotProd(qrdata->vtemp, qrdata->vtemp)); /* Q(:,k) = df / R(k,k) */ N_VScale((1/R[m * mMax + m]), qrdata->vtemp, Q[m]); /* Return success */ return 0; } StanHeaders/src/sundials/sundials_version.c0000644000176200001440000000337214645137106020633 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. 
* * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This file implements functions for getting SUNDIALS version * information. * -----------------------------------------------------------------*/ #include #include /* note strlen does not include terminating null character hence the use of >= when checking len below and strncpy copies up to len characters including the terminating null character */ /* fill string with SUNDIALS version information */ int SUNDIALSGetVersion(char *version, int len) { if (version == NULL) return(-1); if (strlen(SUNDIALS_VERSION) >= (size_t)len) return(-1); strncpy(version, SUNDIALS_VERSION, (size_t)len); return(0); } /* fill integers with SUNDIALS major, minor, and patch release numbers and fill a string with the release label */ int SUNDIALSGetVersionNumber(int *major, int *minor, int *patch, char *label, int len) { if (major == NULL || minor == NULL || patch == NULL || label == NULL) return(-1); if (strlen(SUNDIALS_VERSION_LABEL) >= (size_t)len) return(-1); *major = SUNDIALS_VERSION_MAJOR; *minor = SUNDIALS_VERSION_MINOR; *patch = SUNDIALS_VERSION_PATCH; strncpy(label, SUNDIALS_VERSION_LABEL, (size_t)len); return(0); } StanHeaders/src/sundials/sundials_reductions.hpp0000644000176200001440000000427514645137106021675 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- */ /* NOTE: SUNDIALS_HOST_DEVICE and SUNDIALS_DEVICE_INLINE must be defined before including this file */ #include namespace sundials { namespace reductions { namespace impl { template struct BinaryOperator { using first_arg_type = Arg1; using second_arg_type = Arg2; using result_arg_type = Result; }; template struct plus : public BinaryOperator { SUNDIALS_HOST_DEVICE constexpr Ret operator()(const Arg1& lhs, const Arg2& rhs) const { return Ret{lhs} + rhs; } static SUNDIALS_HOST_DEVICE SUNDIALS_DEVICE_INLINE constexpr Ret identity() { return Ret{0}; } }; template struct maximum : public BinaryOperator { SUNDIALS_HOST_DEVICE constexpr Ret operator()(const Arg1& lhs, const Arg2& rhs) const { return (lhs >= rhs) ? lhs : rhs; } static SUNDIALS_HOST_DEVICE SUNDIALS_DEVICE_INLINE constexpr Ret identity() { return std::numeric_limits::min(); } }; template struct minimum : public BinaryOperator { SUNDIALS_HOST_DEVICE constexpr Ret operator()(const Arg1& lhs, const Arg2& rhs) const { return (rhs < lhs) ? rhs : lhs; } static SUNDIALS_HOST_DEVICE SUNDIALS_DEVICE_INLINE constexpr Ret identity() { return std::numeric_limits::max(); } }; } // impl } // reductions } // sundials StanHeaders/src/sundials/sundials_futils.c0000644000176200001440000000170414645137106020451 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS Fortran 2003 interface utility implementations. 
* -----------------------------------------------------------------*/ #include /* Create a file pointer with the given file name and mode. */ FILE* SUNDIALSFileOpen(const char* filename, const char* mode) { return fopen(filename, mode); } /* Close a file pointer with the given file name. */ void SUNDIALSFileClose(FILE* fp) { fclose(fp); } StanHeaders/src/sundials/sundials_context_impl.h0000644000176200001440000000177314645137106021663 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS context class implementation. * ----------------------------------------------------------------*/ #ifndef _SUNDIALS_CONTEXT_IMPL_H #define _SUNDIALS_CONTEXT_IMPL_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif struct _SUNContext { SUNProfiler profiler; booleantype own_profiler; }; #ifdef __cplusplus } #endif #endif StanHeaders/src/sundials/sundials_cuda.h0000644000176200001440000000662714645137106020075 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header files defines internal utility functions and macros * for working with CUDA. * ----------------------------------------------------------------- */ #include #include #include #include #include #include #ifndef _SUNDIALS_CUDA_H #define _SUNDIALS_CUDA_H #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* --------------------------------------------------------------------------- * Utility macros * ---------------------------------------------------------------------------*/ #define SUNDIALS_CUDA_VERIFY(cuerr) SUNDIALS_CUDA_Assert(cuerr, __FILE__, __LINE__) #define SUNDIALS_CUSPARSE_VERIFY(cuerr) SUNDIALS_CUSPARSE_Assert(cuerr, __FILE__, __LINE__) #define SUNDIALS_CUSOLVER_VERIFY(cuerr) SUNDIALS_CUSOLVER_Assert(cuerr, __FILE__, __LINE__) #define SUNDIALS_KERNEL_NAME(...) __VA_ARGS__ #ifndef SUNDIALS_DEBUG_CUDA_LASTERROR #define SUNDIALS_LAUNCH_KERNEL(kernel, gridDim, blockDim, shMem, stream, ...) \ { kernel<<>>(__VA_ARGS__); } #else #define SUNDIALS_LAUNCH_KERNEL(kernel, gridDim, blockDim, shMem, stream, ...) 
\ { \ kernel<<>>(__VA_ARGS__); \ cudaDeviceSynchronize(); \ SUNDIALS_CUDA_VERIFY(cudaGetLastError()); \ } #endif /* --------------------------------------------------------------------------- * Utility functions * ---------------------------------------------------------------------------*/ inline booleantype SUNDIALS_CUDA_Assert(cudaError_t cuerr, const char *file, int line) { if (cuerr != cudaSuccess) { #ifdef SUNDIALS_DEBUG fprintf(stderr, "ERROR in CUDA runtime operation: %s %s:%d\n", cudaGetErrorString(cuerr), file, line); #ifdef SUNDIALS_DEBUG_ASSERT assert(false); #endif #endif return SUNFALSE; /* Assert failed */ } return SUNTRUE; /* Assert OK */ } inline booleantype SUNDIALS_CUSPARSE_Assert(cusparseStatus_t status, const char *file, int line) { if (status != CUSPARSE_STATUS_SUCCESS) { #ifdef SUNDIALS_DEBUG fprintf(stderr, "ERROR in cuSPARSE runtime operation: cusparseStatus_t = %d %s:%d\n", status, file, line); #ifdef SUNDIALS_DEBUG_ASSERT assert(false); #endif #endif return SUNFALSE; /* Assert failed */ } return SUNTRUE; /* Assert OK */ } inline booleantype SUNDIALS_CUSOLVER_Assert(cusolverStatus_t status, const char *file, int line) { if (status != CUSOLVER_STATUS_SUCCESS) { #ifdef SUNDIALS_DEBUG fprintf(stderr, "ERROR in cuSOLVER runtime operation: cusolverStatus_t = %d %s:%d\n", status, file, line); #ifdef SUNDIALS_DEBUG_ASSERT assert(false); #endif #endif return SUNFALSE; /* Assert failed */ } return SUNTRUE; /* Assert OK */ } #ifdef __cplusplus /* wrapper to enable C++ usage */ } #endif #endif /* _SUNDIALS_CUDA_H */ StanHeaders/src/sundials/sundials_dense.c0000644000176200001440000002655514645137106020254 0ustar liggesusers/* * ----------------------------------------------------------------- * $Revision$ * $Date$ * ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. 
Hindmarsh and * Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a generic package of dense * matrix operations. * ----------------------------------------------------------------- */ #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /* * ----------------------------------------------------- * Functions working on SUNDlsMat * ----------------------------------------------------- */ sunindextype SUNDlsMat_DenseGETRF(SUNDlsMat A, sunindextype *p) { return(SUNDlsMat_denseGETRF(A->cols, A->M, A->N, p)); } sunindextype DenseGETRF(SUNDlsMat A, sunindextype *p) { return(SUNDlsMat_denseGETRF(A->cols, A->M, A->N, p)); } void SUNDlsMat_DenseGETRS(SUNDlsMat A, sunindextype *p, realtype *b) { SUNDlsMat_denseGETRS(A->cols, A->N, p, b); } void DenseGETRS(SUNDlsMat A, sunindextype *p, realtype *b) { SUNDlsMat_denseGETRS(A->cols, A->N, p, b); } sunindextype SUNDlsMat_DensePOTRF(SUNDlsMat A) { return(SUNDlsMat_densePOTRF(A->cols, A->M)); } sunindextype DensePOTRF(SUNDlsMat A) { return(SUNDlsMat_densePOTRF(A->cols, A->M)); } void SUNDlsMat_DensePOTRS(SUNDlsMat A, realtype *b) { SUNDlsMat_densePOTRS(A->cols, A->M, b); } void DensePOTRS(SUNDlsMat A, realtype *b) { SUNDlsMat_densePOTRS(A->cols, A->M, b); } int SUNDlsMat_DenseGEQRF(SUNDlsMat A, realtype *beta, realtype *wrk) { return(SUNDlsMat_denseGEQRF(A->cols, A->M, A->N, beta, wrk)); } int DenseGEQRF(SUNDlsMat A, realtype *beta, realtype *wrk) { return(SUNDlsMat_denseGEQRF(A->cols, A->M, A->N, beta, wrk)); } int SUNDlsMat_DenseORMQR(SUNDlsMat A, realtype *beta, 
realtype *vn, realtype *vm, realtype *wrk) { return(SUNDlsMat_denseORMQR(A->cols, A->M, A->N, beta, vn, vm, wrk)); } int DenseORMQR(SUNDlsMat A, realtype *beta, realtype *vn, realtype *vm, realtype *wrk) { return(SUNDlsMat_denseORMQR(A->cols, A->M, A->N, beta, vn, vm, wrk)); } void SUNDlsMat_DenseCopy(SUNDlsMat A, SUNDlsMat B) { SUNDlsMat_denseCopy(A->cols, B->cols, A->M, A->N); } void DenseCopy(SUNDlsMat A, SUNDlsMat B) { SUNDlsMat_denseCopy(A->cols, B->cols, A->M, A->N); } void SUNDlsMat_DenseScale(realtype c, SUNDlsMat A) { SUNDlsMat_denseScale(c, A->cols, A->M, A->N); } void DenseScale(realtype c, SUNDlsMat A) { SUNDlsMat_denseScale(c, A->cols, A->M, A->N); } void SUNDlsMat_DenseMatvec(SUNDlsMat A, realtype *x, realtype *y) { SUNDlsMat_denseMatvec(A->cols, x, y, A->M, A->N); } void DenseMatvec(SUNDlsMat A, realtype *x, realtype *y) { SUNDlsMat_denseMatvec(A->cols, x, y, A->M, A->N); } sunindextype denseGETRF(realtype **a, sunindextype m, sunindextype n, sunindextype *p) { return(SUNDlsMat_denseGETRF(a, m, n, p)); } sunindextype SUNDlsMat_denseGETRF(realtype **a, sunindextype m, sunindextype n, sunindextype *p) { sunindextype i, j, k, l; realtype *col_j, *col_k; realtype temp, mult, a_kj; /* k-th elimination step number */ for (k=0; k < n; k++) { col_k = a[k]; /* find l = pivot row number */ l=k; for (i=k+1; i < m; i++) if (SUNRabs(col_k[i]) > SUNRabs(col_k[l])) l=i; p[k] = l; /* check for zero pivot element */ if (col_k[l] == ZERO) return(k+1); /* swap a(k,1:n) and a(l,1:n) if necessary */ if ( l!= k ) { for (i=0; i 0; k--) { col_k = a[k]; b[k] /= col_k[k]; for (i=0; i0) { for(i=j; i=0; i--) { col_i = a[i]; for (j=i+1; j= n) * using Householder reflections. * * On exit, the elements on and above the diagonal of A contain the n by n * upper triangular matrix R; the elements below the diagonal, with the array beta, * represent the orthogonal matrix Q as a product of elementary reflectors . * * v (of length m) must be provided as workspace. 
* */ int denseGEQRF(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *v) { return(SUNDlsMat_denseGEQRF(a, m, n, beta, v)); } int SUNDlsMat_denseGEQRF(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *v) { realtype ajj, s, mu, v1, v1_2; realtype *col_j, *col_k; sunindextype i, j, k; /* For each column...*/ for(j=0; j= n. * * v (of length m) must be provided as workspace. */ int denseORMQR(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *vn, realtype *vm, realtype *v) { return(SUNDlsMat_denseORMQR(a, m, n, beta, vn, vm, v)); } int SUNDlsMat_denseORMQR(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *vn, realtype *vm, realtype *v) { realtype *col_j, s; sunindextype i, j; /* Initialize vm */ for(i=0; i=0; j--) { col_j = a[j]; v[0] = ONE; s = vm[j]; for(i=1; i #include #include "sundials_context_impl.h" #if defined(SUNDIALS_BUILD_WITH_PROFILING) static SUNProfiler getSUNProfiler(SUNLinearSolver S) { return(S->sunctx->profiler); } #endif /* ----------------------------------------------------------------- * Create a new empty SUNLinearSolver object * ----------------------------------------------------------------- */ SUNLinearSolver SUNLinSolNewEmpty(SUNContext sunctx) { SUNLinearSolver LS; SUNLinearSolver_Ops ops; /* a context is required */ if (sunctx == NULL) return(NULL); /* create linear solver object */ LS = NULL; LS = (SUNLinearSolver) malloc(sizeof *LS); if (LS == NULL) return(NULL); /* create linear solver ops structure */ ops = NULL; ops = (SUNLinearSolver_Ops) malloc(sizeof *ops); if (ops == NULL) { free(LS); return(NULL); } /* initialize operations to NULL */ ops->gettype = NULL; ops->getid = NULL; ops->setatimes = NULL; ops->setpreconditioner = NULL; ops->setscalingvectors = NULL; ops->setzeroguess = NULL; ops->initialize = NULL; ops->setup = NULL; ops->solve = NULL; ops->numiters = NULL; ops->resnorm = NULL; ops->resid = NULL; ops->lastflag = NULL; ops->space = NULL; 
ops->free = NULL; /* attach ops and initialize content and context to NULL */ LS->ops = ops; LS->content = NULL; LS->sunctx = sunctx; return(LS); } /* ----------------------------------------------------------------- * Free a generic SUNLinearSolver (assumes content is already empty) * ----------------------------------------------------------------- */ void SUNLinSolFreeEmpty(SUNLinearSolver S) { if (S == NULL) return; /* free non-NULL ops structure */ if (S->ops) free(S->ops); S->ops = NULL; /* free overall N_Vector object and return */ free(S); return; } /* ----------------------------------------------------------------- * Functions in the 'ops' structure * -----------------------------------------------------------------*/ SUNLinearSolver_Type SUNLinSolGetType(SUNLinearSolver S) { return(S->ops->gettype(S)); } SUNLinearSolver_ID SUNLinSolGetID(SUNLinearSolver S) { if (S->ops->getid) return(S->ops->getid(S)); else return(SUNLINEARSOLVER_CUSTOM); } int SUNLinSolSetATimes(SUNLinearSolver S, void* A_data, SUNATimesFn ATimes) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); if (S->ops->setatimes) ier = S->ops->setatimes(S, A_data, ATimes); else ier = SUNLS_SUCCESS; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(ier); } int SUNLinSolSetPreconditioner(SUNLinearSolver S, void* P_data, SUNPSetupFn Pset, SUNPSolveFn Psol) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); if (S->ops->setpreconditioner) ier = S->ops->setpreconditioner(S, P_data, Pset, Psol); else ier = SUNLS_SUCCESS; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(ier); } int SUNLinSolSetScalingVectors(SUNLinearSolver S, N_Vector s1, N_Vector s2) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); if (S->ops->setscalingvectors) ier = S->ops->setscalingvectors(S, s1, s2); else ier = SUNLS_SUCCESS; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(ier); } int SUNLinSolSetZeroGuess(SUNLinearSolver S, booleantype onoff) { if (S->ops->setzeroguess) return ((int) 
S->ops->setzeroguess(S, onoff)); else return SUNLS_SUCCESS; } int SUNLinSolInitialize(SUNLinearSolver S) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); if (S->ops->initialize) ier = S->ops->initialize(S); else ier = SUNLS_SUCCESS; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(ier); } int SUNLinSolSetup(SUNLinearSolver S, SUNMatrix A) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); if (S->ops->setup) ier = S->ops->setup(S, A); else ier = SUNLS_SUCCESS; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(ier); } int SUNLinSolSolve(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); ier = S->ops->solve(S, A, x, b, tol); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(ier); } int SUNLinSolNumIters(SUNLinearSolver S) { int ier; if (S->ops->numiters) ier = S->ops->numiters(S); else ier = 0; return(ier); } realtype SUNLinSolResNorm(SUNLinearSolver S) { double result; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); if (S->ops->resnorm) result = S->ops->resnorm(S); else result = RCONST(0.0); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(result); } N_Vector SUNLinSolResid(SUNLinearSolver S) { N_Vector resid; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(S)); if (S->ops->resid) resid = S->ops->resid(S); else resid = NULL; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(S)); return(resid); } sunindextype SUNLinSolLastFlag(SUNLinearSolver S) { if (S->ops->lastflag) return ((sunindextype) S->ops->lastflag(S)); else return SUNLS_SUCCESS; } int SUNLinSolSpace(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS) { if (S->ops->space) return ((int) S->ops->space(S, lenrwLS, leniwLS)); else { *lenrwLS = 0; *leniwLS = 0; return SUNLS_SUCCESS; } } int SUNLinSolFree(SUNLinearSolver S) { if (S == NULL) return SUNLS_SUCCESS; /* if the free operation exists use it */ if (S->ops) if (S->ops->free) return(S->ops->free(S)); /* if we reach this point, either ops == NULL or 
free == NULL, try to cleanup by freeing the content, ops, and solver */ if (S->content) { free(S->content); S->content = NULL; } if (S->ops) { free(S->ops); S->ops = NULL; } free(S); S = NULL; return(SUNLS_SUCCESS); } StanHeaders/src/sundials/sundials_profiler.c0000644000176200001440000003035314645137106020767 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -----------------------------------------------------------------*/ #include #if SUNDIALS_MPI_ENABLED #include #include #elif defined(SUNDIALS_HAVE_POSIX_TIMERS) /* Minimum POSIX version needed for struct timespec and clock_monotonic */ #if !defined(_POSIX_C_SOURCE) || (_POSIX_C_SOURCE < 199309L) #define _POSIX_C_SOURCE 199309L #endif #include #include #include #else #error Either MPI_Wtime or clock_getttime is required but neither were found #endif #include #include #include #include #include "sundials_hashmap.h" #include "sundials_debug.h" #define SUNDIALS_ROOT_TIMER ((const char*) "From profiler epoch") /* Private functions */ #if SUNDIALS_MPI_ENABLED static int sunCollectTimers(SUNProfiler p); #endif static void sunPrintTimers(int idx, SUNHashMapKeyValue kv, FILE* fp, void* pvoid); static int sunCompareTimes(const void* l, const void* r); /* sunTimerStruct. A private structure holding timing information. 
*/ struct _sunTimerStruct { #if SUNDIALS_MPI_ENABLED double tic; double toc; #else struct timespec* tic; struct timespec* toc; #endif double average; double maximum; double elapsed; long count; }; typedef struct _sunTimerStruct sunTimerStruct; static sunTimerStruct* sunTimerStructNew(void) { sunTimerStruct* ts = (sunTimerStruct*) malloc(sizeof(sunTimerStruct)); #if SUNDIALS_MPI_ENABLED ts->tic = 0.0; ts->toc = 0.0; #else ts->tic = (struct timespec *) malloc(sizeof(struct timespec)); ts->toc = (struct timespec *) malloc(sizeof(struct timespec)); ts->tic->tv_sec = 0; ts->tic->tv_nsec = 0; #endif ts->elapsed = 0.0; ts->average = 0.0; ts->maximum = 0.0; ts->count = 0; return ts; } static void sunTimerStructFree(void* TS) { sunTimerStruct* ts = (sunTimerStruct*) TS; if (ts) { #if !SUNDIALS_MPI_ENABLED if (ts->tic) free(ts->tic); if (ts->toc) free(ts->toc); #endif free(ts); } } static void sunStartTiming(sunTimerStruct* entry) { #if SUNDIALS_MPI_ENABLED entry->tic = MPI_Wtime(); #else clock_gettime(CLOCK_MONOTONIC, entry->tic); #endif } static void sunStopTiming(sunTimerStruct* entry) { #if SUNDIALS_MPI_ENABLED entry->toc = MPI_Wtime(); entry->elapsed += entry->toc - entry->tic; #else clock_gettime(CLOCK_MONOTONIC, entry->toc); entry->elapsed += ((double) (entry->toc->tv_sec - entry->tic->tv_sec) + (double) (entry->toc->tv_nsec - entry->tic->tv_nsec) * 1e-9); #endif /* Initialize to total value */ entry->average = entry->elapsed; entry->maximum = entry->elapsed; } /* SUNProfiler. 
This structure holds all of the timers in a map.s */ struct _SUNProfiler { void* comm; char* title; SUNHashMap map; sunTimerStruct* overhead; double sundials_time; }; int SUNProfiler_Create(void* comm, const char* title, SUNProfiler* p) { SUNProfiler profiler; int max_entries; char* max_entries_env; *p = profiler = (SUNProfiler) malloc(sizeof(struct _SUNProfiler)); if (profiler == NULL) return(-1); profiler->overhead = sunTimerStructNew(); if (profiler->overhead == NULL) { free(profiler); *p = profiler = NULL; return(-1); } sunStartTiming(profiler->overhead); /* Check to see if max entries env variable was set, and use if it was. */ max_entries = 2560; max_entries_env = getenv("SUNPROFILER_MAX_ENTRIES"); if (max_entries_env) max_entries = atoi(max_entries_env); if (max_entries <= 0) max_entries = 2560; /* Create the hashmap used to store the timers */ if (SUNHashMap_New(max_entries, &profiler->map)) { sunTimerStructFree((void*) profiler->overhead); free(profiler); *p = profiler = NULL; return(-1); } /* Attach the comm, duplicating it if MPI is used. */ #if SUNDIALS_MPI_ENABLED profiler->comm = NULL; if (comm != NULL) { profiler->comm = malloc(sizeof(MPI_Comm)); MPI_Comm_dup(*((MPI_Comm*) comm), (MPI_Comm*) profiler->comm); } #else profiler->comm = comm; #endif /* Copy the title of the profiler (note strlen does not include terminating null character hence the +1) */ profiler->title = malloc((strlen(title) + 1) * sizeof(char)); strcpy(profiler->title, title); /* Initialize the overall timer to 0. 
*/ profiler->sundials_time = 0.0; SUNDIALS_MARK_BEGIN(profiler, SUNDIALS_ROOT_TIMER); sunStopTiming(profiler->overhead); return(0); } int SUNProfiler_Free(SUNProfiler* p) { if (p == NULL) return(-1); SUNDIALS_MARK_END(*p, SUNDIALS_ROOT_TIMER); if (*p) { SUNHashMap_Free(&(*p)->map, sunTimerStructFree); sunTimerStructFree((void*) (*p)->overhead); #if SUNDIALS_MPI_ENABLED if ((*p)->comm) { MPI_Comm_free((*p)->comm); free((*p)->comm); } #endif free((*p)->title); free(*p); } *p = NULL; return(0); } int SUNProfiler_Begin(SUNProfiler p, const char* name) { int ier; sunTimerStruct* timer = NULL; #ifdef SUNDIALS_DEBUG size_t slen; char* errmsg; #endif if (p == NULL) return(-1); sunStartTiming(p->overhead); if (SUNHashMap_GetValue(p->map, name, (void**) &timer)) { timer = sunTimerStructNew(); ier = SUNHashMap_Insert(p->map, name, (void*) timer); if (ier) { #ifdef SUNDIALS_DEBUG slen = strlen(name); errmsg = malloc(slen*sizeof(char)); sSTAN_SUNDIALS_PRINTF(errmsg, 128+slen, "(((( [ERROR] in SUNProfilerBegin: SUNHashMapInsert failed with code %d while inserting %s))))\n", ier, name); SUNDIALS_DEBUG_PRINT(errmsg); free(errmsg); #endif sunTimerStructFree(timer); sunStopTiming(p->overhead); return(-1); } } timer->count++; sunStartTiming(timer); sunStopTiming(p->overhead); return(0); } int SUNProfiler_End(SUNProfiler p, const char* name) { sunTimerStruct* timer; if (p == NULL) return(-1); sunStartTiming(p->overhead); if (SUNHashMap_GetValue(p->map, name, (void**) &timer)) { sunStopTiming(p->overhead); return(-1); } sunStopTiming(timer); sunStopTiming(p->overhead); return(0); } int SUNProfiler_Print(SUNProfiler p, FILE* fp) { int i = 0; int rank = 0; sunTimerStruct* timer = NULL; SUNHashMapKeyValue* sorted = NULL; if (p == NULL) return(-1); sunStartTiming(p->overhead); /* Get the total SUNDIALS time up to this point */ SUNDIALS_MARK_END(p, SUNDIALS_ROOT_TIMER); SUNDIALS_MARK_BEGIN(p, SUNDIALS_ROOT_TIMER); if (SUNHashMap_GetValue(p->map, SUNDIALS_ROOT_TIMER, (void**) &timer)) 
return(-1); p->sundials_time = timer->elapsed; #if SUNDIALS_MPI_ENABLED if (p->comm) { MPI_Comm_rank(*((MPI_Comm*) p->comm), &rank); /* Find the max and average time across all ranks */ sunCollectTimers(p); } #endif if (rank == 0) { /* Sort the timers in descending order */ if (SUNHashMap_Sort(p->map, &sorted, sunCompareTimes)) return(-1); STAN_SUNDIALS_FPRINTF(fp, "\n================================================================================================================\n"); STAN_SUNDIALS_FPRINTF(fp, "SUNDIALS GIT VERSION: %s\n", SUNDIALS_GIT_VERSION); STAN_SUNDIALS_FPRINTF(fp, "SUNDIALS PROFILER: %s\n", p->title); STAN_SUNDIALS_FPRINTF(fp, "%-40s\t %% time (inclusive) \t max/rank \t average/rank \t count \n", "Results:"); STAN_SUNDIALS_FPRINTF(fp, "================================================================================================================\n"); #if SUNDIALS_MPI_ENABLED if (p->comm == NULL) STAN_SUNDIALS_PRINTF("WARNING: no MPI communicator provided, times shown are for rank 0\n"); #endif /* Print all the other timers out */ for (i = 0; i < p->map->size; i++) if (sorted[i]) sunPrintTimers(i, sorted[i], fp, (void*) p); free(sorted); } sunStopTiming(p->overhead); if (rank == 0) { /* Print out the total time and the profiler overhead */ STAN_SUNDIALS_FPRINTF(fp, "%-40s\t %6.2f%% \t %.6fs \t -- \t\t -- \n", "Est. 
profiler overhead", p->overhead->elapsed/p->sundials_time, p->overhead->elapsed); /* End of output */ STAN_SUNDIALS_FPRINTF(fp, "\n"); } return(0); } #if SUNDIALS_MPI_ENABLED static void sunTimerStructReduceMaxAndSum(void* a, void* b, int* len, MPI_Datatype* dType) { sunTimerStruct* a_ts = (sunTimerStruct*) a; sunTimerStruct* b_ts = (sunTimerStruct*) b; int i; for (i = 0; i < *len; ++i) { b_ts[i].average += a_ts[i].elapsed; b_ts[i].maximum = SUNMAX(a_ts[i].maximum, b_ts[i].maximum); } } /* Find the max and average time across all ranks */ int sunCollectTimers(SUNProfiler p) { int i, rank, nranks; MPI_Comm comm = *((MPI_Comm*) p->comm); MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &nranks); sunTimerStruct** values = NULL; /* Extract the elapsed times from the hash map */ SUNHashMap_Values(p->map, (void***) &values, sizeof(sunTimerStruct)); sunTimerStruct* reduced = (sunTimerStruct*) malloc(p->map->size*sizeof(sunTimerStruct)); for (i = 0; i < p->map->size; ++i) reduced[i] = *values[i]; /* Register MPI datatype for sunTimerStruct */ MPI_Datatype tmp_type, MPI_sunTimerStruct; const int block_lens[2] = { 5, 1 }; const MPI_Datatype types[2] = { MPI_DOUBLE, MPI_LONG }; const MPI_Aint displ[2] = { offsetof(sunTimerStruct, tic), offsetof(sunTimerStruct, count) }; MPI_Aint lb, extent; MPI_Type_create_struct(2, block_lens, displ, types, &tmp_type); MPI_Type_get_extent(tmp_type, &lb, &extent); extent = sizeof(sunTimerStruct); MPI_Type_create_resized(tmp_type, lb, extent, &MPI_sunTimerStruct); MPI_Type_commit(&MPI_sunTimerStruct); /* Register max and sum MPI reduction operations for our datatype */ MPI_Op MPI_sunTimerStruct_MAXANDSUM; MPI_Op_create(sunTimerStructReduceMaxAndSum, 1, &MPI_sunTimerStruct_MAXANDSUM); /* Compute max and average time across all ranks */ if (rank == 0) { MPI_Reduce(MPI_IN_PLACE, reduced, p->map->size, MPI_sunTimerStruct, MPI_sunTimerStruct_MAXANDSUM, 0, comm); } else { MPI_Reduce(reduced, reduced, p->map->size, MPI_sunTimerStruct, 
MPI_sunTimerStruct_MAXANDSUM, 0, comm); } /* Cleanup custom MPI datatype and operations */ MPI_Type_free(&tmp_type); MPI_Type_free(&MPI_sunTimerStruct); MPI_Op_free(&MPI_sunTimerStruct_MAXANDSUM); /* Update the values that are in this rank's hash map. */ for (i = 0; i < p->map->size; ++i) { values[i]->average = reduced[i].average / (realtype) nranks; values[i]->maximum = reduced[i].maximum; } free(reduced); free(values); return(0); } #endif /* Print out the: timer name, percentage of exec time (based on the max), max across ranks, average across ranks, and the timer counter. */ void sunPrintTimers(int idx, SUNHashMapKeyValue kv, FILE* fp, void* pvoid) { SUNProfiler p = (SUNProfiler) pvoid; sunTimerStruct* ts = (sunTimerStruct*) kv->value; double maximum = ts->maximum; double average = ts->average; double percent = strcmp((const char*) kv->key, (const char*) SUNDIALS_ROOT_TIMER) ? maximum / p->sundials_time * 100 : 100; STAN_SUNDIALS_FPRINTF(fp, "%-40s\t %6.2f%% \t %.6fs \t %.6fs \t %ld\n", kv->key, percent, maximum, average, ts->count); } /* Comparator for qsort that compares key-value pairs based on the maximum time in the sunTimerStruct. */ int sunCompareTimes(const void* l, const void* r) { double left_max; double right_max; const SUNHashMapKeyValue left = *((SUNHashMapKeyValue*) l); const SUNHashMapKeyValue right = *((SUNHashMapKeyValue*) r); if (left == NULL && right == NULL) return(0); if (left == NULL) return(1); if (right == NULL) return(-1); left_max = ((sunTimerStruct*) left->value)->maximum; right_max = ((sunTimerStruct*) right->value)->maximum; if (left_max < right_max) return(1); if (left_max > right_max) return(-1); return(0); } StanHeaders/src/sundials/sundials_math.c0000644000176200001440000000561414645137106020100 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. 
Hindmarsh and
 * Aaron Collier @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * This is the implementation file for a simple C-language math
 * library.
 * -----------------------------------------------------------------*/

/* NOTE(review): the header names in these directives were lost when the
   file was extracted (angle-bracket contents stripped) -- confirm
   against the upstream sundials_math.c which headers belong here. */
#include
#include
#include
#include

/* Return nonzero when a is infinite.  Uses C99 isinf when available;
 * otherwise falls back to a magnitude test against BIG_REAL. */
static booleantype sunIsInf(realtype a)
{
#if (__STDC_VERSION__ >= 199901L)
  return(isinf(a));
#else
  return(a < -BIG_REAL || a > BIG_REAL);
#endif
}

/* Return nonzero when a is NaN.  Uses C99 isnan when available. */
static booleantype sunIsNaN(realtype a)
{
#if ( __STDC_VERSION__ >= 199901L)
  return(isnan(a));
#else
  /* Most compilers/platforms follow NaN != a,
   * but since C89 does not require this, it is
   * possible some platforms might not follow it.
   */
  return(a != a);
#endif
}

/* Compute base raised to an integer exponent by repeated
 * multiplication; negative exponents produce the reciprocal. */
realtype SUNRpowerI(realtype base, int exponent)
{
  int i, expt;
  realtype prod;

  prod = RCONST(1.0);
  expt = abs(exponent);
  for(i = 1; i <= expt; i++) prod *= base;
  if (exponent < 0) prod = RCONST(1.0)/prod;
  return(prod);
}

/* Compute base raised to a real exponent via the precision-matched
 * pow variant.  Note: returns 0 for any base <= 0 (including the case
 * base == 0, exponent == 0). */
realtype SUNRpowerR(realtype base, realtype exponent)
{
  if (base <= RCONST(0.0)) return(RCONST(0.0));

#if defined(SUNDIALS_USE_GENERIC_MATH)
  return((realtype) pow((double) base, (double) exponent));
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  return(pow(base, exponent));
#elif defined(SUNDIALS_SINGLE_PRECISION)
  return(powf(base, exponent));
#elif defined(SUNDIALS_EXTENDED_PRECISION)
  return(powl(base, exponent));
#endif
}

/* Compare a and b with the default tolerance 10*UNIT_ROUNDOFF.
 * NOTE the inverted semantics: returns SUNFALSE when the values are
 * considered equal and SUNTRUE when they DIFFER (see SUNRCompareTol). */
booleantype SUNRCompare(realtype a, realtype b)
{
  return(SUNRCompareTol(a, b, 10*UNIT_ROUNDOFF));
}

/* Tolerance-based comparison: returns SUNTRUE when a and b differ by
 * more than the tolerance (or either is NaN / a single Inf), SUNFALSE
 * when they are considered equal. */
booleantype SUNRCompareTol(realtype a, realtype b, realtype tol)
{
  realtype diff;
  realtype norm;

  /* If a and b are exactly equal.
   * This also covers the case where a and b are both inf under IEEE 754.
   */
  if (a == b) return(SUNFALSE);

  /* If a or b are NaN */
  if (sunIsNaN(a) || sunIsNaN(b)) return(SUNTRUE);

  /* If one of a or b are Inf (since we handled both being inf above) */
  if (sunIsInf(a) || sunIsInf(b)) return(SUNTRUE);

  diff = SUNRabs(a - b);
  norm = SUNMIN(SUNRabs(a + b), BIG_REAL);

  /* When |a + b| is very small (less than 10*UNIT_ROUNDOFF) or zero, we use an
   * absolute difference:
   *    |a - b| >= 10*UNIT_ROUNDOFF
   * Otherwise we use a relative difference:
   *    |a - b| < tol * |a + b|
   * The choice to use |a + b| over max(a, b)
   * is arbitrary, as is the choice to use
   * 10*UNIT_ROUNDOFF.
   */
  return(diff >= SUNMAX(10*UNIT_ROUNDOFF, tol*norm));
}
StanHeaders/src/sundials/sundials_matrix.c0000644000176200001440000001332114645137106020445 0ustar liggesusers/* -----------------------------------------------------------------
 * Programmer(s): Daniel Reynolds @ SMU
 *                David J. Gardner, Carol S. Woodward, and
 *                Slaven Peles @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * This is the implementation file for a generic SUNMATRIX package.
* It contains the implementation of the SUNMatrix operations listed * in sundials_matrix.h * -----------------------------------------------------------------*/ #include #include #include #include "sundials_context_impl.h" #if defined(SUNDIALS_BUILD_WITH_PROFILING) static SUNProfiler getSUNProfiler(SUNMatrix A) { return(A->sunctx->profiler); } #endif /* ----------------------------------------------------------------- * Create a new empty SUNMatrix object * ----------------------------------------------------------------- */ SUNMatrix SUNMatNewEmpty(SUNContext sunctx) { SUNMatrix A; SUNMatrix_Ops ops; /* a context is required */ if (sunctx == NULL) return(NULL); /* create matrix object */ A = NULL; A = (SUNMatrix) malloc(sizeof *A); if (A == NULL) return(NULL); /* create matrix ops structure */ ops = NULL; ops = (SUNMatrix_Ops) malloc(sizeof *ops); if (ops == NULL) { free(A); return(NULL); } /* initialize operations to NULL */ ops->getid = NULL; ops->clone = NULL; ops->destroy = NULL; ops->zero = NULL; ops->copy = NULL; ops->scaleadd = NULL; ops->scaleaddi = NULL; ops->matvecsetup = NULL; ops->matvec = NULL; ops->space = NULL; /* attach ops and initialize content to NULL */ A->ops = ops; A->content = NULL; A->sunctx = sunctx; return(A); } /* ----------------------------------------------------------------- * Free a generic SUNMatrix (assumes content is already empty) * ----------------------------------------------------------------- */ void SUNMatFreeEmpty(SUNMatrix A) { if (A == NULL) return; /* free non-NULL ops structure */ if (A->ops) free(A->ops); A->ops = NULL; /* free overall SUNMatrix object and return */ free(A); return; } /* ----------------------------------------------------------------- * Copy a matrix 'ops' structure * -----------------------------------------------------------------*/ int SUNMatCopyOps(SUNMatrix A, SUNMatrix B) { /* Check that ops structures exist */ if (A == NULL || B == NULL) return(-1); if (A->ops == NULL || B->ops == NULL) 
return(-1); /* Copy ops from A to B */ B->ops->getid = A->ops->getid; B->ops->clone = A->ops->clone; B->ops->destroy = A->ops->destroy; B->ops->zero = A->ops->zero; B->ops->copy = A->ops->copy; B->ops->scaleadd = A->ops->scaleadd; B->ops->scaleaddi = A->ops->scaleaddi; B->ops->matvecsetup = A->ops->matvecsetup; B->ops->matvec = A->ops->matvec; B->ops->space = A->ops->space; return(0); } /* ----------------------------------------------------------------- * Functions in the 'ops' structure * ----------------------------------------------------------------- */ SUNMatrix_ID SUNMatGetID(SUNMatrix A) { SUNMatrix_ID id; id = A->ops->getid(A); return(id); } SUNMatrix SUNMatClone(SUNMatrix A) { SUNMatrix B = NULL; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); B = A->ops->clone(A); B->sunctx = A->sunctx; SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(B); } void SUNMatDestroy(SUNMatrix A) { if (A == NULL) return; /* if the destroy operation exists use it */ if (A->ops) if (A->ops->destroy) { A->ops->destroy(A); return; } /* if we reach this point, either ops == NULL or destroy == NULL, try to cleanup by freeing the content, ops, and matrix */ if (A->content) { free(A->content); A->content = NULL; } if (A->ops) { free(A->ops); A->ops = NULL; } free(A); A = NULL; return; } int SUNMatZero(SUNMatrix A) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); ier = A->ops->zero(A); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(ier); } int SUNMatCopy(SUNMatrix A, SUNMatrix B) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); ier = A->ops->copy(A, B); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(ier); } int SUNMatScaleAdd(realtype c, SUNMatrix A, SUNMatrix B) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); ier = A->ops->scaleadd(c, A, B); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(ier); } int SUNMatScaleAddI(realtype c, SUNMatrix A) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); ier = A->ops->scaleaddi(c, 
A); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(ier); } int SUNMatMatvecSetup(SUNMatrix A) { int ier = 0; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); if (A->ops->matvecsetup) ier = A->ops->matvecsetup(A); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(ier); } int SUNMatMatvec(SUNMatrix A, N_Vector x, N_Vector y) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); ier = A->ops->matvec(A, x, y); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(ier); } int SUNMatSpace(SUNMatrix A, long int *lenrw, long int *leniw) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(A)); ier = A->ops->space(A, lenrw, leniw); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(A)); return(ier); } StanHeaders/src/sundials/sundials_hip_kernels.hip.hpp0000644000176200001440000003307714645137106022602 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 */

/* NOTE(review): throughout this file the contents of C++ template
   parameter lists, template arguments, and #include <...> targets
   appear to have been stripped by text extraction (e.g. "template
   __device__ T shfl_xor_sync(...)" has lost its "<typename T>").
   The code is kept byte-identical below; confirm against the upstream
   SUNDIALS sundials_hip_kernels.hip.hpp before building. */

#ifndef _SUNDIALS_HIP_KERNELS_CUH
#define _SUNDIALS_HIP_KERNELS_CUH

#define SUNDIALS_HOST_DEVICE __host__ __device__
#define SUNDIALS_DEVICE_INLINE __forceinline__

#include "sundials_reductions.hpp"

/* Canonical grid-stride loop over [0, max) in the x dimension. */
#define GRID_STRIDE_XLOOP(type, iter, max) \
  for (type iter = blockDim.x * blockIdx.x + threadIdx.x; \
       iter < max; \
       iter += blockDim.x * gridDim.x)

#include "sundials_hip.h"

namespace sundials { namespace hip { namespace impl {

/* Portability shims mapping the CUDA-style *_sync shuffle names onto
   the HIP intrinsics (which take no mask argument). */
template __forceinline__ __device__ T shfl_xor_sync(T var, int laneMask);
template __forceinline__ __device__ T shfl_sync(T var, int srcLane);
template __forceinline__ __device__ T shfl_down_sync(T var, int srcLane);
template <> __forceinline__ __device__ int shfl_xor_sync(int var, int laneMask) { return ::__shfl_xor(var, laneMask); }
template <> __forceinline__ __device__ float shfl_xor_sync(float var, int laneMask) { return ::__shfl_xor(var, laneMask); }
template <> __forceinline__ __device__ double shfl_xor_sync(double var, int laneMask) { return ::__shfl_xor(var, laneMask); }
template <> __forceinline__ __device__ int shfl_sync(int var, int srcLane) { return ::__shfl(var, srcLane); }
template <> __forceinline__ __device__ float shfl_sync(float var, int srcLane) { return ::__shfl(var, srcLane); }
template <> __forceinline__ __device__ double shfl_sync(double var, int srcLane) { return ::__shfl(var, srcLane); }
template<> __forceinline__ __device__ float shfl_down_sync(float val, int srcLane) { return ::__shfl_down(val, srcLane); }
template<> __forceinline__ __device__ double shfl_down_sync(double val, int srcLane) { return ::__shfl_down(val, srcLane); }

/* The atomic functions below are implemented using the atomic compare
   and swap function atomicCAS which performs an atomic version of
   (*address == assumed) ? (assumed + val) : *address.
   Since *address could change between when the value is loaded and the
   atomicCAS call the operation is repeated until *address does not
   change between the read and the compare and swap operation. */

/* Software atomicAdd for doubles via a CAS loop, used when the target
   architecture lacks a native double atomic add. */
__forceinline__ __device__ double atomicAdd(double* address, double val)
{
#ifndef __HIP_ARCH_HAS_DOUBLE_ATOMIC_ADD__
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
  } while (assumed != old);
  return __longlong_as_double(old);
#else
  return ::atomicAdd(address, val);
#endif
}

/* Software atomicAdd for floats via a CAS loop (same scheme as above). */
__forceinline__ __device__ float atomicAdd(float* address, float val)
{
#ifndef __HIP_ARCH_HAS_FLOAT_ATOMIC_ADD__
  unsigned int* address_as_ull = (unsigned int*)address;
  unsigned int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __float_as_int(val + __int_as_float(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
  } while (assumed != old);
  return __int_as_float(old);
#else
  return ::atomicAdd(address, val);
#endif
}

/*
 * Compute the maximum of 2 double-precision floating point values using an atomic operation
 * "address" is the address of the reference value which might get updated with the maximum
 * "value" is the value that is compared to the reference in order to determine the maximum
 */
__forceinline__ __device__ void atomicMax(double* const address, const double value)
{
  /* Cheap early exit; harmless if *address changes afterwards because
     the CAS loop below re-checks. */
  if (*address >= value)
  {
    return;
  }

  unsigned long long * const address_as_i = (unsigned long long *)address;
  unsigned long long old = * address_as_i, assumed;

  do
  {
    assumed = old;
    if (__longlong_as_double(assumed) >= value)
    {
      break;
    }
    old = atomicCAS(address_as_i, assumed, __double_as_longlong(value));
  } while (assumed != old);
}

/*
 * Compute the maximum of 2 single-precision floating point values using an atomic operation
 * "address" is the address of the reference value which might get updated with the maximum
 * "value" is the value that is compared to the reference in order to determine the maximum
 */
__forceinline__ __device__ void atomicMax(float* const address, const float value)
{
  if (*address >= value)
  {
    return;
  }

  unsigned int* const address_as_i = (unsigned int *)address;
  unsigned int old = *address_as_i, assumed;

  do
  {
    assumed = old;
    if (__int_as_float(assumed) >= value)
    {
      break;
    }
    old = atomicCAS(address_as_i, assumed, __float_as_int(value));
  } while (assumed != old);
}

/*
 * Compute the minimum of 2 double-precision floating point values using an atomic operation
 * "address" is the address of the reference value which might get updated with the minimum
 * "value" is the value that is compared to the reference in order to determine the minimum
 */
__forceinline__ __device__ void atomicMin(double* const address, const double value)
{
  if (*address <= value)
  {
    return;
  }

  unsigned long long* const address_as_i = (unsigned long long *)address;
  unsigned long long old = *address_as_i, assumed;

  do
  {
    assumed = old;
    if (__longlong_as_double(assumed) <= value)
    {
      break;
    }
    old = atomicCAS(address_as_i, assumed, __double_as_longlong(value));
  } while (assumed != old);
}

/*
 * Compute the minimum of 2 single-precision floating point values using an atomic operation
 * "address" is the address of the reference value which might get updated with the minimum
 * "value" is the value that is compared to the reference in order to determine the minimum
 */
__forceinline__ __device__ void atomicMin(float* const address, const float value)
{
  if (*address <= value)
  {
    return;
  }

  unsigned int* const address_as_i = (unsigned int *)address;
  unsigned int old = *address_as_i, assumed;

  do
  {
    assumed = old;
    if (__int_as_float(assumed) <= value)
    {
      break;
    }
    old = atomicCAS(address_as_i, assumed, __float_as_int(value));
  } while (assumed != old);
}

//
// Atomic specializations of sundials::reductions operators
//

/* NOTE(review): the specialization arguments of these three partial
   specializations (presumably plus/maximum/minimum from
   sundials_reductions.hpp) were stripped by extraction -- TODO confirm
   against the upstream header. */
template struct atomic;

template struct atomic> {
  __device__ __forceinline__ void operator()(T* out, const T val)
  {
    atomicAdd(out, val);
  }
};

template struct atomic> {
  __device__ __forceinline__ void operator()(T* out, const T val)
  {
    atomicMax(out, val);
  }
};

template struct atomic> {
  __device__ __forceinline__ void operator()(T* out, const T val)
  {
    atomicMin(out, val);
  }
};

/*
 * Perform a reduce on the warp to get the operation result.
 */
template __inline__ __device__ T warpReduceShflDown(T val)
{
  for (int offset = warpSize/2; offset > 0; offset /= 2)
  {
    T rhs = shfl_down_sync(val, offset);
    val = BinaryReductionOp{}(val, rhs);
  }
  return val;
}

/*
 * Reduce value across the thread block.
 */
template __inline__ __device__ T blockReduceShflDown(T val, T identity)
{
  // Shared memory for the partial sums
  static __shared__ T shared[MAX_WARPS];

  int numThreads = blockDim.x * blockDim.y * blockDim.z;
  int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
  int warpId = threadId / WARP_SIZE;
  int warpLane = threadId % WARP_SIZE;

  // Each warp performs partial reduction
  val = warpReduceShflDown(val);

  // Write reduced value from each warp to shared memory
  if (warpLane == 0) shared[warpId] = val;

  // Wait for all partial reductions to complete
  __syncthreads();

  // Read per warp values from shared memory only if that warp existed
  val = (threadId < numThreads / warpSize) ? shared[warpLane] : identity;

  // Final reduce within first warp
  if (warpId == 0) val = warpReduceShflDown(val);

  return val;
}

/*
 * Warp reduce + block reduce using shfl instead of shfl_down.
 */
template __inline__ __device__ T blockReduceShfl(T val, T identity)
{
  int numThreads = blockDim.x * blockDim.y * blockDim.z;
  int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
  int warpId = threadId / WARP_SIZE;
  int warpLane = threadId % WARP_SIZE;

  T temp = val;

  // Reduce each warp
  if (numThreads % WARP_SIZE == 0)
  {
    for (int i = 1; i < WARP_SIZE; i *= 2)
    {
      T rhs = shfl_xor_sync(temp, i);
      temp = BinaryReductionOp{}(temp, rhs);
    }
  }
  else
  {
    for (int i = 1; i < WARP_SIZE; i *= 2)
    {
      int srcLane = threadId ^ i;
      T rhs = shfl_sync(temp, srcLane);
      // Only add from threads that exist to avoid double counting
      if (srcLane < numThreads) temp = BinaryReductionOp{}(temp, rhs);
    }
  }

  // Reduce per warp values
  if (numThreads > WARP_SIZE)
  {
    static_assert(MAX_WARPS <= WARP_SIZE, "max warps must be <= warp size for this algorithm to work");

    __shared__ T shared[MAX_WARPS];

    // Write per warp values to shared memory
    if (warpLane == 0) shared[warpId] = temp;

    __syncthreads();

    if (warpId == 0)
    {
      // Read per warp values only if the warp existed
      temp = (warpLane * WARP_SIZE < numThreads) ? shared[warpLane] : identity;

      // Final reduction
      for (int i = 1; i < MAX_WARPS; i *= 2)
      {
        T rhs = shfl_xor_sync(temp, i);
        temp = BinaryReductionOp{}(temp, rhs);
      }
    }

    __syncthreads();
  }

  return temp;
}

/*
 * Reduce values into thread 0 of the last running thread block.
 * Output value is device_mem[0].
 */
template __device__ __forceinline__ void gridReduce(T val, T identity, T* device_mem, unsigned int* device_count)
{
  int numBlocks = gridDim.x * gridDim.y * gridDim.z;
  int numThreads = blockDim.x * blockDim.y * blockDim.z;
  unsigned int wrap_around = numBlocks - 1;

  int blockId = blockIdx.x + gridDim.x * blockIdx.y + (gridDim.x * gridDim.y) * blockIdx.z;
  int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;

  // Each block reduces a subset of the input
  T temp = blockReduceShfl(val, identity);

  __shared__ bool isLastBlockDone;
  if (threadId == 0)
  {
    // One thread per block stores the partial reductions to global memory
    device_mem[blockId] = temp;

    // Ensure write visible to all threads
    __threadfence();

    // Increment counter, (wraps back to zero if old count == wrap_around)
    unsigned int old_count = atomicInc(device_count, wrap_around);
    isLastBlockDone = (old_count == wrap_around) ? 1 : 0;
  }

  // Synchronize to ensure that each thread reads the
  // correct value of isLastBlockDone.
  __syncthreads();

  // The last block reduces values in device_mem
  if (isLastBlockDone)
  {
    // Reduce thread_i in each block into temp
    temp = identity;
    for (int i = threadId; i < numBlocks; i += numThreads)
      temp = BinaryReductionOp{}(temp, device_mem[i]);

    // Compute the final block partial reductions
    temp = blockReduceShfl(temp, identity);

    // One thread returns the final value
    if (threadId == 0) device_mem[0] = temp;
  }
}

/* Grid-wide reduction that folds each block's partial result into
   device_mem[0] with the matching atomic functor. */
template __device__ __forceinline__ void gridReduceAtomic(T val, T identity, T* device_mem)
{
  int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
  val = blockReduceShflDown(val, identity);
  // Final reduction of all block values into the output device_mem
  if (threadId == 0) atomic{}(device_mem, val);
}

/* Policy object selecting the two-phase (shared-memory + counter)
   grid reduction. */
template struct GridReducerLDS {
  __device__ __forceinline__ void operator()(T val, T identity, T* device_mem, unsigned int* device_count)
  {
    return impl::gridReduce(val, identity, device_mem, device_count);
  }
};

/* Policy object selecting the atomic grid reduction; device_count is
   accepted but unused for signature compatibility. */
template struct GridReducerAtomic {
  __device__ __forceinline__ void operator()(T val, T identity, T* device_mem, unsigned int* device_count)
  {
    return impl::gridReduceAtomic(val, identity, device_mem);
  }
};

} // namespace impl
} // namespace hip
} // namespace sundials

#endif // _SUNDIALS_HIP_KERNELS_CUH
StanHeaders/src/sundials/fmod/0000755000176200001440000000000014511135055016011 5ustar liggesusersStanHeaders/src/sundials/fmod/fsundials_context_mod.f900000644000176200001440000000735314645137106022743 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org).
! Version 4.0.0
!
! Do not make changes to this file unless you know what you are doing--modify
! the SWIG interface file instead.
! ---------------------------------------------------------------
! Programmer(s): Auto-generated by swig.
! ---------------------------------------------------------------
! SUNDIALS Copyright Start
! Copyright (c) 2002-2022, Lawrence Livermore National Security
!
and Southern Methodist University.
! All rights reserved.
!
! See the top-level LICENSE and NOTICE files for details.
!
! SPDX-License-Identifier: BSD-3-Clause
! SUNDIALS Copyright End
! ---------------------------------------------------------------

#include "sundials/sundials_config.h"

! Fortran bindings (SWIG-generated) for the SUNContext C API; each
! public function forwards to a bind(C) wrapper named _wrap_*.
module fsundials_context_mod
 use, intrinsic :: ISO_C_BINDING
 implicit none
 private

 ! DECLARATION CONSTRUCTS
 public :: FSUNContext_GetProfiler
 public :: FSUNContext_SetProfiler
 public :: FSUNContext_Free
 public :: FSUNContext_Create

! WRAPPER DECLARATIONS
interface
! C wrapper returning the profiler attached to a SUNContext.
function swigc_FSUNContext_GetProfiler(farg1, farg2) &
bind(C, name="_wrap_FSUNContext_GetProfiler") &
result(fresult)
use, intrinsic :: ISO_C_BINDING
type(C_PTR), value :: farg1
type(C_PTR), value :: farg2
integer(C_INT) :: fresult
end function

! C wrapper attaching a profiler to a SUNContext.
function swigc_FSUNContext_SetProfiler(farg1, farg2) &
bind(C, name="_wrap_FSUNContext_SetProfiler") &
result(fresult)
use, intrinsic :: ISO_C_BINDING
type(C_PTR), value :: farg1
type(C_PTR), value :: farg2
integer(C_INT) :: fresult
end function

! C wrapper destroying a SUNContext.
function swigc_FSUNContext_Free(farg1) &
bind(C, name="_wrap_FSUNContext_Free") &
result(fresult)
use, intrinsic :: ISO_C_BINDING
type(C_PTR), value :: farg1
integer(C_INT) :: fresult
end function

! C wrapper creating a SUNContext.
function swigc_FSUNContext_Create(farg1, farg2) &
bind(C, name="_wrap_FSUNContext_Create") &
result(fresult)
use, intrinsic :: ISO_C_BINDING
type(C_PTR), value :: farg1
type(C_PTR), value :: farg2
integer(C_INT) :: fresult
end function

end interface

contains
 ! MODULE SUBPROGRAMS

! Retrieve the profiler attached to sunctx into profiler.
function FSUNContext_GetProfiler(sunctx, profiler) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: sunctx
type(C_PTR), target, intent(inout) :: profiler
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = sunctx
farg2 = c_loc(profiler)
fresult = swigc_FSUNContext_GetProfiler(farg1, farg2)
swig_result = fresult
end function

! Attach profiler to sunctx.
function FSUNContext_SetProfiler(sunctx, profiler) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: sunctx
type(C_PTR) :: profiler
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = sunctx
farg2 = profiler
fresult = swigc_FSUNContext_SetProfiler(farg1, farg2)
swig_result = fresult
end function

! Destroy the context ctx.
function FSUNContext_Free(ctx) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR), target, intent(inout) :: ctx
integer(C_INT) :: fresult
type(C_PTR) :: farg1

farg1 = c_loc(ctx)
fresult = swigc_FSUNContext_Free(farg1)
swig_result = fresult
end function

! Create a context in ctx.  Note: the C wrapper is called with
! c_null_ptr rather than the comm argument; when profiling is built
! in, a default profiler is created with comm and attached instead.
function FSUNContext_Create(comm, ctx) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
#ifdef SUNDIALS_BUILD_WITH_PROFILING
use fsundials_profiler_mod, only : FSUNProfiler_Create
#endif
integer(C_INT) :: swig_result
type(C_PTR) :: comm
type(C_PTR), target, intent(inout) :: ctx
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
#ifdef SUNDIALS_BUILD_WITH_PROFILING
type(C_PTR) :: profiler
#endif

farg1 = comm
farg2 = c_loc(ctx)
fresult = swigc_FSUNContext_Create(c_null_ptr, farg2)
#ifdef SUNDIALS_BUILD_WITH_PROFILING
fresult = FSUNProfiler_Create(farg1, "FSUNContext Default", profiler)
fresult = swigc_FSUNContext_SetProfiler(ctx, profiler)
#endif
swig_result = fresult
end function

end module
StanHeaders/src/sundials/fmod/fsundials_matrix_mod.c0000644000176200001440000002241714645137106022405 0ustar liggesusers/* 
---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 
4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define 
__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. 
*/ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_matrix.h" SWIGEXPORT SUNMatrix _wrap_FSUNMatNewEmpty(void *farg1) { SUNMatrix fresult ; SUNContext arg1 = (SUNContext) 0 ; SUNMatrix result; arg1 = (SUNContext)(farg1); result = (SUNMatrix)SUNMatNewEmpty(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNMatFreeEmpty(SUNMatrix farg1) { SUNMatrix arg1 = (SUNMatrix) 0 ; arg1 = (SUNMatrix)(farg1); SUNMatFreeEmpty(arg1); } SWIGEXPORT int _wrap_FSUNMatCopyOps(SUNMatrix farg1, SUNMatrix farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatCopyOps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatGetID(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix_ID result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix_ID)SUNMatGetID(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT SUNMatrix _wrap_FSUNMatClone(SUNMatrix farg1) { SUNMatrix fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix result; arg1 = (SUNMatrix)(farg1); result = (SUNMatrix)SUNMatClone(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNMatDestroy(SUNMatrix farg1) { SUNMatrix arg1 = (SUNMatrix) 0 ; arg1 = (SUNMatrix)(farg1); SUNMatDestroy(arg1); } SWIGEXPORT int _wrap_FSUNMatZero(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); result = (int)SUNMatZero(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatCopy(SUNMatrix farg1, SUNMatrix farg2) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; SUNMatrix arg2 = 
(SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatCopy(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAdd(double const *farg1, SUNMatrix farg2, SUNMatrix farg3) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; SUNMatrix arg3 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); arg3 = (SUNMatrix)(farg3); result = (int)SUNMatScaleAdd(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatScaleAddI(double const *farg1, SUNMatrix farg2) { int fresult ; realtype arg1 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (realtype)(*farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNMatScaleAddI(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatMatvecSetup(SUNMatrix farg1) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; int result; arg1 = (SUNMatrix)(farg1); result = (int)SUNMatMatvecSetup(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatMatvec(SUNMatrix farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNMatMatvec(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNMatSpace(SUNMatrix farg1, long *farg2, long *farg3) { int fresult ; SUNMatrix arg1 = (SUNMatrix) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNMatrix)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNMatSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } StanHeaders/src/sundials/fmod/fsundials_linearsolver_mod.f900000644000176200001440000007511714645137106023767 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! 
Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. module fsundials_linearsolver_mod use, intrinsic :: ISO_C_BINDING use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS enum, bind(c) enumerator :: PREC_NONE enumerator :: PREC_LEFT enumerator :: PREC_RIGHT enumerator :: PREC_BOTH end enum public :: PREC_NONE, PREC_LEFT, PREC_RIGHT, PREC_BOTH enum, bind(c) enumerator :: SUN_PREC_NONE enumerator :: SUN_PREC_LEFT enumerator :: SUN_PREC_RIGHT enumerator :: SUN_PREC_BOTH end enum public :: SUN_PREC_NONE, SUN_PREC_LEFT, SUN_PREC_RIGHT, SUN_PREC_BOTH enum, bind(c) enumerator :: MODIFIED_GS = 1 enumerator :: CLASSICAL_GS = 2 end enum public :: MODIFIED_GS, CLASSICAL_GS enum, bind(c) enumerator :: SUN_MODIFIED_GS = 1 enumerator :: SUN_CLASSICAL_GS = 2 end enum public :: SUN_MODIFIED_GS, SUN_CLASSICAL_GS public :: FSUNModifiedGS public :: FModifiedGS public :: FSUNClassicalGS public :: FClassicalGS public :: FSUNQRfact public :: FQRfact public :: FSUNQRsol public :: FQRsol public :: FSUNQRAdd_MGS public :: FSUNQRAdd_ICWY public :: FSUNQRAdd_ICWY_SB public :: FSUNQRAdd_CGS2 public :: FSUNQRAdd_DCGS2 public :: FSUNQRAdd_DCGS2_SB ! typedef enum SUNLinearSolver_Type enum, bind(c) enumerator :: SUNLINEARSOLVER_DIRECT enumerator :: SUNLINEARSOLVER_ITERATIVE enumerator :: SUNLINEARSOLVER_MATRIX_ITERATIVE enumerator :: SUNLINEARSOLVER_MATRIX_EMBEDDED end enum integer, parameter, public :: SUNLinearSolver_Type = kind(SUNLINEARSOLVER_DIRECT) public :: SUNLINEARSOLVER_DIRECT, SUNLINEARSOLVER_ITERATIVE, SUNLINEARSOLVER_MATRIX_ITERATIVE, & SUNLINEARSOLVER_MATRIX_EMBEDDED ! 
typedef enum SUNLinearSolver_ID enum, bind(c) enumerator :: SUNLINEARSOLVER_BAND enumerator :: SUNLINEARSOLVER_DENSE enumerator :: SUNLINEARSOLVER_KLU enumerator :: SUNLINEARSOLVER_LAPACKBAND enumerator :: SUNLINEARSOLVER_LAPACKDENSE enumerator :: SUNLINEARSOLVER_PCG enumerator :: SUNLINEARSOLVER_SPBCGS enumerator :: SUNLINEARSOLVER_SPFGMR enumerator :: SUNLINEARSOLVER_SPGMR enumerator :: SUNLINEARSOLVER_SPTFQMR enumerator :: SUNLINEARSOLVER_SUPERLUDIST enumerator :: SUNLINEARSOLVER_SUPERLUMT enumerator :: SUNLINEARSOLVER_CUSOLVERSP_BATCHQR enumerator :: SUNLINEARSOLVER_MAGMADENSE enumerator :: SUNLINEARSOLVER_ONEMKLDENSE enumerator :: SUNLINEARSOLVER_CUSTOM end enum integer, parameter, public :: SUNLinearSolver_ID = kind(SUNLINEARSOLVER_BAND) public :: SUNLINEARSOLVER_BAND, SUNLINEARSOLVER_DENSE, SUNLINEARSOLVER_KLU, SUNLINEARSOLVER_LAPACKBAND, & SUNLINEARSOLVER_LAPACKDENSE, SUNLINEARSOLVER_PCG, SUNLINEARSOLVER_SPBCGS, SUNLINEARSOLVER_SPFGMR, SUNLINEARSOLVER_SPGMR, & SUNLINEARSOLVER_SPTFQMR, SUNLINEARSOLVER_SUPERLUDIST, SUNLINEARSOLVER_SUPERLUMT, SUNLINEARSOLVER_CUSOLVERSP_BATCHQR, & SUNLINEARSOLVER_MAGMADENSE, SUNLINEARSOLVER_ONEMKLDENSE, SUNLINEARSOLVER_CUSTOM ! struct struct _generic_SUNLinearSolver_Ops type, bind(C), public :: SUNLinearSolver_Ops type(C_FUNPTR), public :: gettype type(C_FUNPTR), public :: getid type(C_FUNPTR), public :: setatimes type(C_FUNPTR), public :: setpreconditioner type(C_FUNPTR), public :: setscalingvectors type(C_FUNPTR), public :: setzeroguess type(C_FUNPTR), public :: initialize type(C_FUNPTR), public :: setup type(C_FUNPTR), public :: solve type(C_FUNPTR), public :: numiters type(C_FUNPTR), public :: resnorm type(C_FUNPTR), public :: lastflag type(C_FUNPTR), public :: space type(C_FUNPTR), public :: resid type(C_FUNPTR), public :: free end type SUNLinearSolver_Ops ! 
struct struct _generic_SUNLinearSolver type, bind(C), public :: SUNLinearSolver type(C_PTR), public :: content type(C_PTR), public :: ops type(C_PTR), public :: sunctx end type SUNLinearSolver public :: FSUNLinSolNewEmpty public :: FSUNLinSolFreeEmpty public :: FSUNLinSolGetType public :: FSUNLinSolGetID public :: FSUNLinSolSetATimes public :: FSUNLinSolSetPreconditioner public :: FSUNLinSolSetScalingVectors public :: FSUNLinSolSetZeroGuess public :: FSUNLinSolInitialize public :: FSUNLinSolSetup public :: FSUNLinSolSolve public :: FSUNLinSolNumIters public :: FSUNLinSolResNorm public :: FSUNLinSolResid public :: FSUNLinSolLastFlag public :: FSUNLinSolSpace public :: FSUNLinSolFree integer(C_INT), parameter, public :: SUNLS_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: SUNLS_MEM_NULL = -801_C_INT integer(C_INT), parameter, public :: SUNLS_ILL_INPUT = -802_C_INT integer(C_INT), parameter, public :: SUNLS_MEM_FAIL = -803_C_INT integer(C_INT), parameter, public :: SUNLS_ATIMES_NULL = -804_C_INT integer(C_INT), parameter, public :: SUNLS_ATIMES_FAIL_UNREC = -805_C_INT integer(C_INT), parameter, public :: SUNLS_PSET_FAIL_UNREC = -806_C_INT integer(C_INT), parameter, public :: SUNLS_PSOLVE_NULL = -807_C_INT integer(C_INT), parameter, public :: SUNLS_PSOLVE_FAIL_UNREC = -808_C_INT integer(C_INT), parameter, public :: SUNLS_PACKAGE_FAIL_UNREC = -809_C_INT integer(C_INT), parameter, public :: SUNLS_GS_FAIL = -810_C_INT integer(C_INT), parameter, public :: SUNLS_QRSOL_FAIL = -811_C_INT integer(C_INT), parameter, public :: SUNLS_VECTOROP_ERR = -812_C_INT integer(C_INT), parameter, public :: SUNLS_RES_REDUCED = 801_C_INT integer(C_INT), parameter, public :: SUNLS_CONV_FAIL = 802_C_INT integer(C_INT), parameter, public :: SUNLS_ATIMES_FAIL_REC = 803_C_INT integer(C_INT), parameter, public :: SUNLS_PSET_FAIL_REC = 804_C_INT integer(C_INT), parameter, public :: SUNLS_PSOLVE_FAIL_REC = 805_C_INT integer(C_INT), parameter, public :: SUNLS_PACKAGE_FAIL_REC = 806_C_INT 
integer(C_INT), parameter, public :: SUNLS_QRFACT_FAIL = 807_C_INT integer(C_INT), parameter, public :: SUNLS_LUFACT_FAIL = 808_C_INT ! WRAPPER DECLARATIONS interface function swigc_FSUNModifiedGS(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNModifiedGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FModifiedGS(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FModifiedGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNClassicalGS(farg1, farg2, farg3, farg4, farg5, farg6, farg7) & bind(C, name="_wrap_FSUNClassicalGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 integer(C_INT) :: fresult end function function swigc_FClassicalGS(farg1, farg2, farg3, farg4, farg5, farg6, farg7) & bind(C, name="_wrap_FClassicalGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 integer(C_INT) :: fresult end function function swigc_FSUNQRfact(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNQRfact") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT) :: fresult end 
function function swigc_FQRfact(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FQRfact") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNQRsol(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNQRsol") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FQRsol(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FQRsol") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNQRAdd_MGS(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FSUNQRAdd_MGS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT), intent(in) :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FSUNQRAdd_ICWY(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FSUNQRAdd_ICWY") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT), intent(in) :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FSUNQRAdd_ICWY_SB(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FSUNQRAdd_ICWY_SB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT), intent(in) :: farg5 type(C_PTR), 
value :: farg6 integer(C_INT) :: fresult end function function swigc_FSUNQRAdd_CGS2(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FSUNQRAdd_CGS2") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT), intent(in) :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FSUNQRAdd_DCGS2(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FSUNQRAdd_DCGS2") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT), intent(in) :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FSUNQRAdd_DCGS2_SB(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FSUNQRAdd_DCGS2_SB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT), intent(in) :: farg4 integer(C_INT), intent(in) :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FSUNLinSolNewEmpty(farg1) & bind(C, name="_wrap_FSUNLinSolNewEmpty") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FSUNLinSolFreeEmpty(farg1) & bind(C, name="_wrap_FSUNLinSolFreeEmpty") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FSUNLinSolGetType(farg1) & bind(C, name="_wrap_FSUNLinSolGetType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolGetID(farg1) & bind(C, name="_wrap_FSUNLinSolGetID") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function 
swigc_FSUNLinSolSetATimes(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetATimes") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetPreconditioner(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FSUNLinSolSetPreconditioner") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetScalingVectors(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSetScalingVectors") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetZeroGuess(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetZeroGuess") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolInitialize(farg1) & bind(C, name="_wrap_FSUNLinSolInitialize") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSetup(farg1, farg2) & bind(C, name="_wrap_FSUNLinSolSetup") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNLinSolSolve(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FSUNLinSolSolve") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FSUNLinSolNumIters(farg1) & bind(C, name="_wrap_FSUNLinSolNumIters") 
& result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNLinSolResNorm(farg1) & bind(C, name="_wrap_FSUNLinSolResNorm") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FSUNLinSolResid(farg1) & bind(C, name="_wrap_FSUNLinSolResid") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FSUNLinSolLastFlag(farg1) & bind(C, name="_wrap_FSUNLinSolLastFlag") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function function swigc_FSUNLinSolSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNLinSolSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNLinSolFree(farg1) & bind(C, name="_wrap_FSUNLinSolFree") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNModifiedGS(v, h, k, p, new_vk_norm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: v type(C_PTR), target, intent(inout) :: h integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: p real(C_DOUBLE), dimension(*), target, intent(inout) :: new_vk_norm integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 farg1 = v farg2 = c_loc(h) farg3 = k farg4 = p farg5 = c_loc(new_vk_norm(1)) fresult = swigc_FSUNModifiedGS(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FModifiedGS(v, h, k, p, new_vk_norm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: v type(C_PTR), target, intent(inout) :: h integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: p real(C_DOUBLE), dimension(*), target, intent(inout) :: new_vk_norm integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 farg1 = v farg2 = c_loc(h) farg3 = k farg4 = p farg5 = c_loc(new_vk_norm(1)) fresult = swigc_FModifiedGS(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNClassicalGS(v, h, k, p, new_vk_norm, stemp, vtemp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: v type(C_PTR), target, intent(inout) :: h integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: p real(C_DOUBLE), dimension(*), target, intent(inout) :: new_vk_norm real(C_DOUBLE), dimension(*), target, intent(inout) :: stemp type(C_PTR) :: vtemp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 farg1 = v farg2 = c_loc(h) farg3 = k farg4 = p farg5 = c_loc(new_vk_norm(1)) farg6 = c_loc(stemp(1)) farg7 = vtemp 
fresult = swigc_FSUNClassicalGS(farg1, farg2, farg3, farg4, farg5, farg6, farg7) swig_result = fresult end function function FClassicalGS(v, h, k, p, new_vk_norm, stemp, vtemp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: v type(C_PTR), target, intent(inout) :: h integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: p real(C_DOUBLE), dimension(*), target, intent(inout) :: new_vk_norm real(C_DOUBLE), dimension(*), target, intent(inout) :: stemp type(C_PTR) :: vtemp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 farg1 = v farg2 = c_loc(h) farg3 = k farg4 = p farg5 = c_loc(new_vk_norm(1)) farg6 = c_loc(stemp(1)) farg7 = vtemp fresult = swigc_FClassicalGS(farg1, farg2, farg3, farg4, farg5, farg6, farg7) swig_result = fresult end function function FSUNQRfact(n, h, q, job) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: n type(C_PTR), target, intent(inout) :: h real(C_DOUBLE), dimension(*), target, intent(inout) :: q integer(C_INT), intent(in) :: job integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 farg1 = n farg2 = c_loc(h) farg3 = c_loc(q(1)) farg4 = job fresult = swigc_FSUNQRfact(farg1, farg2, farg3, farg4) swig_result = fresult end function function FQRfact(n, h, q, job) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: n type(C_PTR), target, intent(inout) :: h real(C_DOUBLE), dimension(*), target, intent(inout) :: q integer(C_INT), intent(in) :: job integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 farg1 = n farg2 = c_loc(h) farg3 = c_loc(q(1)) farg4 = job fresult = swigc_FQRfact(farg1, farg2, farg3, farg4) 
swig_result = fresult end function function FSUNQRsol(n, h, q, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: n type(C_PTR), target, intent(inout) :: h real(C_DOUBLE), dimension(*), target, intent(inout) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: b integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = n farg2 = c_loc(h) farg3 = c_loc(q(1)) farg4 = c_loc(b(1)) fresult = swigc_FSUNQRsol(farg1, farg2, farg3, farg4) swig_result = fresult end function function FQRsol(n, h, q, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: n type(C_PTR), target, intent(inout) :: h real(C_DOUBLE), dimension(*), target, intent(inout) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: b integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = n farg2 = c_loc(h) farg3 = c_loc(q(1)) farg4 = c_loc(b(1)) fresult = swigc_FQRsol(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNQRAdd_MGS(q, r, df, m, mmax, qrdata) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: r type(N_Vector), target, intent(inout) :: df integer(C_INT), intent(in) :: m integer(C_INT), intent(in) :: mmax type(C_PTR) :: qrdata integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 integer(C_INT) :: farg5 type(C_PTR) :: farg6 farg1 = q farg2 = c_loc(r(1)) farg3 = c_loc(df) farg4 = m farg5 = mmax farg6 = qrdata fresult = swigc_FSUNQRAdd_MGS(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FSUNQRAdd_ICWY(q, r, df, m, mmax, qrdata) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result 
type(C_PTR) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: r type(N_Vector), target, intent(inout) :: df integer(C_INT), intent(in) :: m integer(C_INT), intent(in) :: mmax type(C_PTR) :: qrdata integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 integer(C_INT) :: farg5 type(C_PTR) :: farg6 farg1 = q farg2 = c_loc(r(1)) farg3 = c_loc(df) farg4 = m farg5 = mmax farg6 = qrdata fresult = swigc_FSUNQRAdd_ICWY(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FSUNQRAdd_ICWY_SB(q, r, df, m, mmax, qrdata) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: r type(N_Vector), target, intent(inout) :: df integer(C_INT), intent(in) :: m integer(C_INT), intent(in) :: mmax type(C_PTR) :: qrdata integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 integer(C_INT) :: farg5 type(C_PTR) :: farg6 farg1 = q farg2 = c_loc(r(1)) farg3 = c_loc(df) farg4 = m farg5 = mmax farg6 = qrdata fresult = swigc_FSUNQRAdd_ICWY_SB(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FSUNQRAdd_CGS2(q, r, df, m, mmax, qrdata) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: r type(N_Vector), target, intent(inout) :: df integer(C_INT), intent(in) :: m integer(C_INT), intent(in) :: mmax type(C_PTR) :: qrdata integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 integer(C_INT) :: farg5 type(C_PTR) :: farg6 farg1 = q farg2 = c_loc(r(1)) farg3 = c_loc(df) farg4 = m farg5 = mmax farg6 = qrdata fresult = swigc_FSUNQRAdd_CGS2(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FSUNQRAdd_DCGS2(q, r, df, m, mmax, 
qrdata) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: r type(N_Vector), target, intent(inout) :: df integer(C_INT), intent(in) :: m integer(C_INT), intent(in) :: mmax type(C_PTR) :: qrdata integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 integer(C_INT) :: farg5 type(C_PTR) :: farg6 farg1 = q farg2 = c_loc(r(1)) farg3 = c_loc(df) farg4 = m farg5 = mmax farg6 = qrdata fresult = swigc_FSUNQRAdd_DCGS2(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FSUNQRAdd_DCGS2_SB(q, r, df, m, mmax, qrdata) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: q real(C_DOUBLE), dimension(*), target, intent(inout) :: r type(N_Vector), target, intent(inout) :: df integer(C_INT), intent(in) :: m integer(C_INT), intent(in) :: mmax type(C_PTR) :: qrdata integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 integer(C_INT) :: farg4 integer(C_INT) :: farg5 type(C_PTR) :: farg6 farg1 = q farg2 = c_loc(r(1)) farg3 = c_loc(df) farg4 = m farg5 = mmax farg6 = qrdata fresult = swigc_FSUNQRAdd_DCGS2_SB(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FSUNLinSolNewEmpty(sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), pointer :: swig_result type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = sunctx fresult = swigc_FSUNLinSolNewEmpty(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FSUNLinSolFreeEmpty(s) use, intrinsic :: ISO_C_BINDING type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: farg1 farg1 = c_loc(s) call swigc_FSUNLinSolFreeEmpty(farg1) end subroutine function FSUNLinSolGetType(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_Type) :: 
swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetType(farg1) swig_result = fresult end function function FSUNLinSolGetID(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNLinearSolver_ID) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolGetID(farg1) swig_result = fresult end function function FSUNLinSolSetATimes(s, a_data, atimes) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: a_data type(C_FUNPTR), intent(in), value :: atimes integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 farg1 = c_loc(s) farg2 = a_data farg3 = atimes fresult = swigc_FSUNLinSolSetATimes(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetPreconditioner(s, p_data, pset, psol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: p_data type(C_FUNPTR), intent(in), value :: pset type(C_FUNPTR), intent(in), value :: psol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = c_loc(s) farg2 = p_data farg3 = pset farg4 = psol fresult = swigc_FSUNLinSolSetPreconditioner(farg1, farg2, farg3, farg4) swig_result = fresult end function function FSUNLinSolSetScalingVectors(s, s1, s2) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(N_Vector), target, intent(inout) :: s1 type(N_Vector), target, intent(inout) :: s2 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(s1) farg3 = c_loc(s2) fresult = 
swigc_FSUNLinSolSetScalingVectors(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolSetZeroGuess(s, onoff) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT), intent(in) :: onoff integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(s) farg2 = onoff fresult = swigc_FSUNLinSolSetZeroGuess(farg1, farg2) swig_result = fresult end function function FSUNLinSolInitialize(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolInitialize(farg1) swig_result = fresult end function function FSUNLinSolSetup(s, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(s) farg2 = c_loc(a) fresult = swigc_FSUNLinSolSetup(farg1, farg2) swig_result = fresult end function function FSUNLinSolSolve(s, a, x, b, tol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: b real(C_DOUBLE), intent(in) :: tol integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 farg1 = c_loc(s) farg2 = c_loc(a) farg3 = c_loc(x) farg4 = c_loc(b) farg5 = tol fresult = swigc_FSUNLinSolSolve(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FSUNLinSolNumIters(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, 
intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolNumIters(farg1) swig_result = fresult end function function FSUNLinSolResNorm(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResNorm(farg1) swig_result = fresult end function function FSUNLinSolResid(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(SUNLinearSolver), target, intent(inout) :: s type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolResid(farg1) call c_f_pointer(fresult, swig_result) end function function FSUNLinSolLastFlag(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolLastFlag(farg1) swig_result = fresult end function function FSUNLinSolSpace(s, lenrwls, leniwls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls integer(C_LONG), dimension(*), target, intent(inout) :: leniwls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(s) farg2 = c_loc(lenrwls(1)) farg3 = c_loc(leniwls(1)) fresult = swigc_FSUNLinSolSpace(farg1, farg2, farg3) swig_result = fresult end function function FSUNLinSolFree(s) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNLinearSolver), target, intent(inout) :: s integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(s) fresult = swigc_FSUNLinSolFree(farg1) swig_result = fresult end function end module 
StanHeaders/src/sundials/fmod/fsundials_matrix_mod.f900000644000176200001440000002300214645137106022550 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. module fsundials_matrix_mod use, intrinsic :: ISO_C_BINDING use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS ! typedef enum SUNMatrix_ID enum, bind(c) enumerator :: SUNMATRIX_DENSE enumerator :: SUNMATRIX_MAGMADENSE enumerator :: SUNMATRIX_ONEMKLDENSE enumerator :: SUNMATRIX_BAND enumerator :: SUNMATRIX_SPARSE enumerator :: SUNMATRIX_SLUNRLOC enumerator :: SUNMATRIX_CUSPARSE enumerator :: SUNMATRIX_CUSTOM end enum integer, parameter, public :: SUNMatrix_ID = kind(SUNMATRIX_DENSE) public :: SUNMATRIX_DENSE, SUNMATRIX_MAGMADENSE, SUNMATRIX_ONEMKLDENSE, SUNMATRIX_BAND, SUNMATRIX_SPARSE, SUNMATRIX_SLUNRLOC, & SUNMATRIX_CUSPARSE, SUNMATRIX_CUSTOM ! struct struct _generic_SUNMatrix_Ops type, bind(C), public :: SUNMatrix_Ops type(C_FUNPTR), public :: getid type(C_FUNPTR), public :: clone type(C_FUNPTR), public :: destroy type(C_FUNPTR), public :: zero type(C_FUNPTR), public :: copy type(C_FUNPTR), public :: scaleadd type(C_FUNPTR), public :: scaleaddi type(C_FUNPTR), public :: matvecsetup type(C_FUNPTR), public :: matvec type(C_FUNPTR), public :: space end type SUNMatrix_Ops ! 
struct struct _generic_SUNMatrix type, bind(C), public :: SUNMatrix type(C_PTR), public :: content type(C_PTR), public :: ops type(C_PTR), public :: sunctx end type SUNMatrix public :: FSUNMatNewEmpty public :: FSUNMatFreeEmpty public :: FSUNMatCopyOps public :: FSUNMatGetID public :: FSUNMatClone public :: FSUNMatDestroy public :: FSUNMatZero public :: FSUNMatCopy public :: FSUNMatScaleAdd public :: FSUNMatScaleAddI public :: FSUNMatMatvecSetup public :: FSUNMatMatvec public :: FSUNMatSpace integer(C_INT), parameter, public :: SUNMAT_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: SUNMAT_ILL_INPUT = -701_C_INT integer(C_INT), parameter, public :: SUNMAT_MEM_FAIL = -702_C_INT integer(C_INT), parameter, public :: SUNMAT_OPERATION_FAIL = -703_C_INT integer(C_INT), parameter, public :: SUNMAT_MATVEC_SETUP_REQUIRED = -704_C_INT ! WRAPPER DECLARATIONS interface function swigc_FSUNMatNewEmpty(farg1) & bind(C, name="_wrap_FSUNMatNewEmpty") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FSUNMatFreeEmpty(farg1) & bind(C, name="_wrap_FSUNMatFreeEmpty") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FSUNMatCopyOps(farg1, farg2) & bind(C, name="_wrap_FSUNMatCopyOps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNMatGetID(farg1) & bind(C, name="_wrap_FSUNMatGetID") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNMatClone(farg1) & bind(C, name="_wrap_FSUNMatClone") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FSUNMatDestroy(farg1) & bind(C, name="_wrap_FSUNMatDestroy") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function 
swigc_FSUNMatZero(farg1) & bind(C, name="_wrap_FSUNMatZero") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNMatCopy(farg1, farg2) & bind(C, name="_wrap_FSUNMatCopy") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNMatScaleAdd(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatScaleAdd") & result(fresult) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNMatScaleAddI(farg1, farg2) & bind(C, name="_wrap_FSUNMatScaleAddI") & result(fresult) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNMatMatvecSetup(farg1) & bind(C, name="_wrap_FSUNMatMatvecSetup") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNMatMatvec(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatMatvec") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNMatSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNMatSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNMatNewEmpty(sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = sunctx fresult = swigc_FSUNMatNewEmpty(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FSUNMatFreeEmpty(a) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: farg1 farg1 = c_loc(a) call swigc_FSUNMatFreeEmpty(farg1) end subroutine function FSUNMatCopyOps(a, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(SUNMatrix), target, intent(inout) :: b integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = c_loc(b) fresult = swigc_FSUNMatCopyOps(farg1, farg2) swig_result = fresult end function function FSUNMatGetID(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNMatrix_ID) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatGetID(farg1) swig_result = fresult end function function FSUNMatClone(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), pointer :: swig_result type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatClone(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FSUNMatDestroy(a) use, intrinsic :: ISO_C_BINDING type(SUNMatrix), target, intent(inout) :: a type(C_PTR) :: farg1 farg1 = c_loc(a) call swigc_FSUNMatDestroy(farg1) end subroutine function FSUNMatZero(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatZero(farg1) swig_result = fresult end function function 
FSUNMatCopy(a, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(SUNMatrix), target, intent(inout) :: b integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(a) farg2 = c_loc(b) fresult = swigc_FSUNMatCopy(farg1, farg2) swig_result = fresult end function function FSUNMatScaleAdd(c, a, b) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result real(C_DOUBLE), intent(in) :: c type(SUNMatrix), target, intent(inout) :: a type(SUNMatrix), target, intent(inout) :: b integer(C_INT) :: fresult real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(a) farg3 = c_loc(b) fresult = swigc_FSUNMatScaleAdd(farg1, farg2, farg3) swig_result = fresult end function function FSUNMatScaleAddI(c, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result real(C_DOUBLE), intent(in) :: c type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(a) fresult = swigc_FSUNMatScaleAddI(farg1, farg2) swig_result = fresult end function function FSUNMatMatvecSetup(a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(a) fresult = swigc_FSUNMatMatvecSetup(farg1) swig_result = fresult end function function FSUNMatMatvec(a, x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(a) farg2 = c_loc(x) farg3 = c_loc(y) fresult = swigc_FSUNMatMatvec(farg1, farg2, farg3) swig_result = fresult end function function FSUNMatSpace(a, 
lenrw, leniw) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNMatrix), target, intent(inout) :: a integer(C_LONG), dimension(*), target, intent(inout) :: lenrw integer(C_LONG), dimension(*), target, intent(inout) :: leniw integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(a) farg2 = c_loc(lenrw(1)) farg3 = c_loc(leniw(1)) fresult = swigc_FSUNMatSpace(farg1, farg2, farg3) swig_result = fresult end function end module StanHeaders/src/sundials/fmod/fsundials_futils_mod.f900000644000176200001440000000555214645137106022564 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsundials_futils_mod use, intrinsic :: ISO_C_BINDING implicit none private ! DECLARATION CONSTRUCTS type, bind(C) :: SwigArrayWrapper type(C_PTR), public :: data = C_NULL_PTR integer(C_SIZE_T), public :: size = 0 end type public :: FSUNDIALSFileOpen public :: FSUNDIALSFileClose ! 
WRAPPER DECLARATIONS interface function swigc_FSUNDIALSFileOpen(farg1, farg2) & bind(C, name="_wrap_FSUNDIALSFileOpen") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper type(SwigArrayWrapper) :: farg1 type(SwigArrayWrapper) :: farg2 type(C_PTR) :: fresult end function subroutine swigc_FSUNDIALSFileClose(farg1) & bind(C, name="_wrap_FSUNDIALSFileClose") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine end interface contains ! MODULE SUBPROGRAMS subroutine SWIG_string_to_chararray(string, chars, wrap) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=*), intent(IN) :: string character(kind=C_CHAR), dimension(:), target, allocatable, intent(OUT) :: chars type(SwigArrayWrapper), intent(OUT) :: wrap integer :: i allocate(character(kind=C_CHAR) :: chars(len(string) + 1)) do i=1,len(string) chars(i) = string(i:i) end do i = len(string) + 1 chars(i) = C_NULL_CHAR ! C string compatibility wrap%data = c_loc(chars) wrap%size = len(string) end subroutine function FSUNDIALSFileOpen(filename, modes) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result character(kind=C_CHAR, len=*), target :: filename character(kind=C_CHAR), dimension(:), allocatable, target :: farg1_chars character(kind=C_CHAR, len=*), target :: modes character(kind=C_CHAR), dimension(:), allocatable, target :: farg2_chars type(C_PTR) :: fresult type(SwigArrayWrapper) :: farg1 type(SwigArrayWrapper) :: farg2 call SWIG_string_to_chararray(filename, farg1_chars, farg1) call SWIG_string_to_chararray(modes, farg2_chars, farg2) fresult = swigc_FSUNDIALSFileOpen(farg1, farg2) swig_result = fresult end function subroutine FSUNDIALSFileClose(fp) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: fp type(C_PTR) :: farg1 farg1 = fp call swigc_FSUNDIALSFileClose(farg1) end subroutine end module StanHeaders/src/sundials/fmod/fsundials_nonlinearsolver_mod.f900000644000176200001440000003127214645137106024474 0ustar liggesusers! 
This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. module fsundials_nonlinearsolver_mod use, intrinsic :: ISO_C_BINDING use fsundials_types_mod use fsundials_context_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS ! typedef enum SUNNonlinearSolver_Type enum, bind(c) enumerator :: SUNNONLINEARSOLVER_ROOTFIND enumerator :: SUNNONLINEARSOLVER_FIXEDPOINT end enum integer, parameter, public :: SUNNonlinearSolver_Type = kind(SUNNONLINEARSOLVER_ROOTFIND) public :: SUNNONLINEARSOLVER_ROOTFIND, SUNNONLINEARSOLVER_FIXEDPOINT ! struct struct _generic_SUNNonlinearSolver_Ops type, bind(C), public :: SUNNonlinearSolver_Ops type(C_FUNPTR), public :: gettype type(C_FUNPTR), public :: initialize type(C_FUNPTR), public :: setup type(C_FUNPTR), public :: solve type(C_FUNPTR), public :: free type(C_FUNPTR), public :: setsysfn type(C_FUNPTR), public :: setlsetupfn type(C_FUNPTR), public :: setlsolvefn type(C_FUNPTR), public :: setctestfn type(C_FUNPTR), public :: setmaxiters type(C_FUNPTR), public :: getnumiters type(C_FUNPTR), public :: getcuriter type(C_FUNPTR), public :: getnumconvfails end type SUNNonlinearSolver_Ops ! 
struct struct _generic_SUNNonlinearSolver type, bind(C), public :: SUNNonlinearSolver type(C_PTR), public :: content type(C_PTR), public :: ops type(C_PTR), public :: sunctx end type SUNNonlinearSolver public :: FSUNNonlinSolNewEmpty public :: FSUNNonlinSolFreeEmpty public :: FSUNNonlinSolGetType public :: FSUNNonlinSolInitialize public :: FSUNNonlinSolSetup public :: FSUNNonlinSolSolve public :: FSUNNonlinSolFree public :: FSUNNonlinSolSetSysFn public :: FSUNNonlinSolSetLSetupFn public :: FSUNNonlinSolSetLSolveFn public :: FSUNNonlinSolSetConvTestFn public :: FSUNNonlinSolSetMaxIters public :: FSUNNonlinSolGetNumIters public :: FSUNNonlinSolGetCurIter public :: FSUNNonlinSolGetNumConvFails integer(C_INT), parameter, public :: SUN_NLS_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: SUN_NLS_CONTINUE = +901_C_INT integer(C_INT), parameter, public :: SUN_NLS_CONV_RECVR = +902_C_INT integer(C_INT), parameter, public :: SUN_NLS_MEM_NULL = -901_C_INT integer(C_INT), parameter, public :: SUN_NLS_MEM_FAIL = -902_C_INT integer(C_INT), parameter, public :: SUN_NLS_ILL_INPUT = -903_C_INT integer(C_INT), parameter, public :: SUN_NLS_VECTOROP_ERR = -904_C_INT integer(C_INT), parameter, public :: SUN_NLS_EXT_FAIL = -905_C_INT ! 
WRAPPER DECLARATIONS interface function swigc_FSUNNonlinSolNewEmpty(farg1) & bind(C, name="_wrap_FSUNNonlinSolNewEmpty") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FSUNNonlinSolFreeEmpty(farg1) & bind(C, name="_wrap_FSUNNonlinSolFreeEmpty") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FSUNNonlinSolGetType(farg1) & bind(C, name="_wrap_FSUNNonlinSolGetType") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolInitialize(farg1) & bind(C, name="_wrap_FSUNNonlinSolInitialize") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetup(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNNonlinSolSetup") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSolve(farg1, farg2, farg3, farg4, farg5, farg6, farg7) & bind(C, name="_wrap_FSUNNonlinSolSolve") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT), intent(in) :: farg6 type(C_PTR), value :: farg7 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolFree(farg1) & bind(C, name="_wrap_FSUNNonlinSolFree") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetSysFn(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetSysFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function 
swigc_FSUNNonlinSolSetLSetupFn(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetLSetupFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetLSolveFn(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetLSolveFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetConvTestFn(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNNonlinSolSetConvTestFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolSetMaxIters(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolSetMaxIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetNumIters(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetNumIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetCurIter(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetCurIter") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNNonlinSolGetNumConvFails(farg1, farg2) & bind(C, name="_wrap_FSUNNonlinSolGetNumConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNNonlinSolNewEmpty(sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(SUNNonlinearSolver), pointer :: swig_result type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = sunctx fresult = swigc_FSUNNonlinSolNewEmpty(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FSUNNonlinSolFreeEmpty(nls) use, intrinsic :: ISO_C_BINDING type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_PTR) :: farg1 farg1 = c_loc(nls) call swigc_FSUNNonlinSolFreeEmpty(farg1) end subroutine function FSUNNonlinSolGetType(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(SUNNonlinearSolver_Type) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolGetType(farg1) swig_result = fresult end function function FSUNNonlinSolInitialize(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolInitialize(farg1) swig_result = fresult end function function FSUNNonlinSolSetup(nls, y, mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(N_Vector), target, intent(inout) :: y type(C_PTR) :: mem integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(nls) farg2 = c_loc(y) farg3 = mem fresult = swigc_FSUNNonlinSolSetup(farg1, farg2, farg3) swig_result = fresult end function function FSUNNonlinSolSolve(nls, y0, y, w, tol, calllsetup, mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(N_Vector), target, intent(inout) :: y0 type(N_Vector), target, intent(inout) :: y type(N_Vector), 
target, intent(inout) :: w real(C_DOUBLE), intent(in) :: tol integer(C_INT), intent(in) :: calllsetup type(C_PTR) :: mem integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 real(C_DOUBLE) :: farg5 integer(C_INT) :: farg6 type(C_PTR) :: farg7 farg1 = c_loc(nls) farg2 = c_loc(y0) farg3 = c_loc(y) farg4 = c_loc(w) farg5 = tol farg6 = calllsetup farg7 = mem fresult = swigc_FSUNNonlinSolSolve(farg1, farg2, farg3, farg4, farg5, farg6, farg7) swig_result = fresult end function function FSUNNonlinSolFree(nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(nls) fresult = swigc_FSUNNonlinSolFree(farg1) swig_result = fresult end function function FSUNNonlinSolSetSysFn(nls, sysfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: sysfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = c_loc(nls) farg2 = sysfn fresult = swigc_FSUNNonlinSolSetSysFn(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetLSetupFn(nls, setupfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: setupfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = c_loc(nls) farg2 = setupfn fresult = swigc_FSUNNonlinSolSetLSetupFn(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetLSolveFn(nls, solvefn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: solvefn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 
= c_loc(nls) farg2 = solvefn fresult = swigc_FSUNNonlinSolSetLSolveFn(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolSetConvTestFn(nls, ctestfn, ctest_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls type(C_FUNPTR), intent(in), value :: ctestfn type(C_PTR) :: ctest_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(nls) farg2 = ctestfn farg3 = ctest_data fresult = swigc_FSUNNonlinSolSetConvTestFn(farg1, farg2, farg3) swig_result = fresult end function function FSUNNonlinSolSetMaxIters(nls, maxiters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT), intent(in) :: maxiters integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = c_loc(nls) farg2 = maxiters fresult = swigc_FSUNNonlinSolSetMaxIters(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetNumIters(nls, niters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_LONG), dimension(*), target, intent(inout) :: niters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(niters(1)) fresult = swigc_FSUNNonlinSolGetNumIters(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetCurIter(nls, iter) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT), dimension(*), target, intent(inout) :: iter integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(iter(1)) fresult = swigc_FSUNNonlinSolGetCurIter(farg1, farg2) swig_result = fresult end function function FSUNNonlinSolGetNumConvFails(nls, 
nconvfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_LONG), dimension(*), target, intent(inout) :: nconvfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(nls) farg2 = c_loc(nconvfails(1)) fresult = swigc_FSUNNonlinSolGetNumConvFails(farg1, farg2) swig_result = fresult end function end module StanHeaders/src/sundials/fmod/fsundials_profiler_mod.f900000644000176200001440000001215614645137106023076 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsundials_profiler_mod use, intrinsic :: ISO_C_BINDING implicit none private ! DECLARATION CONSTRUCTS public :: FSUNProfiler_Free type, bind(C) :: SwigArrayWrapper type(C_PTR), public :: data = C_NULL_PTR integer(C_SIZE_T), public :: size = 0 end type public :: FSUNProfiler_Begin public :: FSUNProfiler_End public :: FSUNProfiler_Print public :: FSUNProfiler_Create ! 
WRAPPER DECLARATIONS interface function swigc_FSUNProfiler_Free(farg1) & bind(C, name="_wrap_FSUNProfiler_Free") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FSUNProfiler_Begin(farg1, farg2) & bind(C, name="_wrap_FSUNProfiler_Begin") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper type(C_PTR), value :: farg1 type(SwigArrayWrapper) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNProfiler_End(farg1, farg2) & bind(C, name="_wrap_FSUNProfiler_End") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper type(C_PTR), value :: farg1 type(SwigArrayWrapper) :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNProfiler_Print(farg1, farg2) & bind(C, name="_wrap_FSUNProfiler_Print") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FSUNProfiler_Create(farg1, farg2, farg3) & bind(C, name="_wrap_FSUNProfiler_Create") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper type(C_PTR), value :: farg1 type(SwigArrayWrapper) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function end interface contains ! 
MODULE SUBPROGRAMS function FSUNProfiler_Free(p) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR), target, intent(inout) :: p integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(p) fresult = swigc_FSUNProfiler_Free(farg1) swig_result = fresult end function subroutine SWIG_string_to_chararray(string, chars, wrap) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=*), intent(IN) :: string character(kind=C_CHAR), dimension(:), target, allocatable, intent(OUT) :: chars type(SwigArrayWrapper), intent(OUT) :: wrap integer :: i allocate(character(kind=C_CHAR) :: chars(len(string) + 1)) do i=1,len(string) chars(i) = string(i:i) end do i = len(string) + 1 chars(i) = C_NULL_CHAR ! C string compatibility wrap%data = c_loc(chars) wrap%size = len(string) end subroutine function FSUNProfiler_Begin(p, name) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: p character(kind=C_CHAR, len=*), target :: name character(kind=C_CHAR), dimension(:), allocatable, target :: farg2_chars integer(C_INT) :: fresult type(C_PTR) :: farg1 type(SwigArrayWrapper) :: farg2 farg1 = p call SWIG_string_to_chararray(name, farg2_chars, farg2) fresult = swigc_FSUNProfiler_Begin(farg1, farg2) swig_result = fresult end function function FSUNProfiler_End(p, name) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: p character(kind=C_CHAR, len=*), target :: name character(kind=C_CHAR), dimension(:), allocatable, target :: farg2_chars integer(C_INT) :: fresult type(C_PTR) :: farg1 type(SwigArrayWrapper) :: farg2 farg1 = p call SWIG_string_to_chararray(name, farg2_chars, farg2) fresult = swigc_FSUNProfiler_End(farg1, farg2) swig_result = fresult end function function FSUNProfiler_Print(p, fp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: p type(C_PTR) :: fp integer(C_INT) :: fresult type(C_PTR) :: farg1 
type(C_PTR) :: farg2 farg1 = p farg2 = fp fresult = swigc_FSUNProfiler_Print(farg1, farg2) swig_result = fresult end function function FSUNProfiler_Create(comm, title, p) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: comm character(kind=C_CHAR, len=*), target :: title character(kind=C_CHAR), dimension(:), allocatable, target :: farg2_chars type(C_PTR), target, intent(inout) :: p integer(C_INT) :: fresult type(C_PTR) :: farg1 type(SwigArrayWrapper) :: farg2 type(C_PTR) :: farg3 farg1 = comm call SWIG_string_to_chararray(title, farg2_chars, farg2) farg3 = c_loc(p) fresult = swigc_FSUNProfiler_Create(farg1, farg2, farg3) swig_result = fresult end function end module StanHeaders/src/sundials/fmod/fsundials_linearsolver_mod.c0000644000176200001440000004611314645137106023605 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_iterative.h" #include "sundials/sundials_linearsolver.h" SWIGEXPORT int _wrap_FSUNModifiedGS(void *farg1, void *farg2, int const *farg3, int const *farg4, double *farg5) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype **arg2 = (realtype **) 0 ; int arg3 ; int arg4 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype **)(farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (realtype *)(farg5); result = (int)SUNModifiedGS(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FModifiedGS(void *farg1, void *farg2, int const *farg3, int const *farg4, double *farg5) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype **arg2 = (realtype **) 0 ; int arg3 ; int arg4 ; 
realtype *arg5 = (realtype *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype **)(farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (realtype *)(farg5); result = (int)ModifiedGS(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNClassicalGS(void *farg1, void *farg2, int const *farg3, int const *farg4, double *farg5, double *farg6, void *farg7) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype **arg2 = (realtype **) 0 ; int arg3 ; int arg4 ; realtype *arg5 = (realtype *) 0 ; realtype *arg6 = (realtype *) 0 ; N_Vector *arg7 = (N_Vector *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype **)(farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (realtype *)(farg5); arg6 = (realtype *)(farg6); arg7 = (N_Vector *)(farg7); result = (int)SUNClassicalGS(arg1,arg2,arg3,arg4,arg5,arg6,arg7); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FClassicalGS(void *farg1, void *farg2, int const *farg3, int const *farg4, double *farg5, double *farg6, void *farg7) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype **arg2 = (realtype **) 0 ; int arg3 ; int arg4 ; realtype *arg5 = (realtype *) 0 ; realtype *arg6 = (realtype *) 0 ; N_Vector *arg7 = (N_Vector *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype **)(farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (realtype *)(farg5); arg6 = (realtype *)(farg6); arg7 = (N_Vector *)(farg7); result = (int)ClassicalGS(arg1,arg2,arg3,arg4,arg5,arg6,arg7); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRfact(int const *farg1, void *farg2, double *farg3, int const *farg4) { int fresult ; int arg1 ; realtype **arg2 = (realtype **) 0 ; realtype *arg3 = (realtype *) 0 ; int arg4 ; int result; arg1 = (int)(*farg1); arg2 = (realtype **)(farg2); arg3 = (realtype *)(farg3); arg4 = (int)(*farg4); result = (int)SUNQRfact(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT 
int _wrap_FQRfact(int const *farg1, void *farg2, double *farg3, int const *farg4) { int fresult ; int arg1 ; realtype **arg2 = (realtype **) 0 ; realtype *arg3 = (realtype *) 0 ; int arg4 ; int result; arg1 = (int)(*farg1); arg2 = (realtype **)(farg2); arg3 = (realtype *)(farg3); arg4 = (int)(*farg4); result = (int)QRfact(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRsol(int const *farg1, void *farg2, double *farg3, double *farg4) { int fresult ; int arg1 ; realtype **arg2 = (realtype **) 0 ; realtype *arg3 = (realtype *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype **)(farg2); arg3 = (realtype *)(farg3); arg4 = (realtype *)(farg4); result = (int)SUNQRsol(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FQRsol(int const *farg1, void *farg2, double *farg3, double *farg4) { int fresult ; int arg1 ; realtype **arg2 = (realtype **) 0 ; realtype *arg3 = (realtype *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype **)(farg2); arg3 = (realtype *)(farg3); arg4 = (realtype *)(farg4); result = (int)QRsol(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRAdd_MGS(void *farg1, double *farg2, N_Vector farg3, int const *farg4, int const *farg5, void *farg6) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int arg4 ; int arg5 ; void *arg6 = (void *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (int)(*farg4); arg5 = (int)(*farg5); arg6 = (void *)(farg6); result = (int)SUNQRAdd_MGS(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRAdd_ICWY(void *farg1, double *farg2, N_Vector farg3, int const *farg4, int const *farg5, void *farg6) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype *arg2 = 
(realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int arg4 ; int arg5 ; void *arg6 = (void *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (int)(*farg4); arg5 = (int)(*farg5); arg6 = (void *)(farg6); result = (int)SUNQRAdd_ICWY(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRAdd_ICWY_SB(void *farg1, double *farg2, N_Vector farg3, int const *farg4, int const *farg5, void *farg6) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int arg4 ; int arg5 ; void *arg6 = (void *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (int)(*farg4); arg5 = (int)(*farg5); arg6 = (void *)(farg6); result = (int)SUNQRAdd_ICWY_SB(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRAdd_CGS2(void *farg1, double *farg2, N_Vector farg3, int const *farg4, int const *farg5, void *farg6) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int arg4 ; int arg5 ; void *arg6 = (void *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (int)(*farg4); arg5 = (int)(*farg5); arg6 = (void *)(farg6); result = (int)SUNQRAdd_CGS2(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRAdd_DCGS2(void *farg1, double *farg2, N_Vector farg3, int const *farg4, int const *farg5, void *farg6) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int arg4 ; int arg5 ; void *arg6 = (void *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (int)(*farg4); arg5 = (int)(*farg5); arg6 = (void *)(farg6); result = (int)SUNQRAdd_DCGS2(arg1,arg2,arg3,arg4,arg5,arg6); 
fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNQRAdd_DCGS2_SB(void *farg1, double *farg2, N_Vector farg3, int const *farg4, int const *farg5, void *farg6) { int fresult ; N_Vector *arg1 = (N_Vector *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int arg4 ; int arg5 ; void *arg6 = (void *) 0 ; int result; arg1 = (N_Vector *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (int)(*farg4); arg5 = (int)(*farg5); arg6 = (void *)(farg6); result = (int)SUNQRAdd_DCGS2_SB(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT SUNLinearSolver _wrap_FSUNLinSolNewEmpty(void *farg1) { SUNLinearSolver fresult ; SUNContext arg1 = (SUNContext) 0 ; SUNLinearSolver result; arg1 = (SUNContext)(farg1); result = (SUNLinearSolver)SUNLinSolNewEmpty(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNLinSolFreeEmpty(SUNLinearSolver farg1) { SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; arg1 = (SUNLinearSolver)(farg1); SUNLinSolFreeEmpty(arg1); } SWIGEXPORT int _wrap_FSUNLinSolGetType(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_Type result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_Type)SUNLinSolGetType(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolGetID(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNLinearSolver_ID result; arg1 = (SUNLinearSolver)(farg1); result = (SUNLinearSolver_ID)SUNLinSolGetID(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetATimes(SUNLinearSolver farg1, void *farg2, SUNATimesFn farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNATimesFn arg3 = (SUNATimesFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNATimesFn)(farg3); result = (int)SUNLinSolSetATimes(arg1,arg2,arg3); fresult = (int)(result); 
return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetPreconditioner(SUNLinearSolver farg1, void *farg2, SUNPSetupFn farg3, SUNPSolveFn farg4) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; void *arg2 = (void *) 0 ; SUNPSetupFn arg3 = (SUNPSetupFn) 0 ; SUNPSolveFn arg4 = (SUNPSolveFn) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (void *)(farg2); arg3 = (SUNPSetupFn)(farg3); arg4 = (SUNPSolveFn)(farg4); result = (int)SUNLinSolSetPreconditioner(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetScalingVectors(SUNLinearSolver farg1, N_Vector farg2, N_Vector farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)SUNLinSolSetScalingVectors(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetZeroGuess(SUNLinearSolver farg1, int const *farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNLinSolSetZeroGuess(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolInitialize(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolInitialize(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSetup(SUNLinearSolver farg1, SUNMatrix farg2) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); result = (int)SUNLinSolSetup(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSolve(SUNLinearSolver farg1, SUNMatrix farg2, N_Vector farg3, N_Vector farg4, double const *farg5) { int 
fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; SUNMatrix arg2 = (SUNMatrix) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (SUNMatrix)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); result = (int)SUNLinSolSolve(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolNumIters(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); result = (int)SUNLinSolNumIters(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FSUNLinSolResNorm(SUNLinearSolver farg1) { double fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; realtype result; arg1 = (SUNLinearSolver)(farg1); result = (realtype)SUNLinSolResNorm(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FSUNLinSolResid(SUNLinearSolver farg1) { N_Vector fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; N_Vector result; arg1 = (SUNLinearSolver)(farg1); result = (N_Vector)SUNLinSolResid(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FSUNLinSolLastFlag(SUNLinearSolver farg1) { int64_t fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; sunindextype result; arg1 = (SUNLinearSolver)(farg1); result = SUNLinSolLastFlag(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolSpace(SUNLinearSolver farg1, long *farg2, long *farg3) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (SUNLinearSolver)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)SUNLinSolSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNLinSolFree(SUNLinearSolver farg1) { int fresult ; SUNLinearSolver arg1 = (SUNLinearSolver) 0 ; int result; arg1 = 
(SUNLinearSolver)(farg1); result = (int)SUNLinSolFree(arg1); fresult = (int)(result); return fresult; } StanHeaders/src/sundials/fmod/fsundials_types_mod.f900000644000176200001440000000167514645137106022424 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fsundials_types_mod use, intrinsic :: ISO_C_BINDING implicit none private ! DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: SUNFALSE = 0_C_INT integer(C_INT), parameter, public :: SUNTRUE = 1_C_INT end module StanHeaders/src/sundials/fmod/fsundials_nvector_mod.f900000644000176200001440000012611114645137106022731 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. module fsundials_nvector_mod use, intrinsic :: ISO_C_BINDING use fsundials_context_mod use fsundials_types_mod implicit none private ! DECLARATION CONSTRUCTS ! 
typedef enum N_Vector_ID enum, bind(c) enumerator :: SUNDIALS_NVEC_SERIAL enumerator :: SUNDIALS_NVEC_PARALLEL enumerator :: SUNDIALS_NVEC_OPENMP enumerator :: SUNDIALS_NVEC_PTHREADS enumerator :: SUNDIALS_NVEC_PARHYP enumerator :: SUNDIALS_NVEC_PETSC enumerator :: SUNDIALS_NVEC_CUDA enumerator :: SUNDIALS_NVEC_HIP enumerator :: SUNDIALS_NVEC_SYCL enumerator :: SUNDIALS_NVEC_RAJA enumerator :: SUNDIALS_NVEC_OPENMPDEV enumerator :: SUNDIALS_NVEC_TRILINOS enumerator :: SUNDIALS_NVEC_MANYVECTOR enumerator :: SUNDIALS_NVEC_MPIMANYVECTOR enumerator :: SUNDIALS_NVEC_MPIPLUSX enumerator :: SUNDIALS_NVEC_CUSTOM end enum integer, parameter, public :: N_Vector_ID = kind(SUNDIALS_NVEC_SERIAL) public :: SUNDIALS_NVEC_SERIAL, SUNDIALS_NVEC_PARALLEL, SUNDIALS_NVEC_OPENMP, SUNDIALS_NVEC_PTHREADS, SUNDIALS_NVEC_PARHYP, & SUNDIALS_NVEC_PETSC, SUNDIALS_NVEC_CUDA, SUNDIALS_NVEC_HIP, SUNDIALS_NVEC_SYCL, SUNDIALS_NVEC_RAJA, SUNDIALS_NVEC_OPENMPDEV, & SUNDIALS_NVEC_TRILINOS, SUNDIALS_NVEC_MANYVECTOR, SUNDIALS_NVEC_MPIMANYVECTOR, SUNDIALS_NVEC_MPIPLUSX, SUNDIALS_NVEC_CUSTOM ! 
struct struct _generic_N_Vector_Ops type, bind(C), public :: N_Vector_Ops type(C_FUNPTR), public :: nvgetvectorid type(C_FUNPTR), public :: nvclone type(C_FUNPTR), public :: nvcloneempty type(C_FUNPTR), public :: nvdestroy type(C_FUNPTR), public :: nvspace type(C_FUNPTR), public :: nvgetarraypointer type(C_FUNPTR), public :: nvgetdevicearraypointer type(C_FUNPTR), public :: nvsetarraypointer type(C_FUNPTR), public :: nvgetcommunicator type(C_FUNPTR), public :: nvgetlength type(C_FUNPTR), public :: nvlinearsum type(C_FUNPTR), public :: nvconst type(C_FUNPTR), public :: nvprod type(C_FUNPTR), public :: nvdiv type(C_FUNPTR), public :: nvscale type(C_FUNPTR), public :: nvabs type(C_FUNPTR), public :: nvinv type(C_FUNPTR), public :: nvaddconst type(C_FUNPTR), public :: nvdotprod type(C_FUNPTR), public :: nvmaxnorm type(C_FUNPTR), public :: nvwrmsnorm type(C_FUNPTR), public :: nvwrmsnormmask type(C_FUNPTR), public :: nvmin type(C_FUNPTR), public :: nvwl2norm type(C_FUNPTR), public :: nvl1norm type(C_FUNPTR), public :: nvcompare type(C_FUNPTR), public :: nvinvtest type(C_FUNPTR), public :: nvconstrmask type(C_FUNPTR), public :: nvminquotient type(C_FUNPTR), public :: nvlinearcombination type(C_FUNPTR), public :: nvscaleaddmulti type(C_FUNPTR), public :: nvdotprodmulti type(C_FUNPTR), public :: nvlinearsumvectorarray type(C_FUNPTR), public :: nvscalevectorarray type(C_FUNPTR), public :: nvconstvectorarray type(C_FUNPTR), public :: nvwrmsnormvectorarray type(C_FUNPTR), public :: nvwrmsnormmaskvectorarray type(C_FUNPTR), public :: nvscaleaddmultivectorarray type(C_FUNPTR), public :: nvlinearcombinationvectorarray type(C_FUNPTR), public :: nvdotprodlocal type(C_FUNPTR), public :: nvmaxnormlocal type(C_FUNPTR), public :: nvminlocal type(C_FUNPTR), public :: nvl1normlocal type(C_FUNPTR), public :: nvinvtestlocal type(C_FUNPTR), public :: nvconstrmasklocal type(C_FUNPTR), public :: nvminquotientlocal type(C_FUNPTR), public :: nvwsqrsumlocal type(C_FUNPTR), public :: 
nvwsqrsummasklocal type(C_FUNPTR), public :: nvdotprodmultilocal type(C_FUNPTR), public :: nvdotprodmultiallreduce type(C_FUNPTR), public :: nvbufsize type(C_FUNPTR), public :: nvbufpack type(C_FUNPTR), public :: nvbufunpack type(C_FUNPTR), public :: nvprint type(C_FUNPTR), public :: nvprintfile end type N_Vector_Ops ! struct struct _generic_N_Vector type, bind(C), public :: N_Vector type(C_PTR), public :: content type(C_PTR), public :: ops type(C_PTR), public :: sunctx end type N_Vector public :: FN_VNewEmpty public :: FN_VFreeEmpty public :: FN_VCopyOps public :: FN_VGetVectorID public :: FN_VClone public :: FN_VCloneEmpty public :: FN_VDestroy public :: FN_VSpace public :: FN_VGetArrayPointer public :: FN_VGetDeviceArrayPointer public :: FN_VSetArrayPointer public :: FN_VGetCommunicator public :: FN_VGetLength public :: FN_VLinearSum public :: FN_VConst public :: FN_VProd public :: FN_VDiv public :: FN_VScale public :: FN_VAbs public :: FN_VInv public :: FN_VAddConst public :: FN_VDotProd public :: FN_VMaxNorm public :: FN_VWrmsNorm public :: FN_VWrmsNormMask public :: FN_VMin public :: FN_VWL2Norm public :: FN_VL1Norm public :: FN_VCompare public :: FN_VInvTest public :: FN_VConstrMask public :: FN_VMinQuotient public :: FN_VLinearCombination public :: FN_VScaleAddMulti public :: FN_VDotProdMulti public :: FN_VLinearSumVectorArray public :: FN_VScaleVectorArray public :: FN_VConstVectorArray public :: FN_VWrmsNormVectorArray public :: FN_VWrmsNormMaskVectorArray public :: FN_VDotProdLocal public :: FN_VMaxNormLocal public :: FN_VMinLocal public :: FN_VL1NormLocal public :: FN_VWSqrSumLocal public :: FN_VWSqrSumMaskLocal public :: FN_VInvTestLocal public :: FN_VConstrMaskLocal public :: FN_VMinQuotientLocal public :: FN_VDotProdMultiLocal public :: FN_VDotProdMultiAllReduce public :: FN_VBufSize public :: FN_VBufPack public :: FN_VBufUnpack public :: FN_VNewVectorArray public :: FN_VCloneEmptyVectorArray public :: FN_VCloneVectorArray public :: 
FN_VDestroyVectorArray public :: FN_VGetVecAtIndexVectorArray public :: FN_VSetVecAtIndexVectorArray public :: FN_VPrint public :: FN_VPrintFile ! WRAPPER DECLARATIONS interface function swigc_FN_VNewEmpty(farg1) & bind(C, name="_wrap_FN_VNewEmpty") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VFreeEmpty(farg1) & bind(C, name="_wrap_FN_VFreeEmpty") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FN_VCopyOps(farg1, farg2) & bind(C, name="_wrap_FN_VCopyOps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VGetVectorID(farg1) & bind(C, name="_wrap_FN_VGetVectorID") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FN_VClone(farg1) & bind(C, name="_wrap_FN_VClone") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VCloneEmpty(farg1) & bind(C, name="_wrap_FN_VCloneEmpty") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroy(farg1) & bind(C, name="_wrap_FN_VDestroy") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSpace") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VGetArrayPointer(farg1) & bind(C, name="_wrap_FN_VGetArrayPointer") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VGetDeviceArrayPointer(farg1) & bind(C, name="_wrap_FN_VGetDeviceArrayPointer") & result(fresult) use, intrinsic :: 
ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function subroutine swigc_FN_VSetArrayPointer(farg1, farg2) & bind(C, name="_wrap_FN_VSetArrayPointer") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_FN_VGetCommunicator(farg1) & bind(C, name="_wrap_FN_VGetCommunicator") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VGetLength(farg1) & bind(C, name="_wrap_FN_VGetLength") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T) :: fresult end function subroutine swigc_FN_VLinearSum(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VLinearSum") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 end subroutine subroutine swigc_FN_VConst(farg1, farg2) & bind(C, name="_wrap_FN_VConst") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VProd(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VProd") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VDiv(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VDiv") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VScale(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VScale") use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VAbs(farg1, farg2) & bind(C, name="_wrap_FN_VAbs") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine 
swigc_FN_VInv(farg1, farg2) & bind(C, name="_wrap_FN_VInv") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine subroutine swigc_FN_VAddConst(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VAddConst") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VDotProd(farg1, farg2) & bind(C, name="_wrap_FN_VDotProd") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNorm(farg1) & bind(C, name="_wrap_FN_VMaxNorm") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNorm(farg1, farg2) & bind(C, name="_wrap_FN_VWrmsNorm") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWrmsNormMask(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWrmsNormMask") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VMin(farg1) & bind(C, name="_wrap_FN_VMin") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWL2Norm(farg1, farg2) & bind(C, name="_wrap_FN_VWL2Norm") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1Norm(farg1) & bind(C, name="_wrap_FN_VL1Norm") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_FN_VCompare(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VCompare") use, intrinsic :: ISO_C_BINDING 
real(C_DOUBLE), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 end subroutine function swigc_FN_VInvTest(farg1, farg2) & bind(C, name="_wrap_FN_VInvTest") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMask(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMask") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotient(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotient") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VLinearCombination(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VLinearCombination") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VScaleAddMulti(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VScaleAddMulti") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMulti(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMulti") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VLinearSumVectorArray(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FN_VLinearSumVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), 
intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: fresult end function function swigc_FN_VScaleVectorArray(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VScaleVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VConstVectorArray(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormVectorArray(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VWrmsNormVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VWrmsNormMaskVectorArray(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FN_VWrmsNormMaskVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FN_VDotProdLocal(farg1, farg2) & bind(C, name="_wrap_FN_VDotProdLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VMaxNormLocal(farg1) & bind(C, name="_wrap_FN_VMaxNormLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VMinLocal(farg1) & bind(C, 
name="_wrap_FN_VMinLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VL1NormLocal(farg1) & bind(C, name="_wrap_FN_VL1NormLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumLocal(farg1, farg2) & bind(C, name="_wrap_FN_VWSqrSumLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VWSqrSumMaskLocal(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VWSqrSumMaskLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 real(C_DOUBLE) :: fresult end function function swigc_FN_VInvTestLocal(farg1, farg2) & bind(C, name="_wrap_FN_VInvTestLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VConstrMaskLocal(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VConstrMaskLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VMinQuotientLocal(farg1, farg2) & bind(C, name="_wrap_FN_VMinQuotientLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 real(C_DOUBLE) :: fresult end function function swigc_FN_VDotProdMultiLocal(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FN_VDotProdMultiLocal") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FN_VDotProdMultiAllReduce(farg1, farg2, farg3) & bind(C, 
name="_wrap_FN_VDotProdMultiAllReduce") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FN_VBufSize(farg1, farg2) & bind(C, name="_wrap_FN_VBufSize") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufPack(farg1, farg2) & bind(C, name="_wrap_FN_VBufPack") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VBufUnpack(farg1, farg2) & bind(C, name="_wrap_FN_VBufUnpack") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FN_VNewVectorArray(farg1) & bind(C, name="_wrap_FN_VNewVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR) :: fresult end function function swigc_FN_VCloneEmptyVectorArray(farg1, farg2) & bind(C, name="_wrap_FN_VCloneEmptyVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FN_VCloneVectorArray(farg1, farg2) & bind(C, name="_wrap_FN_VCloneVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function subroutine swigc_FN_VDestroyVectorArray(farg1, farg2) & bind(C, name="_wrap_FN_VDestroyVectorArray") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 end subroutine function swigc_FN_VGetVecAtIndexVectorArray(farg1, farg2) & bind(C, name="_wrap_FN_VGetVecAtIndexVectorArray") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 
integer(C_INT), intent(in) :: farg2 type(C_PTR) :: fresult end function subroutine swigc_FN_VSetVecAtIndexVectorArray(farg1, farg2, farg3) & bind(C, name="_wrap_FN_VSetVecAtIndexVectorArray") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 end subroutine subroutine swigc_FN_VPrint(farg1) & bind(C, name="_wrap_FN_VPrint") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine subroutine swigc_FN_VPrintFile(farg1, farg2) & bind(C, name="_wrap_FN_VPrintFile") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 end subroutine end interface contains ! MODULE SUBPROGRAMS function FN_VNewEmpty(sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(C_PTR) :: sunctx type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = sunctx fresult = swigc_FN_VNewEmpty(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FN_VFreeEmpty(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VFreeEmpty(farg1) end subroutine function FN_VCopyOps(w, v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: v integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(w) farg2 = c_loc(v) fresult = swigc_FN_VCopyOps(farg1, farg2) swig_result = fresult end function function FN_VGetVectorID(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(N_Vector_ID) :: swig_result type(N_Vector), target, intent(inout) :: w integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VGetVectorID(farg1) swig_result = fresult end function function FN_VClone(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, 
intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VClone(farg1) call c_f_pointer(fresult, swig_result) end function function FN_VCloneEmpty(w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(w) fresult = swigc_FN_VCloneEmpty(farg1) call c_f_pointer(fresult, swig_result) end function subroutine FN_VDestroy(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VDestroy(farg1) end subroutine subroutine FN_VSpace(v, lrw, liw) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v integer(C_INT64_T), dimension(*), target, intent(inout) :: lrw integer(C_INT64_T), dimension(*), target, intent(inout) :: liw type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(v) farg2 = c_loc(lrw(1)) farg3 = c_loc(liw(1)) call swigc_FN_VSpace(farg1, farg2, farg3) end subroutine function FN_VGetArrayPointer(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetArrayPointer(farg1) call c_f_pointer(fresult, swig_result, [1]) end function function FN_VGetDeviceArrayPointer(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(:), pointer :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetDeviceArrayPointer(farg1) call c_f_pointer(fresult, swig_result, [1]) end function subroutine FN_VSetArrayPointer(v_data, v) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), dimension(*), target, intent(inout) :: v_data type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 type(C_PTR) :: farg2 
farg1 = c_loc(v_data(1)) farg2 = c_loc(v) call swigc_FN_VSetArrayPointer(farg1, farg2) end subroutine function FN_VGetCommunicator(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result type(N_Vector), target, intent(inout) :: v type(C_PTR) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetCommunicator(farg1) swig_result = fresult end function function FN_VGetLength(v) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT64_T) :: swig_result type(N_Vector), target, intent(inout) :: v integer(C_INT64_T) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(v) fresult = swigc_FN_VGetLength(farg1) swig_result = fresult end function subroutine FN_VLinearSum(a, x, b, y, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: a type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = a farg2 = c_loc(x) farg3 = b farg4 = c_loc(y) farg5 = c_loc(z) call swigc_FN_VLinearSum(farg1, farg2, farg3, farg4, farg5) end subroutine subroutine FN_VConst(c, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 farg1 = c farg2 = c_loc(z) call swigc_FN_VConst(farg1, farg2) end subroutine subroutine FN_VProd(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VProd(farg1, farg2, farg3) end subroutine subroutine FN_VDiv(x, y, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y type(N_Vector), 
target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(y) farg3 = c_loc(z) call swigc_FN_VDiv(farg1, farg2, farg3) end subroutine subroutine FN_VScale(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VScale(farg1, farg2, farg3) end subroutine subroutine FN_VAbs(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VAbs(farg1, farg2) end subroutine subroutine FN_VInv(x, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) call swigc_FN_VInv(farg1, farg2) end subroutine subroutine FN_VAddConst(x, b, z) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), intent(in) :: b type(N_Vector), target, intent(inout) :: z type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = b farg3 = c_loc(z) call swigc_FN_VAddConst(farg1, farg2, farg3) end subroutine function FN_VDotProd(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProd(farg1, farg2) swig_result = fresult end function function FN_VMaxNorm(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: 
farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNorm(farg1) swig_result = fresult end function function FN_VWrmsNorm(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWrmsNorm(farg1, farg2) swig_result = fresult end function function FN_VWrmsNormMask(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWrmsNormMask(farg1, farg2, farg3) swig_result = fresult end function function FN_VMin(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMin(farg1) swig_result = fresult end function function FN_VWL2Norm(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWL2Norm(farg1, farg2) swig_result = fresult end function function FN_VL1Norm(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1Norm(farg1) swig_result = fresult end function subroutine FN_VCompare(c, x, z) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE), intent(in) :: c type(N_Vector), 
target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z real(C_DOUBLE) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c farg2 = c_loc(x) farg3 = c_loc(z) call swigc_FN_VCompare(farg1, farg2, farg3) end subroutine function FN_VInvTest(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = swigc_FN_VInvTest(farg1, farg2) swig_result = fresult end function function FN_VConstrMask(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult = swigc_FN_VConstrMask(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotient(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotient(farg1, farg2) swig_result = fresult end function function FN_VLinearCombination(nvec, c, x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = x farg4 = c_loc(z) fresult = swigc_FN_VLinearCombination(farg1, farg2, 
farg3, farg4) swig_result = fresult end function function FN_VScaleAddMulti(nvec, a, x, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: a type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvec farg2 = c_loc(a(1)) farg3 = c_loc(x) farg4 = y farg5 = z fresult = swigc_FN_VScaleAddMulti(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FN_VDotProdMulti(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMulti(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VLinearSumVectorArray(nvec, a, x, b, y, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), intent(in) :: a type(C_PTR) :: x real(C_DOUBLE), intent(in) :: b type(C_PTR) :: y type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 farg1 = nvec farg2 = a farg3 = x farg4 = b farg5 = y farg6 = z fresult = swigc_FN_VLinearSumVectorArray(farg1, farg2, farg3, farg4, farg5, farg6) swig_result = fresult end function function FN_VScaleVectorArray(nvec, c, x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), 
intent(in) :: nvec real(C_DOUBLE), dimension(*), target, intent(inout) :: c type(C_PTR) :: x type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(c(1)) farg3 = x farg4 = z fresult = swigc_FN_VScaleVectorArray(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VConstVectorArray(nvec, c, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec real(C_DOUBLE), intent(in) :: c type(C_PTR) :: z integer(C_INT) :: fresult integer(C_INT) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = nvec farg2 = c farg3 = z fresult = swigc_FN_VConstVectorArray(farg1, farg2, farg3) swig_result = fresult end function function FN_VWrmsNormVectorArray(nvec, x, w, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(C_PTR) :: x type(C_PTR) :: w real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = x farg3 = w farg4 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormVectorArray(farg1, farg2, farg3, farg4) swig_result = fresult end function function FN_VWrmsNormMaskVectorArray(nvec, x, w, id, nrm) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(C_PTR) :: x type(C_PTR) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE), dimension(*), target, intent(inout) :: nrm integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = nvec farg2 = x farg3 = w farg4 = c_loc(id) farg5 = c_loc(nrm(1)) fresult = swigc_FN_VWrmsNormMaskVectorArray(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function 
FN_VDotProdLocal(x, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: y real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(y) fresult = swigc_FN_VDotProdLocal(farg1, farg2) swig_result = fresult end function function FN_VMaxNormLocal(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMaxNormLocal(farg1) swig_result = fresult end function function FN_VMinLocal(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VMinLocal(farg1) swig_result = fresult end function function FN_VL1NormLocal(x) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 farg1 = c_loc(x) fresult = swigc_FN_VL1NormLocal(farg1) swig_result = fresult end function function FN_VWSqrSumLocal(x, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(w) fresult = swigc_FN_VWSqrSumLocal(farg1, farg2) swig_result = fresult end function function FN_VWSqrSumMaskLocal(x, w, id) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: w type(N_Vector), target, intent(inout) :: id real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(x) 
farg2 = c_loc(w) farg3 = c_loc(id) fresult = swigc_FN_VWSqrSumMaskLocal(farg1, farg2, farg3) swig_result = fresult end function function FN_VInvTestLocal(x, z) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: z integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(z) fresult = swigc_FN_VInvTestLocal(farg1, farg2) swig_result = fresult end function function FN_VConstrMaskLocal(c, x, m) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: c type(N_Vector), target, intent(inout) :: x type(N_Vector), target, intent(inout) :: m integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = c_loc(c) farg2 = c_loc(x) farg3 = c_loc(m) fresult = swigc_FN_VConstrMaskLocal(farg1, farg2, farg3) swig_result = fresult end function function FN_VMinQuotientLocal(num, denom) & result(swig_result) use, intrinsic :: ISO_C_BINDING real(C_DOUBLE) :: swig_result type(N_Vector), target, intent(inout) :: num type(N_Vector), target, intent(inout) :: denom real(C_DOUBLE) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(num) farg2 = c_loc(denom) fresult = swigc_FN_VMinQuotientLocal(farg1, farg2) swig_result = fresult end function function FN_VDotProdMultiLocal(nvec, x, y, dotprods) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec type(N_Vector), target, intent(inout) :: x type(C_PTR) :: y real(C_DOUBLE), dimension(*), target, intent(inout) :: dotprods integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = nvec farg2 = c_loc(x) farg3 = y farg4 = c_loc(dotprods(1)) fresult = swigc_FN_VDotProdMultiLocal(farg1, farg2, farg3, farg4) swig_result = fresult end function function 
FN_VDotProdMultiAllReduce(nvec_total, x, sum) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result integer(C_INT), intent(in) :: nvec_total type(N_Vector), target, intent(inout) :: x real(C_DOUBLE), dimension(*), target, intent(inout) :: sum integer(C_INT) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = nvec_total farg2 = c_loc(x) farg3 = c_loc(sum(1)) fresult = swigc_FN_VDotProdMultiAllReduce(farg1, farg2, farg3) swig_result = fresult end function function FN_VBufSize(x, size) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x integer(C_INT64_T), dimension(*), target, intent(inout) :: size integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = c_loc(size(1)) fresult = swigc_FN_VBufSize(farg1, farg2) swig_result = fresult end function function FN_VBufPack(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufPack(farg1, farg2) swig_result = fresult end function function FN_VBufUnpack(x, buf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(N_Vector), target, intent(inout) :: x type(C_PTR) :: buf integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(x) farg2 = buf fresult = swigc_FN_VBufUnpack(farg1, farg2) swig_result = fresult end function function FN_VNewVectorArray(count) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(C_PTR) :: fresult integer(C_INT) :: farg1 farg1 = count fresult = swigc_FN_VNewVectorArray(farg1) swig_result = fresult end function function FN_VCloneEmptyVectorArray(count, w) & result(swig_result) use, 
intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneEmptyVectorArray(farg1, farg2) swig_result = fresult end function function FN_VCloneVectorArray(count, w) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: count type(N_Vector), target, intent(inout) :: w type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = count farg2 = c_loc(w) fresult = swigc_FN_VCloneVectorArray(farg1, farg2) swig_result = fresult end function subroutine FN_VDestroyVectorArray(vs, count) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: vs integer(C_INT), intent(in) :: count type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = vs farg2 = count call swigc_FN_VDestroyVectorArray(farg1, farg2) end subroutine function FN_VGetVecAtIndexVectorArray(vs, index) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(N_Vector), pointer :: swig_result type(C_PTR) :: vs integer(C_INT), intent(in) :: index type(C_PTR) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = vs farg2 = index fresult = swigc_FN_VGetVecAtIndexVectorArray(farg1, farg2) call c_f_pointer(fresult, swig_result) end function subroutine FN_VSetVecAtIndexVectorArray(vs, index, w) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: vs integer(C_INT), intent(in) :: index type(N_Vector), target, intent(inout) :: w type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = vs farg2 = index farg3 = c_loc(w) call swigc_FN_VSetVecAtIndexVectorArray(farg1, farg2, farg3) end subroutine subroutine FN_VPrint(v) use, intrinsic :: ISO_C_BINDING type(N_Vector), target, intent(inout) :: v type(C_PTR) :: farg1 farg1 = c_loc(v) call swigc_FN_VPrint(farg1) end subroutine subroutine FN_VPrintFile(v, outfile) use, intrinsic :: ISO_C_BINDING 
type(N_Vector), target, intent(inout) :: v type(C_PTR) :: outfile type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = c_loc(v) farg2 = outfile call swigc_FN_VPrintFile(farg1, farg2) end subroutine end module StanHeaders/src/sundials/fmod/fsundials_context_mod.c0000644000176200001440000001724014645137106022563 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_context.h" SWIGEXPORT int _wrap_FSUNContext_GetProfiler(void *farg1, void *farg2) { int fresult ; SUNContext arg1 = (SUNContext) 0 ; SUNProfiler *arg2 = (SUNProfiler *) 0 ; int result; arg1 = (SUNContext)(farg1); arg2 = (SUNProfiler *)(farg2); result = (int)SUNContext_GetProfiler(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNContext_SetProfiler(void *farg1, void *farg2) { int fresult ; SUNContext arg1 = (SUNContext) 0 ; SUNProfiler arg2 ; int result; arg1 = (SUNContext)(farg1); arg2 = (SUNProfiler)(farg2); result = (int)SUNContext_SetProfiler(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNContext_Free(void *farg1) { int fresult ; SUNContext *arg1 = (SUNContext *) 0 ; int result; #ifdef 
SUNDIALS_BUILD_WITH_PROFILING SUNProfiler profiler; #endif arg1 = (SUNContext *)(farg1); #ifdef SUNDIALS_BUILD_WITH_PROFILING result = (int)SUNContext_GetProfiler(*arg1,&profiler); result = (int)SUNContext_Free(arg1); result = (int)SUNProfiler_Free(&profiler); #else result = (int)SUNContext_Free(arg1); #endif fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNContext_Create(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNContext *arg2 = (SUNContext *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNContext *)(farg2); result = (int)SUNContext_Create(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sundials/fmod/fsundials_futils_mod.c0000644000176200001440000001611114645137106022401 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define 
SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_futils.h" #include #ifdef _MSC_VER # ifndef strtoull # define strtoull _strtoui64 # endif # ifndef strtoll # define strtoll _strtoi64 # endif #endif typedef struct { void* data; size_t size; } SwigArrayWrapper; SWIGINTERN SwigArrayWrapper SwigArrayWrapper_uninitialized() { SwigArrayWrapper result; result.data = NULL; result.size = 0; return result; } SWIGEXPORT void * _wrap_FSUNDIALSFileOpen(SwigArrayWrapper *farg1, SwigArrayWrapper *farg2) { void * fresult ; char *arg1 = (char *) 0 ; char *arg2 = (char *) 0 ; FILE *result = 0 ; arg1 = (char *)(farg1->data); arg2 = (char *)(farg2->data); result = (FILE *)SUNDIALSFileOpen((char const *)arg1,(char const *)arg2); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNDIALSFileClose(void *farg1) { FILE 
*arg1 = (FILE *) 0 ; arg1 = (FILE *)(farg1); SUNDIALSFileClose(arg1); } StanHeaders/src/sundials/fmod/fsundials_nvector_mod.c0000644000176200001440000006217214645137106022563 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif 
# elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal 
with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. 
*/ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nvector.h" SWIGEXPORT N_Vector _wrap_FN_VNewEmpty(void *farg1) { N_Vector fresult ; SUNContext arg1 = (SUNContext) 0 ; N_Vector result; arg1 = (SUNContext)(farg1); result = (N_Vector)N_VNewEmpty(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VFreeEmpty(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VFreeEmpty(arg1); } SWIGEXPORT int _wrap_FN_VCopyOps(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VCopyOps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VGetVectorID(N_Vector farg1) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector_ID result; arg1 = (N_Vector)(farg1); result = (N_Vector_ID)N_VGetVectorID(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT N_Vector _wrap_FN_VClone(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VClone(arg1); fresult = result; return fresult; } SWIGEXPORT N_Vector _wrap_FN_VCloneEmpty(N_Vector farg1) { N_Vector fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector result; arg1 = (N_Vector)(farg1); result = (N_Vector)N_VCloneEmpty(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroy(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VDestroy(arg1); } SWIGEXPORT void _wrap_FN_VSpace(N_Vector farg1, int64_t *farg2, int64_t *farg3) { N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; sunindextype *arg3 = 
(sunindextype *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); arg3 = (sunindextype *)(farg3); N_VSpace(arg1,arg2,arg3); } SWIGEXPORT double * _wrap_FN_VGetArrayPointer(N_Vector farg1) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); result = (realtype *)N_VGetArrayPointer(arg1); fresult = result; return fresult; } SWIGEXPORT double * _wrap_FN_VGetDeviceArrayPointer(N_Vector farg1) { double * fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype *result = 0 ; arg1 = (N_Vector)(farg1); result = (realtype *)N_VGetDeviceArrayPointer(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VSetArrayPointer(double *farg1, N_Vector farg2) { realtype *arg1 = (realtype *) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype *)(farg1); arg2 = (N_Vector)(farg2); N_VSetArrayPointer(arg1,arg2); } SWIGEXPORT void * _wrap_FN_VGetCommunicator(N_Vector farg1) { void * fresult ; N_Vector arg1 = (N_Vector) 0 ; void *result = 0 ; arg1 = (N_Vector)(farg1); result = (void *)N_VGetCommunicator(arg1); fresult = result; return fresult; } SWIGEXPORT int64_t _wrap_FN_VGetLength(N_Vector farg1) { int64_t fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype result; arg1 = (N_Vector)(farg1); result = N_VGetLength(arg1); fresult = (sunindextype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VLinearSum(double const *farg1, N_Vector farg2, double const *farg3, N_Vector farg4, N_Vector farg5) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); N_VLinearSum(arg1,arg2,arg3,arg4,arg5); } SWIGEXPORT void _wrap_FN_VConst(double const *farg1, N_Vector farg2) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); N_VConst(arg1,arg2); } SWIGEXPORT void _wrap_FN_VProd(N_Vector 
farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VProd(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VDiv(N_Vector farg1, N_Vector farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VDiv(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VScale(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VScale(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VAbs(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VAbs(arg1,arg2); } SWIGEXPORT void _wrap_FN_VInv(N_Vector farg1, N_Vector farg2) { N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); N_VInv(arg1,arg2); } SWIGEXPORT void _wrap_FN_VAddConst(N_Vector farg1, double const *farg2, N_Vector farg3) { N_Vector arg1 = (N_Vector) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); N_VAddConst(arg1,arg2,arg3); } SWIGEXPORT double _wrap_FN_VDotProd(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProd(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNorm(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNorm(arg1); fresult = (realtype)(result); return fresult; 
} SWIGEXPORT double _wrap_FN_VWrmsNorm(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWrmsNorm(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWrmsNormMask(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWrmsNormMask(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMin(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMin(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWL2Norm(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWL2Norm(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1Norm(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1Norm(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_FN_VCompare(double const *farg1, N_Vector farg2, N_Vector farg3) { realtype arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (realtype)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); N_VCompare(arg1,arg2,arg3); } SWIGEXPORT int _wrap_FN_VInvTest(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTest(arg1,arg2); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMask(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMask(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotient(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotient(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearCombination(int const *farg1, double *farg2, void *farg3, N_Vector farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); result = (int)N_VLinearCombination(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleAddMulti(int const *farg1, double *farg2, N_Vector farg3, void *farg4, void *farg5) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); result = (int)N_VScaleAddMulti(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMulti(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); 
arg4 = (realtype *)(farg4); result = (int)N_VDotProdMulti(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VLinearSumVectorArray(int const *farg1, double const *farg2, void *farg3, double const *farg4, void *farg5, void *farg6) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype arg4 ; N_Vector *arg5 = (N_Vector *) 0 ; N_Vector *arg6 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector *)(farg5); arg6 = (N_Vector *)(farg6); result = (int)N_VLinearSumVectorArray(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VScaleVectorArray(int const *farg1, double *farg2, void *farg3, void *farg4) { int fresult ; int arg1 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); result = (int)N_VScaleVectorArray(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstVectorArray(int const *farg1, double const *farg2, void *farg3) { int fresult ; int arg1 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)N_VConstVectorArray(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VWrmsNormVectorArray(int const *farg1, void *farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VWrmsNormVectorArray(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FN_VWrmsNormMaskVectorArray(int const *farg1, void *farg2, void *farg3, N_Vector farg4, double *farg5) { int fresult ; int arg1 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype *arg5 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype *)(farg5); result = (int)N_VWrmsNormMaskVectorArray(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VDotProdLocal(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VDotProdLocal(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMaxNormLocal(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMaxNormLocal(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinLocal(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VMinLocal(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VL1NormLocal(N_Vector farg1) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); result = (realtype)N_VL1NormLocal(arg1); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumLocal(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VWSqrSumLocal(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT double _wrap_FN_VWSqrSumMaskLocal(N_Vector farg1, N_Vector farg2, N_Vector farg3) { double fresult ; N_Vector arg1 = 
(N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (realtype)N_VWSqrSumMaskLocal(arg1,arg2,arg3); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VInvTestLocal(N_Vector farg1, N_Vector farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (int)N_VInvTestLocal(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VConstrMaskLocal(N_Vector farg1, N_Vector farg2, N_Vector farg3) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)N_VConstrMaskLocal(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT double _wrap_FN_VMinQuotientLocal(N_Vector farg1, N_Vector farg2) { double fresult ; N_Vector arg1 = (N_Vector) 0 ; N_Vector arg2 = (N_Vector) 0 ; realtype result; arg1 = (N_Vector)(farg1); arg2 = (N_Vector)(farg2); result = (realtype)N_VMinQuotientLocal(arg1,arg2); fresult = (realtype)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMultiLocal(int const *farg1, N_Vector farg2, void *farg3, double *farg4) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; realtype *arg4 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (realtype *)(farg4); result = (int)N_VDotProdMultiLocal(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VDotProdMultiAllReduce(int const *farg1, N_Vector farg2, double *farg3) { int fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; realtype *arg3 = (realtype *) 0 ; int result; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); arg3 = (realtype 
*)(farg3); result = (int)N_VDotProdMultiAllReduce(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufSize(N_Vector farg1, int64_t *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; sunindextype *arg2 = (sunindextype *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (sunindextype *)(farg2); result = (int)N_VBufSize(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufPack(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufPack(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FN_VBufUnpack(N_Vector farg1, void *farg2) { int fresult ; N_Vector arg1 = (N_Vector) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (N_Vector)(farg1); arg2 = (void *)(farg2); result = (int)N_VBufUnpack(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT void * _wrap_FN_VNewVectorArray(int const *farg1) { void * fresult ; int arg1 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); result = (N_Vector *)N_VNewVectorArray(arg1); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FN_VCloneEmptyVectorArray(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneEmptyVectorArray(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void * _wrap_FN_VCloneVectorArray(int const *farg1, N_Vector farg2) { void * fresult ; int arg1 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector *result = 0 ; arg1 = (int)(*farg1); arg2 = (N_Vector)(farg2); result = (N_Vector *)N_VCloneVectorArray(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VDestroyVectorArray(void *farg1, int const *farg2) { N_Vector *arg1 = (N_Vector *) 0 ; int arg2 ; arg1 = (N_Vector *)(farg1); arg2 = (int)(*farg2); 
N_VDestroyVectorArray(arg1,arg2); } SWIGEXPORT N_Vector _wrap_FN_VGetVecAtIndexVectorArray(void *farg1, int const *farg2) { N_Vector fresult ; N_Vector *arg1 = (N_Vector *) 0 ; int arg2 ; N_Vector result; arg1 = (N_Vector *)(farg1); arg2 = (int)(*farg2); result = (N_Vector)N_VGetVecAtIndexVectorArray(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT void _wrap_FN_VSetVecAtIndexVectorArray(void *farg1, int const *farg2, N_Vector farg3) { N_Vector *arg1 = (N_Vector *) 0 ; int arg2 ; N_Vector arg3 = (N_Vector) 0 ; arg1 = (N_Vector *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); N_VSetVecAtIndexVectorArray(arg1,arg2,arg3); } SWIGEXPORT void _wrap_FN_VPrint(N_Vector farg1) { N_Vector arg1 = (N_Vector) 0 ; arg1 = (N_Vector)(farg1); N_VPrint(arg1); } SWIGEXPORT void _wrap_FN_VPrintFile(N_Vector farg1, void *farg2) { N_Vector arg1 = (N_Vector) 0 ; FILE *arg2 = (FILE *) 0 ; arg1 = (N_Vector)(farg1); arg2 = (FILE *)(farg2); N_VPrintFile(arg1,arg2); } StanHeaders/src/sundials/fmod/fsundials_nonlinearsolver_mod.c0000644000176200001440000002653214645137106024323 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_nonlinearsolver.h" SWIGEXPORT SUNNonlinearSolver _wrap_FSUNNonlinSolNewEmpty(void *farg1) { SUNNonlinearSolver fresult ; SUNContext arg1 = (SUNContext) 0 ; SUNNonlinearSolver result; arg1 = (SUNContext)(farg1); result = (SUNNonlinearSolver)SUNNonlinSolNewEmpty(arg1); fresult = result; return fresult; } SWIGEXPORT void _wrap_FSUNNonlinSolFreeEmpty(SUNNonlinearSolver farg1) { SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; arg1 = (SUNNonlinearSolver)(farg1); SUNNonlinSolFreeEmpty(arg1); } SWIGEXPORT int _wrap_FSUNNonlinSolGetType(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinearSolver_Type result; arg1 = (SUNNonlinearSolver)(farg1); result = (SUNNonlinearSolver_Type)SUNNonlinSolGetType(arg1); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolInitialize(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); result = (int)SUNNonlinSolInitialize(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetup(SUNNonlinearSolver farg1, N_Vector farg2, void *farg3) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (void *)(farg3); result = (int)SUNNonlinSolSetup(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSolve(SUNNonlinearSolver farg1, N_Vector farg2, N_Vector farg3, N_Vector farg4, double const *farg5, int const *farg6, void *farg7) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; realtype arg5 ; int arg6 ; void *arg7 = (void *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); arg5 = (realtype)(*farg5); arg6 = (int)(*farg6); arg7 = (void *)(farg7); result = (int)SUNNonlinSolSolve(arg1,arg2,arg3,arg4,arg5,arg6,arg7); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolFree(SUNNonlinearSolver farg1) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); result = (int)SUNNonlinSolFree(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetSysFn(SUNNonlinearSolver farg1, SUNNonlinSolSysFn farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolSysFn arg2 = (SUNNonlinSolSysFn) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolSysFn)(farg2); result = 
(int)SUNNonlinSolSetSysFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetLSetupFn(SUNNonlinearSolver farg1, SUNNonlinSolLSetupFn farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolLSetupFn arg2 = (SUNNonlinSolLSetupFn) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolLSetupFn)(farg2); result = (int)SUNNonlinSolSetLSetupFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetLSolveFn(SUNNonlinearSolver farg1, SUNNonlinSolLSolveFn farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolLSolveFn arg2 = (SUNNonlinSolLSolveFn) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolLSolveFn)(farg2); result = (int)SUNNonlinSolSetLSolveFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetConvTestFn(SUNNonlinearSolver farg1, SUNNonlinSolConvTestFn farg2, void *farg3) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; SUNNonlinSolConvTestFn arg2 = (SUNNonlinSolConvTestFn) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (SUNNonlinSolConvTestFn)(farg2); arg3 = (void *)(farg3); result = (int)SUNNonlinSolSetConvTestFn(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolSetMaxIters(SUNNonlinearSolver farg1, int const *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int arg2 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int)(*farg2); result = (int)SUNNonlinSolSetMaxIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetNumIters(SUNNonlinearSolver farg1, long *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (long *)(farg2); result = (int)SUNNonlinSolGetNumIters(arg1,arg2); 
fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetCurIter(SUNNonlinearSolver farg1, int *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (int *)(farg2); result = (int)SUNNonlinSolGetCurIter(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNNonlinSolGetNumConvFails(SUNNonlinearSolver farg1, long *farg2) { int fresult ; SUNNonlinearSolver arg1 = (SUNNonlinearSolver) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (SUNNonlinearSolver)(farg1); arg2 = (long *)(farg2); result = (int)SUNNonlinSolGetNumConvFails(arg1,arg2); fresult = (int)(result); return fresult; } StanHeaders/src/sundials/fmod/fsundials_types_mod.c0000644000176200001440000001465614645137106022253 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define 
SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. */ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include #include "sundials/sundials_types.h" #ifndef SUNDIALS_DOUBLE_PRECISION #error "The Fortran bindings are only targeted at double-precision" #endif #ifndef SUNDIALS_INT64_T #error "The Fortran bindings are only targeted at 64-bit indices" #endif StanHeaders/src/sundials/fmod/fsundials_profiler_mod.c0000644000176200001440000002112614645137106022717 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. 
Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define 
SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define 
_SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. 
*/ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "sundials/sundials_profiler.h" #if SUNDIALS_MPI_ENABLED #include #endif #include #ifdef _MSC_VER # ifndef strtoull # define strtoull _strtoui64 # endif # ifndef strtoll # define strtoll _strtoi64 # endif #endif typedef struct { void* data; size_t size; } SwigArrayWrapper; SWIGINTERN SwigArrayWrapper SwigArrayWrapper_uninitialized() { SwigArrayWrapper result; result.data = NULL; result.size = 0; return result; } SWIGEXPORT int _wrap_FSUNProfiler_Free(void *farg1) { int fresult ; SUNProfiler *arg1 = (SUNProfiler *) 0 ; int result; arg1 = (SUNProfiler *)(farg1); result = (int)SUNProfiler_Free(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNProfiler_Begin(void *farg1, SwigArrayWrapper *farg2) { int fresult ; SUNProfiler arg1 = (SUNProfiler) 0 ; char *arg2 = (char *) 0 ; int result; arg1 = (SUNProfiler)(farg1); arg2 = (char *)(farg2->data); result = (int)SUNProfiler_Begin(arg1,(char const *)arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNProfiler_End(void *farg1, SwigArrayWrapper *farg2) { int fresult ; SUNProfiler arg1 = (SUNProfiler) 0 ; char *arg2 = (char *) 0 ; int result; arg1 = (SUNProfiler)(farg1); arg2 = (char *)(farg2->data); result = (int)SUNProfiler_End(arg1,(char const *)arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNProfiler_Print(void *farg1, void *farg2) { int fresult ; SUNProfiler arg1 = (SUNProfiler) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (SUNProfiler)(farg1); arg2 = (FILE *)(farg2); result = (int)SUNProfiler_Print(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FSUNProfiler_Create(void *farg1, 
SwigArrayWrapper *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; char *arg2 = (char *) 0 ; SUNProfiler *arg3 = (SUNProfiler *) 0 ; int result; #if SUNDIALS_MPI_ENABLED MPI_Comm comm; #endif arg1 = (void *)(farg1); arg2 = (char *)(farg2->data); arg3 = (SUNProfiler *)(farg3); #if SUNDIALS_MPI_ENABLED if (arg1 != NULL) { comm = MPI_Comm_f2c(*((MPI_Fint *) arg1)); result = (int)SUNProfiler_Create((void*)&comm,(char const *)arg2,arg3); } else { result = (int)SUNProfiler_Create(arg1,(char const *)arg2,arg3); } #else result = (int)SUNProfiler_Create(arg1,(char const *)arg2,arg3); #endif fresult = (int)(result); return fresult; } StanHeaders/src/sundials/sundials_lapack_defs.h0000644000176200001440000000736614645137106021416 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_LAPACK_H #define _SUNDIALS_LAPACK_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ================================================================== * Blas and Lapack functions * ================================================================== */ #if defined(SUNDIALS_F77_FUNC) #define dgbtrf_f77 SUNDIALS_F77_FUNC(dgbtrf, DGBTRF) #define dgbtrs_f77 SUNDIALS_F77_FUNC(dgbtrs, DGBTRS) #define dgetrf_f77 SUNDIALS_F77_FUNC(dgetrf, DGETRF) #define dgetrs_f77 SUNDIALS_F77_FUNC(dgetrs, DGETRS) #define sgbtrf_f77 SUNDIALS_F77_FUNC(sgbtrf, SGBTRF) #define sgbtrs_f77 SUNDIALS_F77_FUNC(sgbtrs, SGBTRS) #define sgetrf_f77 SUNDIALS_F77_FUNC(sgetrf, SGETRF) #define sgetrs_f77 SUNDIALS_F77_FUNC(sgetrs, SGETRS) #else #define dgbtrf_f77 dgbtrf_ #define dgbtrs_f77 dgbtrs_ #define dgetrf_f77 dgetrf_ #define dgetrs_f77 dgetrs_ #define sgbtrf_f77 sgbtrf_ #define sgbtrs_f77 sgbtrs_ #define sgetrf_f77 sgetrf_ #define sgetrs_f77 sgetrs_ #endif /* LAPACK */ extern void dgbtrf_f77(const sunindextype *m, const sunindextype *n, const sunindextype *kl, const sunindextype *ku, double *ab, sunindextype *ldab, sunindextype *ipiv, sunindextype *info); extern void dgbtrs_f77(const char *trans, const sunindextype *n, const sunindextype *kl, const sunindextype *ku, const sunindextype *nrhs, double *ab, const sunindextype *ldab, sunindextype *ipiv, double *b, const sunindextype *ldb, sunindextype *info); extern void dgetrf_f77(const sunindextype *m, const sunindextype *n, double *a, sunindextype *lda, sunindextype *ipiv, sunindextype *info); extern void dgetrs_f77(const char *trans, const sunindextype *n, const sunindextype *nrhs, double *a, const sunindextype *lda, sunindextype *ipiv, double *b, const sunindextype *ldb, sunindextype *info); extern void sgbtrf_f77(const sunindextype *m, const sunindextype *n, 
const sunindextype *kl, const sunindextype *ku, float *ab, sunindextype *ldab, sunindextype *ipiv, sunindextype *info); extern void sgbtrs_f77(const char *trans, const sunindextype *n, const sunindextype *kl, const sunindextype *ku, const sunindextype *nrhs, float *ab, const sunindextype *ldab, sunindextype *ipiv, float *b, const sunindextype *ldb, sunindextype *info); extern void sgetrf_f77(const sunindextype *m, const sunindextype *n, float *a, sunindextype *lda, sunindextype *ipiv, sunindextype *info); extern void sgetrs_f77(const char *trans, const sunindextype *n, const sunindextype *nrhs, float *a, const sunindextype *lda, sunindextype *ipiv, float *b, const sunindextype *ldb, sunindextype *info); #ifdef __cplusplus } #endif #endif StanHeaders/src/sundials/sundials_band.c0000644000176200001440000002077714645137106020062 0ustar liggesusers/* * ----------------------------------------------------------------- * $Revision$ * $Date$ * ----------------------------------------------------------------- * Programmer(s): Alan C. Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a generic BAND linear * solver package. 
* ----------------------------------------------------------------- */ #include #include #include #include #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define ROW(i,j,smu) (i-j+smu) /* * ----------------------------------------------------- * Functions working on SUNDlsMat * ----------------------------------------------------- */ sunindextype SUNDlsMat_BandGBTRF(SUNDlsMat A, sunindextype* p) { return(SUNDlsMat_bandGBTRF(A->cols, A->M, A->mu, A->ml, A->s_mu, p)); } sunindextype BandGBTRF(SUNDlsMat A, sunindextype *p) { return(SUNDlsMat_bandGBTRF(A->cols, A->M, A->mu, A->ml, A->s_mu, p)); } void SUNDlsMat_BandGBTRS(SUNDlsMat A, sunindextype *p, realtype *b) { SUNDlsMat_bandGBTRS(A->cols, A->M, A->s_mu, A->ml, p, b); } void BandGBTRS(SUNDlsMat A, sunindextype *p, realtype *b) { SUNDlsMat_bandGBTRS(A->cols, A->M, A->s_mu, A->ml, p, b); } void SUNDlsMat_BandCopy(SUNDlsMat A, SUNDlsMat B, sunindextype copymu, sunindextype copyml) { SUNDlsMat_bandCopy(A->cols, B->cols, A->M, A->s_mu, B->s_mu, copymu, copyml); } void BandCopy(SUNDlsMat A, SUNDlsMat B, sunindextype copymu, sunindextype copyml) { SUNDlsMat_bandCopy(A->cols, B->cols, A->M, A->s_mu, B->s_mu, copymu, copyml); } void SUNDlsMat_BandScale(realtype c, SUNDlsMat A) { SUNDlsMat_bandScale(c, A->cols, A->M, A->mu, A->ml, A->s_mu); } void BandScale(realtype c, SUNDlsMat A) { SUNDlsMat_bandScale(c, A->cols, A->M, A->mu, A->ml, A->s_mu); } void SUNDlsMat_BandMatvec(SUNDlsMat A, realtype *x, realtype *y) { SUNDlsMat_bandMatvec(A->cols, x, y, A->M, A->mu, A->ml, A->s_mu); } void BandMatvec(SUNDlsMat A, realtype *x, realtype *y) { SUNDlsMat_bandMatvec(A->cols, x, y, A->M, A->mu, A->ml, A->s_mu); } /* * ----------------------------------------------------- * Functions working on realtype** * ----------------------------------------------------- */ sunindextype bandGBTRF(realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu, sunindextype *p) { return(SUNDlsMat_bandGBTRF(a, n, mu, ml, smu, p)); 
} sunindextype SUNDlsMat_bandGBTRF(realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu, sunindextype *p) { sunindextype c, r, num_rows; sunindextype i, j, k, l, storage_l, storage_k, last_col_k, last_row_k; realtype *a_c, *col_k, *diag_k, *sub_diag_k, *col_j, *kptr, *jptr; realtype max, temp, mult, a_kj; booleantype swap; /* zero out the first smu - mu rows of the rectangular array a */ num_rows = smu - mu; if (num_rows > 0) { for (c=0; c < n; c++) { a_c = a[c]; for (r=0; r < num_rows; r++) { a_c[r] = ZERO; } } } /* k = elimination step number */ for (k=0; k < n-1; k++, p++) { col_k = a[k]; diag_k = col_k + smu; sub_diag_k = diag_k + 1; last_row_k = SUNMIN(n-1,k+ml); /* find l = pivot row number */ l=k; max = SUNRabs(*diag_k); for (i=k+1, kptr=sub_diag_k; i <= last_row_k; i++, kptr++) { if (SUNRabs(*kptr) > max) { l=i; max = SUNRabs(*kptr); } } storage_l = ROW(l, k, smu); *p = l; /* check for zero pivot element */ if (col_k[storage_l] == ZERO) return(k+1); /* swap a(l,k) and a(k,k) if necessary */ if ( (swap = (l != k) )) { temp = col_k[storage_l]; col_k[storage_l] = *diag_k; *diag_k = temp; } /* Scale the elements below the diagonal in */ /* column k by -1.0 / a(k,k). After the above swap, */ /* a(k,k) holds the pivot element. This scaling */ /* stores the pivot row multipliers -a(i,k)/a(k,k) */ /* in a(i,k), i=k+1, ..., SUNMIN(n-1,k+ml). */ mult = -ONE / (*diag_k); for (i=k+1, kptr = sub_diag_k; i <= last_row_k; i++, kptr++) (*kptr) *= mult; /* row_i = row_i - [a(i,k)/a(k,k)] row_k, i=k+1, ..., SUNMIN(n-1,k+ml) */ /* row k is the pivot row after swapping with row l. */ /* The computation is done one column at a time, */ /* column j=k+1, ..., SUNMIN(k+smu,n-1). */ last_col_k = SUNMIN(k+smu,n-1); for (j=k+1; j <= last_col_k; j++) { col_j = a[j]; storage_l = ROW(l,j,smu); storage_k = ROW(k,j,smu); a_kj = col_j[storage_l]; /* Swap the elements a(k,j) and a(k,l) if l!=k. 
*/ if (swap) { col_j[storage_l] = col_j[storage_k]; col_j[storage_k] = a_kj; } /* a(i,j) = a(i,j) - [a(i,k)/a(k,k)]*a(k,j) */ /* a_kj = a(k,j), *kptr = - a(i,k)/a(k,k), *jptr = a(i,j) */ if (a_kj != ZERO) { for (i=k+1, kptr=sub_diag_k, jptr=col_j+ROW(k+1,j,smu); i <= last_row_k; i++, kptr++, jptr++) (*jptr) += a_kj * (*kptr); } } } /* set the last pivot row to be n-1 and check for a zero pivot */ *p = n-1; if (a[n-1][smu] == ZERO) return(n); /* return 0 to indicate success */ return(0); } void bandGBTRS(realtype **a, sunindextype n, sunindextype smu, sunindextype ml, sunindextype *p, realtype *b) { SUNDlsMat_bandGBTRS(a, n, smu, ml, p, b); } void SUNDlsMat_bandGBTRS(realtype **a, sunindextype n, sunindextype smu, sunindextype ml, sunindextype *p, realtype *b) { sunindextype k, l, i, first_row_k, last_row_k; realtype mult, *diag_k; /* Solve Ly = Pb, store solution y in b */ for (k=0; k < n-1; k++) { l = p[k]; mult = b[l]; if (l != k) { b[l] = b[k]; b[k] = mult; } diag_k = a[k]+smu; last_row_k = SUNMIN(n-1,k+ml); for (i=k+1; i <= last_row_k; i++) b[i] += mult * diag_k[i-k]; } /* Solve Ux = y, store solution x in b */ for (k=n-1; k >= 0; k--) { diag_k = a[k]+smu; first_row_k = SUNMAX(0,k-smu); b[k] /= (*diag_k); mult = -b[k]; for (i=first_row_k; i <= k-1; i++) b[i] += mult*diag_k[i-k]; } } void bandCopy(realtype **a, realtype **b, sunindextype n, sunindextype a_smu, sunindextype b_smu, sunindextype copymu, sunindextype copyml) { SUNDlsMat_bandCopy(a, b, n, a_smu, b_smu, copymu, copyml); } void SUNDlsMat_bandCopy(realtype **a, realtype **b, sunindextype n, sunindextype a_smu, sunindextype b_smu, sunindextype copymu, sunindextype copyml) { sunindextype i, j, copySize; realtype *a_col_j, *b_col_j; copySize = copymu + copyml + 1; for (j=0; j < n; j++) { a_col_j = a[j]+a_smu-copymu; b_col_j = b[j]+b_smu-copymu; for (i=0; i < copySize; i++) b_col_j[i] = a_col_j[i]; } } void bandScale(realtype c, realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype 
smu) { SUNDlsMat_bandScale(c, a, n, mu, ml, smu); } void SUNDlsMat_bandScale(realtype c, realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu) { sunindextype i, j, colSize; realtype *col_j; colSize = mu + ml + 1; for(j=0; j < n; j++) { col_j = a[j]+smu-mu; for (i=0; i < colSize; i++) col_j[i] *= c; } } void bandAddIdentity(realtype **a, sunindextype n, sunindextype smu) { SUNDlsMat_bandAddIdentity(a, n, smu); } void SUNDlsMat_bandAddIdentity(realtype **a, sunindextype n, sunindextype smu) { sunindextype j; for(j=0; j < n; j++) a[j][smu] += ONE; } void bandMatvec(realtype **a, realtype *x, realtype *y, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu) { SUNDlsMat_bandMatvec(a, x, y, n, mu, ml, smu); } void SUNDlsMat_bandMatvec(realtype **a, realtype *x, realtype *y, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu) { sunindextype i, j, is, ie; realtype *col_j; for (i=0; i j-mu) ? 0 : j-mu; ie = (n-1 < j+ml) ? n-1 : j+ml; for (i=is; i<=ie; i++) y[i] += col_j[i-j+mu]*x[j]; } } StanHeaders/src/sundials/sundials_memory.c0000644000176200001440000001312514645137106020453 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS memory helper. 
* ----------------------------------------------------------------*/ #include #include "sundials_debug.h" #include #include "sundials_context_impl.h" #if defined(SUNDIALS_BUILD_WITH_PROFILING) static SUNProfiler getSUNProfiler(SUNMemoryHelper H) { return(H->sunctx->profiler); } #endif SUNMemory SUNMemoryNewEmpty(void) { SUNMemory mem = NULL; mem = (SUNMemory) malloc(sizeof(struct _SUNMemory)); if (mem == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryNewEmpty: malloc failed\n"); return(NULL); } return(mem); } SUNMemoryHelper SUNMemoryHelper_NewEmpty(SUNContext sunctx) { SUNMemoryHelper helper = NULL; if (sunctx == NULL) return(NULL); helper = (SUNMemoryHelper) malloc(sizeof(struct _SUNMemoryHelper)); if (helper == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_NewEmpty: malloc failed\n"); return(NULL); } helper->ops = (SUNMemoryHelper_Ops) malloc(sizeof(struct _SUNMemoryHelper_Ops)); if (helper->ops == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_NewEmpty: malloc failed\n"); free(helper); return(NULL); } /* Set all ops to NULL */ memset(helper->ops, 0, sizeof(struct _SUNMemoryHelper_Ops)); helper->content = NULL; helper->sunctx = sunctx; return(helper); } int SUNMemoryHelper_CopyOps(SUNMemoryHelper src, SUNMemoryHelper dst) { /* Check that ops structures exist */ if (src == NULL || dst == NULL || src->ops == NULL || dst->ops == NULL) return(-1); memcpy(dst->ops, src->ops, sizeof(struct _SUNMemoryHelper_Ops)); return(0); } booleantype SUNMemoryHelper_ImplementsRequiredOps(SUNMemoryHelper helper) { if (helper->ops->alloc == NULL || helper->ops->dealloc == NULL || helper->ops->copy == NULL) { return(SUNFALSE); } return(SUNTRUE); } SUNMemory SUNMemoryHelper_Alias(SUNMemory mem) { SUNMemory alias = SUNMemoryNewEmpty(); alias->ptr = mem->ptr; alias->type = mem->type; alias->own = SUNFALSE; return(alias); } SUNMemory SUNMemoryHelper_Wrap(void* ptr, SUNMemoryType mem_type) { SUNMemory mem = SUNMemoryNewEmpty(); mem->ptr = ptr; mem->own = SUNFALSE; 
switch(mem_type) { case SUNMEMTYPE_HOST: mem->type = SUNMEMTYPE_HOST; break; case SUNMEMTYPE_PINNED: mem->type = SUNMEMTYPE_PINNED; break; case SUNMEMTYPE_DEVICE: mem->type = SUNMEMTYPE_DEVICE; break; case SUNMEMTYPE_UVM: mem->type = SUNMEMTYPE_UVM; break; default: free(mem); SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Wrap: unknown memory type\n"); return(NULL); } return(mem); } int SUNMemoryHelper_Alloc(SUNMemoryHelper helper, SUNMemory* memptr, size_t memsize, SUNMemoryType mem_type, void* queue) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(helper)); if (helper->ops->alloc == NULL) ier = -1; else ier = helper->ops->alloc(helper, memptr, memsize, mem_type, queue); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(helper)); return(ier); } int SUNMemoryHelper_Dealloc(SUNMemoryHelper helper, SUNMemory mem, void* queue) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(helper)); if (helper->ops->dealloc == NULL) ier = -1; if (mem == NULL) ier = 0; else ier = helper->ops->dealloc(helper, mem, queue); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(helper)); return(ier); } int SUNMemoryHelper_Copy(SUNMemoryHelper helper, SUNMemory dst, SUNMemory src, size_t memory_size, void* queue) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(helper)); if (helper->ops->copy == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Copy: function pointer is NULL\n"); ier = -1; } else { ier = helper->ops->copy(helper, dst, src, memory_size, queue); } SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(helper)); return(ier); } int SUNMemoryHelper_CopyAsync(SUNMemoryHelper helper, SUNMemory dst, SUNMemory src, size_t memory_size, void* queue) { int ier; SUNDIALS_MARK_FUNCTION_BEGIN(getSUNProfiler(helper)); if (helper->ops->copyasync == NULL) ier = SUNMemoryHelper_Copy(helper, dst, src, memory_size, queue); else ier = helper->ops->copyasync(helper, dst, src, memory_size, queue); SUNDIALS_MARK_FUNCTION_END(getSUNProfiler(helper)); return(ier); } int 
SUNMemoryHelper_Destroy(SUNMemoryHelper helper) { if (helper->ops->destroy == NULL) { if (helper->content != NULL) { return(-1); } else { free(helper->ops); free(helper); } } else { return(helper->ops->destroy(helper)); } return(0); } SUNMemoryHelper SUNMemoryHelper_Clone(SUNMemoryHelper helper) { if (helper->ops->clone == NULL) { if (helper->content != NULL) { return(NULL); } else { SUNMemoryHelper hclone = SUNMemoryHelper_NewEmpty(helper->sunctx); if (hclone) SUNMemoryHelper_CopyOps(helper, hclone); return(hclone); } } else { return(helper->ops->clone(helper)); } } StanHeaders/src/sundials/sundials_hashmap.h0000644000176200001440000002354214645137106020575 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * A simple header-only hashmap implementation for char* keys and * void* values. Uses linear probing to resolve collisions. * The values can be anything, but will be freed by * the hash map upon its destruction. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_HASHMAP_H #define _SUNDIALS_HASHMAP_H #include #include /* For a nice discussion on popular hashing algorithms see: https://softwareengineering.stackexchange.com/questions/49550/which-hashing-algorithm-is-best-for-uniqueness-and-speed/145633#145633 This is a 64-bit implementation of the 'a' modification of the Fowler–Noll–Vo hash (i.e., FNV1-a). 
*/ static unsigned long fnv1a_hash(const char* str) { const unsigned long long prime = 14695981039346656037U; /* prime */ unsigned long long hash = 1099511628211U; /* offset basis */ char c; while ((c = *str++)) hash = (hash^c) * prime; return hash; } typedef struct _SUNHashMapKeyValue* SUNHashMapKeyValue; struct _SUNHashMapKeyValue { const char* key; void* value; }; typedef struct _SUNHashMap *SUNHashMap; struct _SUNHashMap { int size; /* current number of entries */ int max_size; /* max number of entries */ SUNHashMapKeyValue* buckets; }; /* This function creates a new SUNHashMap object allocated to hold up to 'max_size' entries. **Arguments:** * ``max_size`` -- the max number of entries in the hashmap * ``map`` -- on input, a SUNHasMap pointer, on output the SUNHashMap will be allocated **Returns:** * ``0`` -- success * ``-1`` -- an error occurred */ static int SUNHashMap_New(int max_size, SUNHashMap* map) { int i; if (max_size <= 0) return(-1); *map = NULL; *map = (SUNHashMap) malloc(sizeof(struct _SUNHashMap)); if (map == NULL) return(-1); (*map)->size = 0; (*map)->max_size = max_size; (*map)->buckets = NULL; (*map)->buckets = (SUNHashMapKeyValue*) malloc(max_size*sizeof(SUNHashMapKeyValue)); if ((*map)->buckets == NULL) { free(*map); return(-1); } /* Initialize all buckets to NULL */ for (i = 0; i < max_size; i++) (*map)->buckets[i] = NULL; return(0); } /* This function frees the SUNHashMap object. 
**Arguments:** * ``map`` -- on input, a SUNHasMap pointer, on output the SUNHashMap will be deallocated and set to ``NULL`` * ``freevalue`` -- callback function that should free the value object **Returns:** * ``0`` -- success * ``-1`` -- an error occurred */ static int SUNHashMap_Free(SUNHashMap* map, void (*freevalue)(void* ptr)) { int i; if (map == NULL || freevalue == NULL) return(-1); for (i = 0; i < (*map)->max_size; i++) { if ((*map)->buckets[i] && (*map)->buckets[i]->value) freevalue((*map)->buckets[i]->value); if ((*map)->buckets[i]) free((*map)->buckets[i]); } if ((*map)->buckets) free((*map)->buckets); if (*map) free(*map); *map = NULL; return(0); } /* This function iterates the map over the range [start, N]. N is either the index at which ``yieldfn`` indicates the iteration should stop, or the max entries in the map. **Arguments:** * ``map`` -- the ``SUNHashMap`` object to operate on * ``start`` -- the start of the iteration range * ``yieldfn`` -- the callback function to call every iteration this should return -1 to continue the iteration, or >= 0 to stop; the first argument is the current index, the second argument is the current key-value pair, and the final argument is the same pointer ``ctx`` as the final argument to SUNHashMapIterate. 
* ``ctx`` -- a pointer to pass on to ``yieldfn`` **Returns:** * ``max_size`` -- iterated the whole map * ``>=0`` -- the index at which the iteration stopped * ``<-1`` -- an error occurred */ static int SUNHashMap_Iterate(SUNHashMap map, int start, int (*yieldfn)(int, SUNHashMapKeyValue, void*), void* ctx) { int i; if (map == NULL || yieldfn == NULL) return(-2); for (i = start; i < map->max_size; i++) { int retval = yieldfn(i, map->buckets[i], ctx); if (retval >= 0) return(retval); /* yieldfn indicates the loop should break */ if (retval < -1) return(retval); /* error occurred */ } return(map->max_size); } static int sunHashMapLinearProbeInsert(int idx, SUNHashMapKeyValue kv, void* ctx) { /* find the next open spot */ if (kv == NULL) return(idx); /* open spot found at idx */ return(-1); /* keep looking */ } /* This function creates a key-value pair and attempts to insert it into the map. Will use linear probing if there is a collision. **Arguments:** * ``map`` -- the ``SUNHashMap`` object to operate on * ``key`` -- the key to store * ``value`` -- the value associated with the key **Returns:** * ``0`` -- success * ``-1`` -- an error occurred * ``-2`` -- the map is full */ static int SUNHashMap_Insert(SUNHashMap map, const char* key, void* value) { int idx, retval; SUNHashMapKeyValue kvp; if (map == NULL || key == NULL || value == NULL) return(-1); /* We want the index to be in (0, map->max_size) */ idx = (int) (fnv1a_hash(key) % map->max_size); /* Check if the bucket is already filled */ if (map->buckets[idx] != NULL) { /* Find the next open spot */ retval = SUNHashMap_Iterate(map, idx, sunHashMapLinearProbeInsert, NULL); if (retval < 0) return(-1); /* error occurred */ else if (retval == map->max_size) return(-2); /* no open entry */ else idx = retval; } /* Create the key-value pair */ kvp = (SUNHashMapKeyValue) malloc(sizeof(struct _SUNHashMapKeyValue)); if (kvp == NULL) return(-1); kvp->key = key; kvp->value = value; /* Insert the key-value pair */ 
map->buckets[idx] = kvp; map->size++; return(0); } static int sunHashMapLinearProbeGet(int idx, SUNHashMapKeyValue kv, void* key) { /* target key cannot be NULL */ if (key == NULL) return(-2); /* find the matching entry */ if (kv == NULL) return(-1); /* keep looking since this bucket is empty */ else if (!strcmp((const char*) kv->key, (const char*) key)) return(idx); /* found it at idx */ else return(-1); /* keep looking */ } /* This function gets the value for the given key. **Arguments:** * ``map`` -- the ``SUNHashMap`` object to operate on * ``key`` -- the key to look up * ``value`` -- the value associated with the key **Returns:** * ``0`` -- success * ``-1`` -- an error occurred * ``-2`` -- key not found */ static int SUNHashMap_GetValue(SUNHashMap map, const char* key, void** value) { int idx, retval; if (map == NULL || key == NULL || value == NULL) return(-1); /* We want the index to be in (0, map->max_size) */ idx = (int) (fnv1a_hash(key) % map->max_size); /* Check if the key exists */ if (map->buckets[idx] == NULL) return(-2); /* Check to see if this is a collision */ if (strcmp((const char*) map->buckets[idx]->key, (const char*) key)) { /* Keys did not match, so we have a collision and need to probe */ retval = SUNHashMap_Iterate(map, idx+1, sunHashMapLinearProbeGet, (void*) key); if (retval < 0) return(-1); /* error occurred */ else if (retval == map->max_size) return(-2); /* not found */ } /* Return a reference to the value only */ *value = map->buckets[idx]->value; return(0); } /* This function allocates a new array the same max_size as the map, then it sorts map into a new array of key-value pairs leaving the map unchanged. 
**Arguments:** * ``map`` -- the ``SUNHashMap`` object to operate on * ``sorted`` -- pointer to the sorted array of key-value pairs, this function will allocate the array * ``compar`` -- comparator function that is passed to the C standard qsort function **Returns:** * ``0`` -- success * ``-1`` -- an error occurred */ static int SUNHashMap_Sort(SUNHashMap map, SUNHashMapKeyValue** sorted, int (*compar)(const void *, const void*)) { int i; if (map == NULL || compar == NULL) return(-1); *sorted = (SUNHashMapKeyValue*) malloc(map->max_size*sizeof(SUNHashMapKeyValue)); if (*sorted == NULL) return(-1); /* Copy the buckets into a new array */ for (i = 0; i < map->max_size; i++) (*sorted)[i] = map->buckets[i]; qsort(*sorted, map->max_size, sizeof(SUNHashMapKeyValue), compar); return(0); } /* This function allocates a new array with just they values of the map. **Arguments:** * ``map`` -- the ``SUNHashMap`` object to operate on * ``values`` -- pointer to the array of keys * ``value_size`` -- the size of the values in bytes **Returns:** * ``0`` -- success * ``-1`` -- an error occurred */ #if SUNDIALS_MPI_ENABLED static int SUNHashMap_Values(SUNHashMap map, void*** values, size_t value_size) { int i, count = 0; if (map == NULL) return(-1); *values = (void**) malloc(map->size*sizeof(value_size)); if (values == NULL) return(-1); /* Copy the values into a new array */ for (i = 0; i < map->max_size; i++) { if (map->buckets[i]) (*values)[count++] = map->buckets[i]->value; } return(0); } #endif #endif StanHeaders/src/sunmemory/0000755000176200001440000000000014511135055015300 5ustar liggesusersStanHeaders/src/sunmemory/cuda/0000755000176200001440000000000014511135055016214 5ustar liggesusersStanHeaders/src/sunmemory/cuda/sundials_cuda_memory.cu0000644000176200001440000001562614645137106022774 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. 
Balos @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * SUNDIALS CUDA memory helper implementation.
 * ----------------------------------------------------------------*/

/* NOTE(review): the two angle-bracket header names were lost in packaging
   (bare "#include"); restored to the headers this translation unit
   requires -- confirm against the upstream SUNDIALS sources. */
#include <cuda_runtime.h>
#include <sunmemory/sunmemory_cuda.h>

#include "sundials_debug.h"
#include "sundials_cuda.h"

/* Create a SUNMemoryHelper that manages host, pinned, device, and
   managed (UVM) memory through the CUDA runtime.
   Returns NULL if the helper cannot be allocated. */
SUNMemoryHelper SUNMemoryHelper_Cuda(SUNContext sunctx)
{
  SUNMemoryHelper helper;

  /* Allocate the helper */
  helper = SUNMemoryHelper_NewEmpty(sunctx);
  if (helper == NULL)
  {
    /* robustness: mirror the NULL check done by the SYCL helper */
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Cuda: SUNMemoryHelper_NewEmpty returned NULL\n");
    return(NULL);
  }

  /* Set the ops */
  helper->ops->alloc     = SUNMemoryHelper_Alloc_Cuda;
  helper->ops->dealloc   = SUNMemoryHelper_Dealloc_Cuda;
  helper->ops->copy      = SUNMemoryHelper_Copy_Cuda;
  helper->ops->copyasync = SUNMemoryHelper_CopyAsync_Cuda;

  /* Attach content and ops */
  helper->content = NULL;

  return helper;
}

/* Allocate a SUNMemory of mem_size bytes of kind mem_type
   (HOST -> malloc, PINNED -> cudaMallocHost, DEVICE -> cudaMalloc,
   UVM -> cudaMallocManaged). On success *memptr owns the memory.
   Returns 0 on success, -1 on failure. The queue argument is unused. */
int SUNMemoryHelper_Alloc_Cuda(SUNMemoryHelper helper, SUNMemory* memptr,
                               size_t mem_size, SUNMemoryType mem_type,
                               void* queue)
{
  SUNMemory mem = SUNMemoryNewEmpty();
  if (mem == NULL)
  {
    /* robustness: guard the struct allocation before writing to it */
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Cuda: SUNMemoryNewEmpty returned NULL\n");
    return(-1);
  }

  mem->ptr  = NULL;
  mem->own  = SUNTRUE;
  mem->type = mem_type;

  if (mem_type == SUNMEMTYPE_HOST)
  {
    mem->ptr = malloc(mem_size);
    if (mem->ptr == NULL)
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Cuda: malloc returned NULL\n");
      free(mem);
      return(-1);
    }
  }
  else if (mem_type == SUNMEMTYPE_PINNED)
  {
    if (!SUNDIALS_CUDA_VERIFY(cudaMallocHost(&(mem->ptr), mem_size)))
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Cuda: cudaMallocHost failed\n");
      free(mem);
      return(-1);
    }
  }
  else if (mem_type == SUNMEMTYPE_DEVICE)
  {
    if (!SUNDIALS_CUDA_VERIFY(cudaMalloc(&(mem->ptr), mem_size)))
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Cuda: cudaMalloc failed\n");
      free(mem);
      return(-1);
    }
  }
  else if (mem_type == SUNMEMTYPE_UVM)
  {
    if (!SUNDIALS_CUDA_VERIFY(cudaMallocManaged(&(mem->ptr), mem_size)))
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Cuda: cudaMallocManaged failed\n");
      free(mem);
      return(-1);
    }
  }
  else
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Cuda: unknown memory type\n");
    free(mem);
    return(-1);
  }

  *memptr = mem;
  return(0);
}

/* Release the data owned by mem (dispatching on mem->type) and then the
   SUNMemory struct itself. A NULL mem is a no-op. Returns 0 on success,
   -1 if the CUDA free fails or the type is unknown. */
int SUNMemoryHelper_Dealloc_Cuda(SUNMemoryHelper helper, SUNMemory mem,
                                 void *queue)
{
  if (mem == NULL) return(0);

  if (mem->ptr != NULL && mem->own)
  {
    if (mem->type == SUNMEMTYPE_HOST)
    {
      free(mem->ptr);
      mem->ptr = NULL;
    }
    else if (mem->type == SUNMEMTYPE_PINNED)
    {
      if (!SUNDIALS_CUDA_VERIFY(cudaFreeHost(mem->ptr)))
      {
        SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Cuda: cudaFreeHost failed\n");
        return(-1);
      }
      mem->ptr = NULL;
    }
    else if (mem->type == SUNMEMTYPE_DEVICE || mem->type == SUNMEMTYPE_UVM)
    {
      if (!SUNDIALS_CUDA_VERIFY(cudaFree(mem->ptr)))
      {
        SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Cuda: cudaFree failed\n");
        return(-1);
      }
      mem->ptr = NULL;
    }
    else
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Cuda: unknown memory type\n");
      return(-1);
    }
  }

  free(mem);
  return(0);
}

/* Synchronous copy of memory_size bytes from src to dst, choosing the
   cudaMemcpy direction from the two memory types (host<->host uses
   memcpy). Returns 0 on success, -1 on failure. */
int SUNMemoryHelper_Copy_Cuda(SUNMemoryHelper helper, SUNMemory dst,
                              SUNMemory src, size_t memory_size, void* queue)
{
  int retval = 0;
  cudaError_t cuerr = cudaSuccess;

  switch(src->type)
  {
    case SUNMEMTYPE_HOST:
    case SUNMEMTYPE_PINNED:
      if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED)
      {
        memcpy(dst->ptr, src->ptr, memory_size);
      }
      else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM)
      {
        cuerr = cudaMemcpy(dst->ptr, src->ptr, memory_size,
                           cudaMemcpyHostToDevice);
      }
      if (!SUNDIALS_CUDA_VERIFY(cuerr)) retval = -1;
      break;
    case SUNMEMTYPE_UVM:
    case SUNMEMTYPE_DEVICE:
      if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED)
      {
        cuerr = cudaMemcpy(dst->ptr, src->ptr, memory_size,
                           cudaMemcpyDeviceToHost);
      }
      else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM)
      {
        cuerr = cudaMemcpy(dst->ptr, src->ptr, memory_size,
                           cudaMemcpyDeviceToDevice);
      }
      if (!SUNDIALS_CUDA_VERIFY(cuerr)) retval = -1;
      break;
    default:
      /* BUG FIX: the message named SUNMemoryHelper_CopyAsync_Cuda
         (copy-paste from the async variant below) */
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Copy_Cuda: unknown memory type\n");
      retval = -1;
  }

  return(retval);
}

/* Asynchronous counterpart of Copy_Cuda: issues cudaMemcpyAsync on the
   cudaStream_t pointed to by queue (default stream when queue is NULL).
   Host<->host copies remain synchronous memcpy. Returns 0 on success. */
int SUNMemoryHelper_CopyAsync_Cuda(SUNMemoryHelper helper, SUNMemory dst,
                                   SUNMemory src, size_t memory_size,
                                   void* queue)
{
  int retval = 0;
  cudaError_t cuerr = cudaSuccess;
  cudaStream_t stream = 0;

  if (queue != NULL)
  {
    stream = *((cudaStream_t*) queue);
  }

  switch(src->type)
  {
    case SUNMEMTYPE_HOST:
    case SUNMEMTYPE_PINNED:
      if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED)
      {
        memcpy(dst->ptr, src->ptr, memory_size);
      }
      else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM)
      {
        cuerr = cudaMemcpyAsync(dst->ptr, src->ptr, memory_size,
                                cudaMemcpyHostToDevice, stream);
      }
      if (!SUNDIALS_CUDA_VERIFY(cuerr)) retval = -1;
      break;
    case SUNMEMTYPE_UVM:
    case SUNMEMTYPE_DEVICE:
      if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED)
      {
        cuerr = cudaMemcpyAsync(dst->ptr, src->ptr, memory_size,
                                cudaMemcpyDeviceToHost, stream);
      }
      else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM)
      {
        cuerr = cudaMemcpyAsync(dst->ptr, src->ptr, memory_size,
                                cudaMemcpyDeviceToDevice, stream);
      }
      if (!SUNDIALS_CUDA_VERIFY(cuerr)) retval = -1;
      break;
    default:
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_CopyAsync_Cuda: unknown memory type\n");
      retval = -1;
  }

  return(retval);
}
StanHeaders/src/sunmemory/hip/0000755000176200001440000000000014511135055016060 5ustar liggesusersStanHeaders/src/sunmemory/hip/sundials_hip_memory.hip.cpp0000644000176200001440000001555314645137106023431 0ustar liggesusers/* -----------------------------------------------------------------
 * Programmer(s): Cody J. Balos @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
* * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS HIP memory helper implementation. * ----------------------------------------------------------------*/ #include #include #include "sundials_debug.h" #include "sundials_hip.h" SUNMemoryHelper SUNMemoryHelper_Hip(SUNContext sunctx) { SUNMemoryHelper helper; /* Allocate the helper */ helper = SUNMemoryHelper_NewEmpty(sunctx); /* Set the ops */ helper->ops->alloc = SUNMemoryHelper_Alloc_Hip; helper->ops->dealloc = SUNMemoryHelper_Dealloc_Hip; helper->ops->copy = SUNMemoryHelper_Copy_Hip; helper->ops->copyasync = SUNMemoryHelper_CopyAsync_Hip; /* Attach content and ops */ helper->content = NULL; return helper; } int SUNMemoryHelper_Alloc_Hip(SUNMemoryHelper helper, SUNMemory* memptr, size_t mem_size, SUNMemoryType mem_type, void* queue) { SUNMemory mem = SUNMemoryNewEmpty(); mem->ptr = NULL; mem->own = SUNTRUE; mem->type = mem_type; if (mem_type == SUNMEMTYPE_HOST) { mem->ptr = malloc(mem_size); if (mem->ptr == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Hip: malloc returned NULL\n"); free(mem); return(-1); } } else if (mem_type == SUNMEMTYPE_PINNED) { if (!SUNDIALS_HIP_VERIFY(hipHostMalloc(&(mem->ptr), mem_size, hipHostMallocDefault))) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Hip: hipHostMalloc failed\n"); free(mem); return(-1); } } else if (mem_type == SUNMEMTYPE_DEVICE) { if (!SUNDIALS_HIP_VERIFY(hipMalloc(&(mem->ptr), mem_size))) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Hip: hipMalloc failed\n"); free(mem); return(-1); } } else if (mem_type == SUNMEMTYPE_UVM) { if (!SUNDIALS_HIP_VERIFY(hipMallocManaged(&(mem->ptr), mem_size))) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Hip: hipMallocManaged failed\n"); free(mem); return(-1); } } else { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Hip: unknown memory 
type\n"); free(mem); return(-1); } *memptr = mem; return(0); } int SUNMemoryHelper_Dealloc_Hip(SUNMemoryHelper helper, SUNMemory mem, void* queue) { if (mem == NULL) return(0); if (mem->ptr != NULL && mem->own) { if (mem->type == SUNMEMTYPE_HOST) { free(mem->ptr); mem->ptr = NULL; } else if (mem->type == SUNMEMTYPE_PINNED) { if (!SUNDIALS_HIP_VERIFY(hipHostFree(mem->ptr))) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Hip: hipHostFree failed\n"); return(-1); } mem->ptr = NULL; } else if (mem->type == SUNMEMTYPE_DEVICE || mem->type == SUNMEMTYPE_UVM) { if (!SUNDIALS_HIP_VERIFY(hipFree(mem->ptr))) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Hip: hipFree failed\n"); return(-1); } mem->ptr = NULL; } else { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Hip: unknown memory type\n"); return(-1); } } free(mem); return(0); } int SUNMemoryHelper_Copy_Hip(SUNMemoryHelper helper, SUNMemory dst, SUNMemory src, size_t memory_size, void* queue) { int retval = 0; hipError_t cuerr = hipSuccess; switch(src->type) { case SUNMEMTYPE_HOST: case SUNMEMTYPE_PINNED: if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED) { memcpy(dst->ptr, src->ptr, memory_size); } else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM) { cuerr = hipMemcpy(dst->ptr, src->ptr, memory_size, hipMemcpyHostToDevice); } if (!SUNDIALS_HIP_VERIFY(cuerr)) retval = -1; break; case SUNMEMTYPE_UVM: case SUNMEMTYPE_DEVICE: if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED) { cuerr = hipMemcpy(dst->ptr, src->ptr, memory_size, hipMemcpyDeviceToHost); } else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM) { cuerr = hipMemcpy(dst->ptr, src->ptr, memory_size, hipMemcpyDeviceToDevice); } if (!SUNDIALS_HIP_VERIFY(cuerr)) retval = -1; break; default: SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_CopyAsync_Hip: unknown memory type\n"); retval = -1; } return(retval); } int SUNMemoryHelper_CopyAsync_Hip(SUNMemoryHelper helper, 
SUNMemory dst, SUNMemory src, size_t memory_size, void* queue) { int retval = 0; hipError_t cuerr = hipSuccess; hipStream_t stream = 0; if (queue != NULL) { stream = *((hipStream_t*) queue); } switch(src->type) { case SUNMEMTYPE_HOST: case SUNMEMTYPE_PINNED: if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED) { memcpy(dst->ptr, src->ptr, memory_size); } else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM) { cuerr = hipMemcpyAsync(dst->ptr, src->ptr, memory_size, hipMemcpyHostToDevice, stream); } if (!SUNDIALS_HIP_VERIFY(cuerr)) retval = -1; break; case SUNMEMTYPE_UVM: case SUNMEMTYPE_DEVICE: if (dst->type == SUNMEMTYPE_HOST || dst->type == SUNMEMTYPE_PINNED) { cuerr = hipMemcpyAsync(dst->ptr, src->ptr, memory_size, hipMemcpyDeviceToHost, stream); } else if (dst->type == SUNMEMTYPE_DEVICE || dst->type == SUNMEMTYPE_UVM) { cuerr = hipMemcpyAsync(dst->ptr, src->ptr, memory_size, hipMemcpyDeviceToDevice, stream); } if (!SUNDIALS_HIP_VERIFY(cuerr)) retval = -1; break; default: SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_CopyAsync_Hip: unknown memory type\n"); retval = -1; } return(retval); } StanHeaders/src/sunmemory/system/0000755000176200001440000000000014511135055016624 5ustar liggesusersStanHeaders/src/sunmemory/system/sundials_system_memory.c0000644000176200001440000000506614645137106023624 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS memory helper implementation that uses the standard * system memory allocators. 
* ----------------------------------------------------------------*/ #include #include #include #include "sundials_debug.h" SUNMemoryHelper SUNMemoryHelper_Sys(SUNContext sunctx) { SUNMemoryHelper helper; /* Allocate the helper */ helper = SUNMemoryHelper_NewEmpty(sunctx); /* Set the ops */ helper->ops->alloc = SUNMemoryHelper_Alloc_Sys; helper->ops->dealloc = SUNMemoryHelper_Dealloc_Sys; helper->ops->copy = SUNMemoryHelper_Copy_Sys; /* Attach content and ops */ helper->content = NULL; return helper; } int SUNMemoryHelper_Alloc_Sys(SUNMemoryHelper helper, SUNMemory* memptr, size_t mem_size, SUNMemoryType mem_type, void* queue) { SUNMemory mem = SUNMemoryNewEmpty(); mem->ptr = NULL; mem->own = SUNTRUE; mem->type = mem_type; if (mem_type == SUNMEMTYPE_HOST) { mem->ptr = malloc(mem_size); if (mem->ptr == NULL) { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sys: malloc returned NULL\n"); free(mem); return(-1); } } else { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sys: unsupported memory type\n"); free(mem); return(-1); } *memptr = mem; return(0); } int SUNMemoryHelper_Dealloc_Sys(SUNMemoryHelper helper, SUNMemory mem, void* queue) { if (mem == NULL) return(0); if (mem->ptr != NULL && mem->own) { if (mem->type == SUNMEMTYPE_HOST) { free(mem->ptr); mem->ptr = NULL; } else { SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Sys: unsupported memory type\n"); return(-1); } } free(mem); return(0); } int SUNMemoryHelper_Copy_Sys(SUNMemoryHelper helper, SUNMemory dst, SUNMemory src, size_t memory_size, void* queue) { int retval = 0; memcpy(dst->ptr, src->ptr, memory_size); return(retval); } StanHeaders/src/sunmemory/sycl/0000755000176200001440000000000014511135055016252 5ustar liggesusersStanHeaders/src/sunmemory/sycl/sundials_sycl_memory.cpp0000644000176200001440000001203614645137106023233 0ustar liggesusers/* ---------------------------------------------------------------- * Programmer(s): David J. 
Gardner @ LLNL
 * ----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * ----------------------------------------------------------------
 * SUNDIALS SYCL memory helper implementation.
 * ----------------------------------------------------------------*/

/* NOTE(review): the angle-bracket header names after the two bare
   "#include" tokens below were lost in packaging -- confirm against the
   upstream SUNDIALS sources before building. */
#include
#include
#include "sundials_debug.h"

/* Create a SUNMemoryHelper whose alloc/dealloc/copy/copyasync ops route
   through SYCL USM allocation and queue memcpy. Returns NULL if the
   empty helper cannot be allocated. */
SUNMemoryHelper SUNMemoryHelper_Sycl(SUNContext sunctx)
{
  // Allocate the helper
  SUNMemoryHelper helper = SUNMemoryHelper_NewEmpty(sunctx);
  if (!helper)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Sycl: SUNMemoryHelper_NewEmpty returned NULL\n");
    return NULL;
  }

  // Set the ops
  helper->ops->alloc     = SUNMemoryHelper_Alloc_Sycl;
  helper->ops->dealloc   = SUNMemoryHelper_Dealloc_Sycl;
  helper->ops->copy      = SUNMemoryHelper_Copy_Sycl;
  helper->ops->copyasync = SUNMemoryHelper_CopyAsync_Sycl;

  return helper;
}

/* Allocate mem_size bytes of kind mem_type via the ::sycl::queue passed
   in queue (HOST -> malloc, PINNED -> malloc_host, DEVICE -> malloc_device,
   UVM -> malloc_shared). On success *memptr owns the memory.
   Returns 0 on success, -1 on failure (queue is required). */
int SUNMemoryHelper_Alloc_Sycl(SUNMemoryHelper helper, SUNMemory* memptr,
                               size_t mem_size, SUNMemoryType mem_type,
                               void* queue)
{
  // Check inputs
  if (!queue)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sycl: queue is NULL\n");
    return -1;
  }
  ::sycl::queue* sycl_queue = static_cast<::sycl::queue*>(queue);

  // Allocate the memory struct
  SUNMemory mem = SUNMemoryNewEmpty();
  if (!mem)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Sycl: SUNMemoryNewEmpty returned NULL\n");
    return -1;
  }

  // Initialize the memory content
  mem->ptr  = nullptr;
  mem->own  = SUNTRUE;
  mem->type = mem_type;

  // Allocate the data pointer
  if (mem_type == SUNMEMTYPE_HOST)
  {
    mem->ptr = malloc(mem_size);
    if (!(mem->ptr))
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sycl: malloc returned NULL\n");
      free(mem);
      return -1;
    }
  }
  else if (mem_type == SUNMEMTYPE_PINNED)
  {
    mem->ptr = ::sycl::malloc_host(mem_size, *sycl_queue);
    if (!(mem->ptr))
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sycl: malloc_host returned NULL\n");
      free(mem);
      return -1;
    }
  }
  else if (mem_type == SUNMEMTYPE_DEVICE)
  {
    mem->ptr = ::sycl::malloc_device(mem_size, *sycl_queue);
    if (!(mem->ptr))
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sycl: malloc_device returned NULL\n");
      free(mem);
      return -1;
    }
  }
  else if (mem_type == SUNMEMTYPE_UVM)
  {
    mem->ptr = ::sycl::malloc_shared(mem_size, *sycl_queue);
    if (!(mem->ptr))
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sycl: malloc_shared returned NULL\n");
      free(mem);
      return -1;
    }
  }
  else
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Alloc_Sycl: unknown memory type\n");
    free(mem);
    return -1;
  }

  *memptr = mem;
  return 0;
}

/* Release the data owned by mem (free for HOST, ::sycl::free for
   PINNED/DEVICE/UVM against the provided queue) and then the struct.
   A NULL mem is a no-op; a NULL queue is an error when data must be freed. */
int SUNMemoryHelper_Dealloc_Sycl(SUNMemoryHelper helper, SUNMemory mem,
                                 void* queue)
{
  if (!mem) return 0;

  if (mem->ptr && mem->own)
  {
    if (!queue)
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Sycl: queue is NULL\n");
      return -1;
    }
    ::sycl::queue* sycl_queue = static_cast<::sycl::queue*>(queue);

    if (mem->type == SUNMEMTYPE_HOST)
    {
      free(mem->ptr);
      mem->ptr = nullptr;
    }
    else if (mem->type == SUNMEMTYPE_PINNED ||
             mem->type == SUNMEMTYPE_DEVICE ||
             mem->type == SUNMEMTYPE_UVM)
    {
      ::sycl::free(mem->ptr, *sycl_queue);
      mem->ptr = nullptr;
    }
    else
    {
      SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Dealloc_Sycl: unknown memory type\n");
      return -1;
    }
  }

  free(mem);
  return 0;
}

/* Synchronous copy: delegates to CopyAsync_Sycl and then blocks on the
   queue with wait_and_throw. Returns 0 on success, -1 on failure. */
int SUNMemoryHelper_Copy_Sycl(SUNMemoryHelper helper, SUNMemory dst,
                              SUNMemory src, size_t memory_size, void* queue)
{
  if (!queue)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_Copy_Sycl: queue is NULL\n");
    return -1;
  }
  ::sycl::queue* sycl_queue = static_cast<::sycl::queue*>(queue);

  if (SUNMemoryHelper_CopyAsync_Sycl(helper, dst, src, memory_size, queue))
    return -1;
  sycl_queue->wait_and_throw();
  return 0;
}

/* Asynchronous copy: host-to-host uses memcpy directly; every other
   combination is submitted to the queue with queue->memcpy (caller is
   responsible for synchronizing). Returns 0 on success. */
int SUNMemoryHelper_CopyAsync_Sycl(SUNMemoryHelper helper, SUNMemory dst,
                                   SUNMemory src, size_t memory_size,
                                   void* queue)
{
  if (!queue)
  {
    SUNDIALS_DEBUG_PRINT("ERROR in SUNMemoryHelper_CopyAsync_Sycl: queue is NULL\n");
    return -1;
  }
  ::sycl::queue* sycl_queue = static_cast<::sycl::queue*>(queue);

  if (src->type == SUNMEMTYPE_HOST && dst->type == SUNMEMTYPE_HOST)
  {
    memcpy(dst->ptr, src->ptr, memory_size);
  }
  else
  {
    sycl_queue->memcpy(dst->ptr, src->ptr, memory_size);
  }
  return 0;
}
StanHeaders/src/cvodes/0000755000176200001440000000000014645137104014532 5ustar liggesusersStanHeaders/src/cvodes/cvodes_ls_impl.h0000644000176200001440000002411314645137106017710 0ustar liggesusers/*-----------------------------------------------------------------
 * Programmer(s): Daniel R. Reynolds @ SMU
 *                Radu Serban @ LLNL
 *-----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 *-----------------------------------------------------------------
 * Implementation header file for CVODES' linear solver interface.
*-----------------------------------------------------------------*/

#ifndef _CVSLS_IMPL_H
#define _CVSLS_IMPL_H

/* NOTE(review): the angle-bracket header name after the bare "#include"
   below was lost in packaging -- confirm against upstream. */
#include
#include "cvodes_impl.h"

#ifdef __cplusplus  /* wrapper to enable C++ usage */
extern "C" {
#endif

/*-----------------------------------------------------------------
  CVSLS solver constants

  CVLS_MSBJ   maximum number of steps between Jacobian and/or
              preconditioner evaluations

  CVLS_DGMAX  maximum change in gamma between Jacobian and/or
              preconditioner evaluations

  CVLS_EPLIN  default value for factor by which the tolerance on
              the nonlinear iteration is multiplied to get a
              tolerance on the linear iteration
  -----------------------------------------------------------------*/
#define CVLS_MSBJ  51
#define CVLS_DGMAX RCONST(0.2)
#define CVLS_EPLIN RCONST(0.05)

/*=================================================================
  PART I:  Forward Problems
  =================================================================*/

/*-----------------------------------------------------------------
  Types : CVLsMemRec, CVLsMem

  The type CVLsMem is pointer to a CVLsMemRec.
  -----------------------------------------------------------------*/
typedef struct CVLsMemRec {

  /* Linear solver type information */
  booleantype iterative;    /* is the solver iterative?    */
  booleantype matrixbased;  /* is a matrix structure used? */

  /* Jacobian construction & storage */
  booleantype jacDQ;  /* SUNTRUE if using internal DQ Jac approx. */
  CVLsJacFn jac;      /* Jacobian routine to be called            */
  void *J_data;       /* user data is passed to jac               */
  booleantype jbad;   /* heuristic suggestion for pset            */

  /* Matrix-based solver, scale solution to account for change in gamma */
  booleantype scalesol;

  /* Iterative solver tolerance */
  realtype eplifac;   /* nonlinear -> linear tol scaling factor     */
  realtype nrmfac;    /* integrator -> LS norm conversion factor    */

  /* Linear solver, matrix and vector objects/pointers */
  SUNLinearSolver LS; /* generic linear solver object               */
  SUNMatrix A;        /* A = I - gamma * df/dy                      */
  SUNMatrix savedJ;   /* savedJ = old Jacobian                      */
  N_Vector ytemp;     /* temp vector passed to jtimes and psolve    */
  N_Vector x;         /* temp vector used by CVLsSolve              */
  N_Vector ycur;      /* CVODE current y vector in Newton Iteration */
  N_Vector fcur;      /* fcur = f(tn, ycur)                         */

  /* Statistics and associated parameters */
  long int msbj;      /* max num steps between jac/pset calls       */
  long int nje;       /* nje = no. of calls to jac                  */
  long int nfeDQ;     /* no. of calls to f due to DQ Jacobian or J*v
                         approximations                             */
  long int nstlj;     /* nstlj = nst at last jac/pset call          */
  long int npe;       /* npe = total number of pset calls           */
  long int nli;       /* nli = total number of linear iterations    */
  long int nps;       /* nps = total number of psolve calls         */
  long int ncfl;      /* ncfl = total number of convergence failures */
  long int njtsetup;  /* njtsetup = total number of calls to jtsetup */
  long int njtimes;   /* njtimes = total number of calls to jtimes   */

  /* Preconditioner computation
     (a) user-provided:
         - P_data == user_data
         - pfree == NULL (the user dealocates memory for user_data)
     (b) internal preconditioner module
         - P_data == cvode_mem
         - pfree == set by the prec. module and called in CVodeFree */
  CVLsPrecSetupFn pset;
  CVLsPrecSolveFn psolve;
  int (*pfree)(CVodeMem cv_mem);
  void *P_data;

  /* Jacobian times vector compuation
     (a) jtimes function provided by the user:
         - jt_data == user_data
         - jtimesDQ == SUNFALSE
     (b) internal jtimes
         - jt_data == cvode_mem
         - jtimesDQ == SUNTRUE */
  booleantype jtimesDQ;
  CVLsJacTimesSetupFn jtsetup;
  CVLsJacTimesVecFn jtimes;
  CVRhsFn jt_f;
  void *jt_data;

  /* Linear system setup function
     (a) user-provided linsys function:
         - user_linsys = SUNTRUE
         - A_data = user_data
     (b) internal linsys function:
         - user_linsys = SUNFALSE
         - A_data = cvode_mem */
  booleantype user_linsys;
  CVLsLinSysFn linsys;
  void* A_data;

  int last_flag; /* last error flag returned by any function */

} *CVLsMem;

/*-----------------------------------------------------------------
  Prototypes of internal functions
  -----------------------------------------------------------------*/

/* Interface routines called by system SUNLinearSolver */
int cvLsATimes(void* cvode_mem, N_Vector v, N_Vector z);
int cvLsPSetup(void* cvode_mem);
int cvLsPSolve(void* cvode_mem, N_Vector r, N_Vector z,
               realtype tol, int lr);

/* Difference quotient approximation for Jac times vector */
int cvLsDQJtimes(N_Vector v, N_Vector Jv, realtype t,
                 N_Vector y, N_Vector fy, void *data,
                 N_Vector work);

/* Difference-quotient Jacobian approximation routines */
int cvLsDQJac(realtype t, N_Vector y, N_Vector fy, SUNMatrix Jac,
              void *data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
int cvLsDenseDQJac(realtype t, N_Vector y, N_Vector fy, SUNMatrix Jac,
                   CVodeMem cv_mem, N_Vector tmp1);
int cvLsBandDQJac(realtype t, N_Vector y, N_Vector fy, SUNMatrix Jac,
                  CVodeMem cv_mem, N_Vector tmp1, N_Vector tmp2);

/* Generic linit/lsetup/lsolve/lfree interface routines for CVode to call */
int cvLsInitialize(CVodeMem cv_mem);
int cvLsSetup(CVodeMem cv_mem, int convfail, N_Vector ypred,
              N_Vector fpred, booleantype *jcurPtr,
              N_Vector vtemp1, N_Vector vtemp2, N_Vector vtemp3);
int cvLsSolve(CVodeMem cv_mem, N_Vector b, N_Vector weight,
              N_Vector ycur, N_Vector fcur);
int cvLsFree(CVodeMem cv_mem);

/* Auxilliary functions */
int cvLsInitializeCounters(CVLsMem cvls_mem);
int cvLs_AccessLMem(void* cvode_mem, const char* fname,
                    CVodeMem* cv_mem, CVLsMem* cvls_mem);

/*=================================================================
  PART II:  Backward Problems
  =================================================================*/

/*-----------------------------------------------------------------
  Types : CVLsMemRecB, CVLsMemB

  CVodeSetLinearSolverB attaches such a structure to the lmemB
  field of CVodeBMem
  -----------------------------------------------------------------*/
typedef struct CVLsMemRecB {

  CVLsJacFnB            jacB;
  CVLsJacFnBS           jacBS;
  CVLsJacTimesSetupFnB  jtsetupB;
  CVLsJacTimesSetupFnBS jtsetupBS;
  CVLsJacTimesVecFnB    jtimesB;
  CVLsJacTimesVecFnBS   jtimesBS;
  CVLsLinSysFnB         linsysB;
  CVLsLinSysFnBS        linsysBS;
  CVLsPrecSetupFnB      psetB;
  CVLsPrecSetupFnBS     psetBS;
  CVLsPrecSolveFnB      psolveB;
  CVLsPrecSolveFnBS     psolveBS;
  void                 *P_dataB;

} *CVLsMemB;

/*-----------------------------------------------------------------
  Prototypes of internal functions
  -----------------------------------------------------------------*/
int cvLsFreeB(CVodeBMem cvb_mem);
int cvLs_AccessLMemB(void *cvode_mem, int which, const char *fname,
                     CVodeMem *cv_mem, CVadjMem *ca_mem,
                     CVodeBMem *cvB_mem, CVLsMemB *cvlsB_mem);
int cvLs_AccessLMemBCur(void *cvode_mem, const char *fname,
                        CVodeMem *cv_mem, CVadjMem *ca_mem,
                        CVodeBMem *cvB_mem, CVLsMemB *cvlsB_mem);

/*=================================================================
  Error Messages
  =================================================================*/
#define MSG_LS_CVMEM_NULL "Integrator memory is NULL."
#define MSG_LS_MEM_FAIL "A memory request failed."
#define MSG_LS_BAD_NVECTOR "A required vector operation is not implemented."
#define MSG_LS_BAD_LSTYPE "Incompatible linear solver type."
#define MSG_LS_LMEM_NULL "Linear solver memory is NULL."
#define MSG_LS_BAD_SIZES "Illegal bandwidth parameter(s). Must have 0 <= ml, mu <= N-1."
#define MSG_LS_BAD_EPLIN "eplifac < 0 illegal."
#define MSG_LS_BAD_PRETYPE "Illegal value for pretype. Legal values are PREC_NONE, PREC_LEFT, PREC_RIGHT, and PREC_BOTH."
#define MSG_LS_PSOLVE_REQ "pretype != PREC_NONE, but PSOLVE = NULL is illegal."
#define MSG_LS_BAD_GSTYPE "Illegal value for gstype. Legal values are MODIFIED_GS and CLASSICAL_GS."
#define MSG_LS_PSET_FAILED "The preconditioner setup routine failed in an unrecoverable manner."
#define MSG_LS_PSOLVE_FAILED "The preconditioner solve routine failed in an unrecoverable manner."
#define MSG_LS_JTSETUP_FAILED "The Jacobian x vector setup routine failed in an unrecoverable manner."
#define MSG_LS_JTIMES_FAILED "The Jacobian x vector routine failed in an unrecoverable manner."
#define MSG_LS_JACFUNC_FAILED "The Jacobian routine failed in an unrecoverable manner."
#define MSG_LS_SUNMAT_FAILED "A SUNMatrix routine failed in an unrecoverable manner."
#define MSG_LS_NO_ADJ "Illegal attempt to call before calling CVodeAdjMalloc."
#define MSG_LS_BAD_WHICH "Illegal value for which."
#define MSG_LS_LMEMB_NULL "Linear solver memory is NULL for the backward integration."
#define MSG_LS_BAD_TINTERP "Bad t for interpolation."

#ifdef __cplusplus
}
#endif

#endif
StanHeaders/src/cvodes/cvodes_diag.c0000644000176200001440000003304714645137106017156 0ustar liggesusers/*
 * -----------------------------------------------------------------
 * Programmer(s): Radu Serban @ LLNL
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
*
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * This is the implementation file for the CVDIAG linear solver.
 * -----------------------------------------------------------------
 */

/* NOTE(review): the angle-bracket header names after the two bare
   "#include" tokens below were lost in packaging -- confirm against
   upstream. */
#include
#include
#include "cvodes_diag_impl.h"
#include "cvodes_impl.h"

/* Other Constants */

#define FRACT RCONST(0.1)
#define ONE   RCONST(1.0)

/* CVDIAG linit, lsetup, lsolve, and lfree routines */

static int CVDiagInit(CVodeMem cv_mem);

static int CVDiagSetup(CVodeMem cv_mem, int convfail, N_Vector ypred,
                       N_Vector fpred, booleantype *jcurPtr, N_Vector vtemp1,
                       N_Vector vtemp2, N_Vector vtemp3);

static int CVDiagSolve(CVodeMem cv_mem, N_Vector b, N_Vector weight,
                       N_Vector ycur, N_Vector fcur);

static int CVDiagFree(CVodeMem cv_mem);

/*
 * ================================================================
 *
 *                   PART I - forward problems
 *
 * ================================================================
 */

/* Readability Replacements */
/* (shorthands for fields of the integrator and CVDIAG memory blocks) */

#define lrw1         (cv_mem->cv_lrw1)
#define liw1         (cv_mem->cv_liw1)
#define f            (cv_mem->cv_f)
#define uround       (cv_mem->cv_uround)
#define tn           (cv_mem->cv_tn)
#define h            (cv_mem->cv_h)
#define rl1          (cv_mem->cv_rl1)
#define gamma        (cv_mem->cv_gamma)
#define ewt          (cv_mem->cv_ewt)
#define nfe          (cv_mem->cv_nfe)
#define zn           (cv_mem->cv_zn)
#define linit        (cv_mem->cv_linit)
#define lsetup       (cv_mem->cv_lsetup)
#define lsolve       (cv_mem->cv_lsolve)
#define lfree        (cv_mem->cv_lfree)
#define lmem         (cv_mem->cv_lmem)
#define vec_tmpl     (cv_mem->cv_tempv)
#define setupNonNull (cv_mem->cv_setupNonNull)

#define gammasv   (cvdiag_mem->di_gammasv)
#define M         (cvdiag_mem->di_M)
#define bit       (cvdiag_mem->di_bit)
#define bitcomp   (cvdiag_mem->di_bitcomp)
#define nfeDI     (cvdiag_mem->di_nfeDI)
#define last_flag (cvdiag_mem->di_last_flag)

/*
 * -----------------------------------------------------------------
 * CVDiag
 * -----------------------------------------------------------------
 * This routine initializes the memory record and sets various function
 * fields specific to the diagonal linear solver module.  CVDense first
 * calls the existing lfree routine if this is not NULL.  Then it sets
 * the cv_linit, cv_lsetup, cv_lsolve, cv_lfree fields in (*cvode_mem)
 * to be CVDiagInit, CVDiagSetup, CVDiagSolve, and CVDiagFree,
 * respectively.  It allocates memory for a structure of type
 * CVDiagMemRec and sets the cv_lmem field in (*cvode_mem) to the
 * address of this structure.  It sets setupNonNull in (*cvode_mem) to
 * SUNTRUE.  Finally, it allocates memory for M, bit, and bitcomp.
 * The CVDiag return value is SUCCESS = 0, LMEM_FAIL = -1, or
 * LIN_ILL_INPUT=-2.
 * -----------------------------------------------------------------
 */

int CVDiag(void *cvode_mem)
{
  CVodeMem cv_mem;
  CVDiagMem cvdiag_mem;

  /* Return immediately if cvode_mem is NULL */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CVDIAG_MEM_NULL, "CVDIAG", "CVDiag", MSGDG_CVMEM_NULL);
    return(CVDIAG_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Check if N_VCompare and N_VInvTest are present */
  if(vec_tmpl->ops->nvcompare == NULL ||
     vec_tmpl->ops->nvinvtest == NULL) {
    cvProcessError(cv_mem, CVDIAG_ILL_INPUT, "CVDIAG", "CVDiag", MSGDG_BAD_NVECTOR);
    return(CVDIAG_ILL_INPUT);
  }

  if (lfree != NULL) lfree(cv_mem);

  /* Set four main function fields in cv_mem */
  linit  = CVDiagInit;
  lsetup = CVDiagSetup;
  lsolve = CVDiagSolve;
  lfree  = CVDiagFree;

  /* Get memory for CVDiagMemRec */
  cvdiag_mem = NULL;
  cvdiag_mem = (CVDiagMem) malloc(sizeof(CVDiagMemRec));
  if (cvdiag_mem == NULL) {
    cvProcessError(cv_mem, CVDIAG_MEM_FAIL, "CVDIAG", "CVDiag", MSGDG_MEM_FAIL);
    return(CVDIAG_MEM_FAIL);
  }

  last_flag = CVDIAG_SUCCESS;

  /* Allocate memory for M, bit, and bitcomp */
  /* (each N_VClone failure unwinds the allocations made before it) */

  M = N_VClone(vec_tmpl);
  if (M == NULL) {
    cvProcessError(cv_mem, CVDIAG_MEM_FAIL, "CVDIAG", "CVDiag", MSGDG_MEM_FAIL);
    free(cvdiag_mem); cvdiag_mem = NULL;
    return(CVDIAG_MEM_FAIL);
  }

  bit = N_VClone(vec_tmpl);
  if (bit == NULL) {
    cvProcessError(cv_mem, CVDIAG_MEM_FAIL, "CVDIAG", "CVDiag", MSGDG_MEM_FAIL);
    N_VDestroy(M);
    free(cvdiag_mem); cvdiag_mem = NULL;
    return(CVDIAG_MEM_FAIL);
  }

  bitcomp = N_VClone(vec_tmpl);
  if (bitcomp == NULL) {
    cvProcessError(cv_mem, CVDIAG_MEM_FAIL, "CVDIAG", "CVDiag", MSGDG_MEM_FAIL);
    N_VDestroy(M);
    N_VDestroy(bit);
    free(cvdiag_mem); cvdiag_mem = NULL;
    return(CVDIAG_MEM_FAIL);
  }

  /* Attach linear solver memory to integrator memory */
  lmem = cvdiag_mem;

  return(CVDIAG_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * CVDiagGetWorkSpace
 * -----------------------------------------------------------------
 * Reports the CVDIAG real/integer workspace sizes (three clones of
 * the template vector).
 * -----------------------------------------------------------------
 */

int CVDiagGetWorkSpace(void *cvode_mem, long int *lenrwLS, long int *leniwLS)
{
  CVodeMem cv_mem;

  /* Return immediately if cvode_mem is NULL */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CVDIAG_MEM_NULL, "CVDIAG", "CVDiagGetWorkSpace", MSGDG_CVMEM_NULL);
    return(CVDIAG_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  *lenrwLS = 3*lrw1;
  *leniwLS = 3*liw1;

  return(CVDIAG_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * CVDiagGetNumRhsEvals
 * -----------------------------------------------------------------
 * Reports the number of RHS calls made by the CVDIAG difference
 * quotients (counter nfeDI).
 * -----------------------------------------------------------------
 */

int CVDiagGetNumRhsEvals(void *cvode_mem, long int *nfevalsLS)
{
  CVodeMem cv_mem;
  CVDiagMem cvdiag_mem;

  /* Return immediately if cvode_mem is NULL */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CVDIAG_MEM_NULL, "CVDIAG", "CVDiagGetNumRhsEvals", MSGDG_CVMEM_NULL);
    return(CVDIAG_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  if (lmem == NULL) {
    cvProcessError(cv_mem, CVDIAG_LMEM_NULL, "CVDIAG", "CVDiagGetNumRhsEvals", MSGDG_LMEM_NULL);
    return(CVDIAG_LMEM_NULL);
  }
  cvdiag_mem = (CVDiagMem) lmem;

  *nfevalsLS = nfeDI;

  return(CVDIAG_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * CVDiagGetLastFlag
 * -----------------------------------------------------------------
 * Reports the last flag stored by a CVDIAG routine.
 * -----------------------------------------------------------------
 */

int CVDiagGetLastFlag(void *cvode_mem, long int *flag)
{
  CVodeMem cv_mem;
  CVDiagMem cvdiag_mem;

  /* Return immediately if cvode_mem is NULL */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CVDIAG_MEM_NULL, "CVDIAG", "CVDiagGetLastFlag", MSGDG_CVMEM_NULL);
    return(CVDIAG_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  if (lmem == NULL) {
    cvProcessError(cv_mem, CVDIAG_LMEM_NULL, "CVDIAG", "CVDiagGetLastFlag", MSGDG_LMEM_NULL);
    return(CVDIAG_LMEM_NULL);
  }
  cvdiag_mem = (CVDiagMem) lmem;

  *flag = last_flag;

  return(CVDIAG_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * CVDiagGetReturnFlagName
 * -----------------------------------------------------------------
 * Maps a CVDIAG return flag to its name in a freshly malloc'ed
 * string; the caller is responsible for freeing the returned buffer.
 * -----------------------------------------------------------------
 */

char *CVDiagGetReturnFlagName(long int flag)
{
  char *name;

  name = (char *)malloc(30*sizeof(char));

  switch(flag) {
  case CVDIAG_SUCCESS:
    sprintf(name,"CVDIAG_SUCCESS");
    break;
  case CVDIAG_MEM_NULL:
    sprintf(name,"CVDIAG_MEM_NULL");
    break;
  case CVDIAG_LMEM_NULL:
    sprintf(name,"CVDIAG_LMEM_NULL");
    break;
  case CVDIAG_ILL_INPUT:
    sprintf(name,"CVDIAG_ILL_INPUT");
    break;
  case CVDIAG_MEM_FAIL:
    sprintf(name,"CVDIAG_MEM_FAIL");
    break;
  case CVDIAG_INV_FAIL:
    sprintf(name,"CVDIAG_INV_FAIL");
    break;
  case CVDIAG_RHSFUNC_UNRECVR:
    sprintf(name,"CVDIAG_RHSFUNC_UNRECVR");
    break;
  case CVDIAG_RHSFUNC_RECVR:
    sprintf(name,"CVDIAG_RHSFUNC_RECVR");
    break;
  case CVDIAG_NO_ADJ:
    sprintf(name,"CVDIAG_NO_ADJ");
    break;
  default:
    sprintf(name,"NONE");
  }

  return(name);
}

/*
 * -----------------------------------------------------------------
 * CVDiagInit
 * -----------------------------------------------------------------
 * This routine does remaining initializations specific to the diagonal
 * linear solver.
 * -----------------------------------------------------------------
 */

static int CVDiagInit(CVodeMem cv_mem)
{
  CVDiagMem cvdiag_mem;

  cvdiag_mem = (CVDiagMem) lmem;

  nfeDI = 0;

  last_flag = CVDIAG_SUCCESS;
  return(0);
}

/*
 * -----------------------------------------------------------------
 * CVDiagSetup
 * -----------------------------------------------------------------
 * This routine does the setup operations for the diagonal linear
 * solver.  It constructs a diagonal approximation to the Newton matrix
 * M = I - gamma*J, updates counters, and inverts M.
 * -----------------------------------------------------------------
 */

static int CVDiagSetup(CVodeMem cv_mem, int convfail, N_Vector ypred,
                       N_Vector fpred, booleantype *jcurPtr, N_Vector vtemp1,
                       N_Vector vtemp2, N_Vector vtemp3)
{
  realtype r;
  N_Vector ftemp, y;
  booleantype invOK;
  CVDiagMem cvdiag_mem;
  int retval;

  cvdiag_mem = (CVDiagMem) lmem;

  /* Rename work vectors for use as temporary values of y and f */
  ftemp = vtemp1;
  y     = vtemp2;

  /* Form y with perturbation = FRACT*(func. iter. correction) */
  r = FRACT * rl1;
  N_VLinearSum(h, fpred, -ONE, zn[1], ftemp);
  N_VLinearSum(r, ftemp, ONE, ypred, y);

  /* Evaluate f at perturbed y */
  retval = f(tn, y, M, cv_mem->cv_user_data);
  nfeDI++;
  if (retval < 0) {
    cvProcessError(cv_mem, CVDIAG_RHSFUNC_UNRECVR, "CVDIAG", "CVDiagSetup", MSGDG_RHSFUNC_FAILED);
    last_flag = CVDIAG_RHSFUNC_UNRECVR;
    return(-1);
  }
  if (retval > 0) {
    last_flag = CVDIAG_RHSFUNC_RECVR;
    return(1);
  }

  /* Construct M = I - gamma*J with J = diag(deltaf_i/deltay_i) */
  /* (difference quotient of f along the perturbation built above) */
  N_VLinearSum(ONE, M, -ONE, fpred, M);
  N_VLinearSum(FRACT, ftemp, -h, M, M);
  N_VProd(ftemp, ewt, y);

  /* Protect against deltay_i being at roundoff level */
  N_VCompare(uround, y, bit);
  N_VAddConst(bit, -ONE, bitcomp);
  N_VProd(ftemp, bit, y);
  N_VLinearSum(FRACT, y, -ONE, bitcomp, y);
  N_VDiv(M, y, M);
  N_VProd(M, bit, M);
  N_VLinearSum(ONE, M, -ONE, bitcomp, M);

  /* Invert M with test for zero components */
  invOK = N_VInvTest(M, M);
  if (!invOK) {
    last_flag = CVDIAG_INV_FAIL;
    return(1);
  }

  /* Set jcur = SUNTRUE, save gamma in gammasv, and return */
  *jcurPtr = SUNTRUE;
  gammasv = gamma;
  last_flag = CVDIAG_SUCCESS;
  return(0);
}

/*
 * -----------------------------------------------------------------
 * CVDiagSolve
 * -----------------------------------------------------------------
 * This routine performs the solve operation for the diagonal linear
 * solver.
If necessary it first updates gamma in M = I - gamma*J. * ----------------------------------------------------------------- */ static int CVDiagSolve(CVodeMem cv_mem, N_Vector b, N_Vector weight, N_Vector ycur, N_Vector fcur) { booleantype invOK; realtype r; CVDiagMem cvdiag_mem; cvdiag_mem = (CVDiagMem) lmem; /* If gamma has changed, update factor in M, and save gamma value */ if (gammasv != gamma) { r = gamma / gammasv; N_VInv(M, M); N_VAddConst(M, -ONE, M); N_VScale(r, M, M); N_VAddConst(M, ONE, M); invOK = N_VInvTest(M, M); if (!invOK) { last_flag = CVDIAG_INV_FAIL; return (1); } gammasv = gamma; } /* Apply M-inverse to b */ N_VProd(b, M, b); last_flag = CVDIAG_SUCCESS; return(0); } /* * ----------------------------------------------------------------- * CVDiagFree * ----------------------------------------------------------------- * This routine frees memory specific to the diagonal linear solver. * ----------------------------------------------------------------- */ static int CVDiagFree(CVodeMem cv_mem) { CVDiagMem cvdiag_mem; cvdiag_mem = (CVDiagMem) lmem; N_VDestroy(M); N_VDestroy(bit); N_VDestroy(bitcomp); free(cvdiag_mem); cv_mem->cv_lmem = NULL; return(0); } /* * ================================================================ * * PART II - backward problems * * ================================================================ */ /* * CVDiagB * * Wrappers for the backward phase around the corresponding * CVODES functions */ int CVDiagB(void *cvode_mem, int which) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CVDIAG_MEM_NULL, "CVSDIAG", "CVDiagB", MSGDG_CVMEM_NULL); return(CVDIAG_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CVDIAG_NO_ADJ, "CVSDIAG", "CVDiagB", MSGDG_NO_ADJ); return(CVDIAG_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CVDIAG_ILL_INPUT, "CVSDIAG", "CVDiagB", MSGDG_BAD_WHICH); return(CVDIAG_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVDiag(cvodeB_mem); return(flag); } StanHeaders/src/cvodes/cvodea_io.c0000644000176200001440000004621114645137106016634 0ustar liggesusers/* * ----------------------------------------------------------------- * $Revision$ * $Date$ * ----------------------------------------------------------------- * Programmer: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the optional input and output * functions for the adjoint module in the CVODES solver. 
* ----------------------------------------------------------------- */ /* * ================================================================= * IMPORTED HEADER FILES * ================================================================= */ #include #include #include "cvodes_impl.h" #include /* * ================================================================= * CVODEA PRIVATE CONSTANTS * ================================================================= */ #define ONE RCONST(1.0) /* * ================================================================= * EXPORTED FUNCTIONS IMPLEMENTATION * ================================================================= */ /* * ----------------------------------------------------------------- * Optional input functions for ASA * ----------------------------------------------------------------- */ int CVodeSetAdjNoSensi(void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetAdjNoSensi", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetAdjNoSensi", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; ca_mem->ca_IMstoreSensi = SUNFALSE; return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Optional input functions for backward integration * ----------------------------------------------------------------- */ int CVodeSetNonlinearSolverB(void *cvode_mem, int which, SUNNonlinearSolver NLS) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetNonlinearSolverB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetNonlinearSolverB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetNonlinearSolverB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); return(CVodeSetNonlinearSolver(cvodeB_mem, NLS)); } int CVodeSetUserDataB(void *cvode_mem, int which, void *user_dataB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetUserDataB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetUserDataB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetUserDataB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvB_mem->cv_user_data = user_dataB; return(CV_SUCCESS); } int CVodeSetMaxOrdB(void *cvode_mem, int which, int maxordB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetMaxOrdB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/
  if (cv_mem->cv_adjMallocDone == SUNFALSE) {
    cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetMaxOrdB",
                   MSGCV_NO_ADJ);
    return(CV_NO_ADJ);
  }
  ca_mem = cv_mem->cv_adj_mem;

  /* Check which */
  if ( which >= ca_mem->ca_nbckpbs ) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetMaxOrdB",
                   MSGCV_BAD_WHICH);
    return(CV_ILL_INPUT);
  }

  /* Find the CVodeBMem entry in the linked list corresponding to which */
  cvB_mem = ca_mem->cvB_mem;
  while (cvB_mem != NULL) {
    if ( which == cvB_mem->cv_index ) break;
    cvB_mem = cvB_mem->cv_next;
  }

  cvodeB_mem = (void *) (cvB_mem->cv_mem);

  flag = CVodeSetMaxOrd(cvodeB_mem, maxordB);

  return(flag);
}

/*
 * CVodeSetMaxNumStepsB
 *
 * Wrapper: forwards mxstepsB to CVodeSetMaxNumSteps on the backward
 * problem 'which'.
 */

int CVodeSetMaxNumStepsB(void *cvode_mem, int which, long int mxstepsB)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  int flag;

  /* Check if cvode_mem exists */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetMaxNumStepsB",
                   MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Was ASA initialized? */
  if (cv_mem->cv_adjMallocDone == SUNFALSE) {
    cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetMaxNumStepsB",
                   MSGCV_NO_ADJ);
    return(CV_NO_ADJ);
  }
  ca_mem = cv_mem->cv_adj_mem;

  /* Check which */
  if ( which >= ca_mem->ca_nbckpbs ) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetMaxNumStepsB",
                   MSGCV_BAD_WHICH);
    return(CV_ILL_INPUT);
  }

  /* Find the CVodeBMem entry in the linked list corresponding to which */
  cvB_mem = ca_mem->cvB_mem;
  while (cvB_mem != NULL) {
    if ( which == cvB_mem->cv_index ) break;
    cvB_mem = cvB_mem->cv_next;
  }

  cvodeB_mem = (void *) (cvB_mem->cv_mem);

  flag = CVodeSetMaxNumSteps(cvodeB_mem, mxstepsB);

  return(flag);
}

/*
 * CVodeSetStabLimDetB
 *
 * Wrapper: forwards stldetB to CVodeSetStabLimDet on the backward
 * problem 'which'.
 */

int CVodeSetStabLimDetB(void *cvode_mem, int which, booleantype stldetB)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  int flag;

  /* Check if cvode_mem exists */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetStabLimDetB",
                   MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Was ASA initialized? */
  if (cv_mem->cv_adjMallocDone == SUNFALSE) {
    cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetStabLimDetB",
                   MSGCV_NO_ADJ);
    return(CV_NO_ADJ);
  }
  ca_mem = cv_mem->cv_adj_mem;

  /* Check which */
  if ( which >= ca_mem->ca_nbckpbs ) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetStabLimDetB",
                   MSGCV_BAD_WHICH);
    return(CV_ILL_INPUT);
  }

  /* Find the CVodeBMem entry in the linked list corresponding to which */
  cvB_mem = ca_mem->cvB_mem;
  while (cvB_mem != NULL) {
    if ( which == cvB_mem->cv_index ) break;
    cvB_mem = cvB_mem->cv_next;
  }

  cvodeB_mem = (void *) (cvB_mem->cv_mem);

  flag = CVodeSetStabLimDet(cvodeB_mem, stldetB);

  return(flag);
}

/*
 * CVodeSetInitStepB
 *
 * Wrapper: forwards hinB to CVodeSetInitStep on the backward
 * problem 'which'.
 */

int CVodeSetInitStepB(void *cvode_mem, int which, realtype hinB)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  int flag;

  /* Check if cvode_mem exists */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetInitStepB",
                   MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Was ASA initialized? */
  if (cv_mem->cv_adjMallocDone == SUNFALSE) {
    cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetInitStepB",
                   MSGCV_NO_ADJ);
    return(CV_NO_ADJ);
  }
  ca_mem = cv_mem->cv_adj_mem;

  /* Check which */
  if ( which >= ca_mem->ca_nbckpbs ) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetInitStepB",
                   MSGCV_BAD_WHICH);
    return(CV_ILL_INPUT);
  }

  /* Find the CVodeBMem entry in the linked list corresponding to which */
  cvB_mem = ca_mem->cvB_mem;
  while (cvB_mem != NULL) {
    if ( which == cvB_mem->cv_index ) break;
    cvB_mem = cvB_mem->cv_next;
  }

  cvodeB_mem = (void *) (cvB_mem->cv_mem);

  flag = CVodeSetInitStep(cvodeB_mem, hinB);

  return(flag);
}

/*
 * CVodeSetMinStepB
 *
 * Wrapper: forwards hminB to CVodeSetMinStep on the backward
 * problem 'which'.
 */

int CVodeSetMinStepB(void *cvode_mem, int which, realtype hminB)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  int flag;

  /* Check if cvode_mem exists */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetMinStepB",
                   MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Was ASA initialized? */
  if (cv_mem->cv_adjMallocDone == SUNFALSE) {
    cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetMinStepB",
                   MSGCV_NO_ADJ);
    return(CV_NO_ADJ);
  }
  ca_mem = cv_mem->cv_adj_mem;

  /* Check which */
  if ( which >= ca_mem->ca_nbckpbs ) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetMinStepB",
                   MSGCV_BAD_WHICH);
    return(CV_ILL_INPUT);
  }

  /* Find the CVodeBMem entry in the linked list corresponding to which */
  cvB_mem = ca_mem->cvB_mem;
  while (cvB_mem != NULL) {
    if ( which == cvB_mem->cv_index ) break;
    cvB_mem = cvB_mem->cv_next;
  }

  cvodeB_mem = (void *) (cvB_mem->cv_mem);

  flag = CVodeSetMinStep(cvodeB_mem, hminB);

  return(flag);
}

/*
 * CVodeSetMaxStepB
 *
 * Wrapper: forwards hmaxB to CVodeSetMaxStep on the backward
 * problem 'which'.
 */

int CVodeSetMaxStepB(void *cvode_mem, int which, realtype hmaxB)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  int flag;

  /* Check if cvode_mem exists */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetMaxStepB",
                   MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Was ASA initialized?
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetMaxStepB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetMaxStepB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVodeSetMaxStep(cvodeB_mem, hmaxB); return(flag); } int CVodeSetConstraintsB(void *cvode_mem, int which, N_Vector constraintsB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Is cvode_mem valid? */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetConstraintsB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Is ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetConstraintsB", MSGCV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetConstraintsB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to 'which'. 
*/ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index) break; /* advance */ cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) cvB_mem->cv_mem; flag = CVodeSetConstraints(cvodeB_mem, constraintsB); return(flag); } /* * CVodeSetQuad*B * * Wrappers for the backward phase around the corresponding * CVODES quadrature optional input functions */ int CVodeSetQuadErrConB(void *cvode_mem, int which, booleantype errconQB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSetQuadErrConB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSetQuadErrConB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSetQuadErrConB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVodeSetQuadErrCon(cvodeB_mem, errconQB); return(flag); } /* * ----------------------------------------------------------------- * Optional output functions for backward integration * ----------------------------------------------------------------- */ /* * CVodeGetAdjCVodeBmem * * This function returns a (void *) pointer to the CVODES * memory allocated for the backward problem. This pointer can * then be used to call any of the CVodeGet* CVODES routines to * extract optional output for the backward integration phase. 
*/ void *CVodeGetAdjCVodeBmem(void *cvode_mem, int which) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, 0, "CVODEA", "CVodeGetAdjCVodeBmem", MSGCV_NO_MEM); return(NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, 0, "CVODEA", "CVodeGetAdjCVodeBmem", MSGCV_NO_ADJ); return(NULL); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, 0, "CVODEA", "CVodeGetAdjCVodeBmem", MSGCV_BAD_WHICH); return(NULL); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); return(cvodeB_mem); } /* * CVodeGetAdjCheckPointsInfo * * This routine loads an array of nckpnts structures of type CVadjCheckPointRec. * The user must allocate space for ckpnt. */ int CVodeGetAdjCheckPointsInfo(void *cvode_mem, CVadjCheckPointRec *ckpnt) { CVodeMem cv_mem; CVadjMem ca_mem; CkpntMem ck_mem; int i; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeGetAdjCheckPointsInfo", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeGetAdjCheckPointsInfo", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; ck_mem = ca_mem->ck_mem; i = 0; while (ck_mem != NULL) { ckpnt[i].my_addr = (void *) ck_mem; ckpnt[i].next_addr = (void *) ck_mem->ck_next; ckpnt[i].t0 = ck_mem->ck_t0; ckpnt[i].t1 = ck_mem->ck_t1; ckpnt[i].nstep = ck_mem->ck_nst; ckpnt[i].order = ck_mem->ck_q; ckpnt[i].step = ck_mem->ck_h; ck_mem = ck_mem->ck_next; i++; } return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Undocumented Development User-Callable Functions * ----------------------------------------------------------------- */ /* * CVodeGetAdjDataPointHermite * * This routine returns the solution stored in the data structure * at the 'which' data point. Cubic Hermite interpolation. */ int CVodeGetAdjDataPointHermite(void *cvode_mem, int which, realtype *t, N_Vector y, N_Vector yd) { CVodeMem cv_mem; CVadjMem ca_mem; DtpntMem *dt_mem; HermiteDataMem content; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeGetAdjDataPointHermite", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeGetAdjDataPointHermite", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; dt_mem = ca_mem->dt_mem; if (ca_mem->ca_IMtype != CV_HERMITE) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVadjGetDataPointHermite", MSGCV_WRONG_INTERP); return(CV_ILL_INPUT); } *t = dt_mem[which]->t; content = (HermiteDataMem) (dt_mem[which]->content); if (y != NULL) N_VScale(ONE, content->y, y); if (yd != NULL) N_VScale(ONE, content->yd, yd); return(CV_SUCCESS); } /* * CVodeGetAdjDataPointPolynomial * * This routine returns the solution stored in the data structure * at the 'which' data point. 
Polynomial interpolation. */ int CVodeGetAdjDataPointPolynomial(void *cvode_mem, int which, realtype *t, int *order, N_Vector y) { CVodeMem cv_mem; CVadjMem ca_mem; DtpntMem *dt_mem; PolynomialDataMem content; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeGetAdjDataPointPolynomial", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeGetAdjDataPointPolynomial", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; dt_mem = ca_mem->dt_mem; if (ca_mem->ca_IMtype != CV_POLYNOMIAL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVadjGetDataPointPolynomial", MSGCV_WRONG_INTERP); return(CV_ILL_INPUT); } *t = dt_mem[which]->t; content = (PolynomialDataMem) (dt_mem[which]->content); if (y != NULL) N_VScale(ONE, content->y, y); *order = content->order; return(CV_SUCCESS); } /* * CVodeGetAdjCurrentCheckPoint * * Returns the address of the 'active' check point. */ int CVodeGetAdjCurrentCheckPoint(void *cvode_mem, void **addr) { CVodeMem cv_mem; CVadjMem ca_mem; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeGetAdjCurrentCheckPoint", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeGetAdjCurrentCheckPoint", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; *addr = (void *) ca_mem->ca_ckpntData; return(CV_SUCCESS); } StanHeaders/src/cvodes/cvodes_impl.h0000644000176200001440000014634714645137106017230 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Implementation header file for the main CVODES integrator. * -----------------------------------------------------------------*/ #ifndef _CVODES_IMPL_H #define _CVODES_IMPL_H #include #include "cvodes/cvodes.h" #include "sundials_context_impl.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ================================================================= * I N T E R N A L C O N S T A N T S * ================================================================= */ /* Basic constants */ #define ADAMS_Q_MAX 12 /* max value of q for lmm == ADAMS */ #define BDF_Q_MAX 5 /* max value of q for lmm == BDF */ #define Q_MAX ADAMS_Q_MAX /* max value of q for either lmm */ #define L_MAX (Q_MAX+1) /* max value of L for either lmm */ #define NUM_TESTS 5 /* number of error test quantities */ #define HMIN_DEFAULT RCONST(0.0) /* hmin default value */ #define HMAX_INV_DEFAULT RCONST(0.0) /* hmax_inv default value */ #define MXHNIL_DEFAULT 10 /* mxhnil default value */ #define MXSTEP_DEFAULT 500 /* mxstep default value */ #define MSBP 20 /* max no. 
of steps between lsetup calls */ /* Return values for lower level routines used by CVode and functions provided to the nonlinear solver */ #define RHSFUNC_RECVR +9 #define SRHSFUNC_RECVR +12 /* nonlinear solver constants NLS_MAXCOR maximum no. of corrector iterations for the nonlinear solver CRDOWN constant used in the estimation of the convergence rate (crate) of the iterates for the nonlinear equation RDIV declare divergence if ratio del/delp > RDIV */ #define NLS_MAXCOR 3 #define CRDOWN RCONST(0.3) #define RDIV RCONST(2.0) /* * ================================================================= * F O R W A R D P O I N T E R R E F E R E N C E S * ================================================================= */ typedef struct CVadjMemRec *CVadjMem; typedef struct CkpntMemRec *CkpntMem; typedef struct DtpntMemRec *DtpntMem; typedef struct CVodeBMemRec *CVodeBMem; /* * ================================================================= * M A I N I N T E G R A T O R M E M O R Y B L O C K * ================================================================= */ /* * ----------------------------------------------------------------- * Types: struct CVodeMemRec, CVodeMem * ----------------------------------------------------------------- * The type CVodeMem is type pointer to struct CVodeMemRec. * This structure contains fields to keep track of problem state. 
* ----------------------------------------------------------------- */ typedef struct CVodeMemRec { SUNContext cv_sunctx; realtype cv_uround; /* machine unit roundoff */ /*-------------------------- Problem Specification Data --------------------------*/ CVRhsFn cv_f; /* y' = f(t,y(t)) */ void *cv_user_data; /* user pointer passed to f */ int cv_lmm; /* lmm = CV_ADAMS or CV_BDF */ int cv_itol; /* itol = CV_SS, CV_SV, CV_WF, CV_NN */ realtype cv_reltol; /* relative tolerance */ realtype cv_Sabstol; /* scalar absolute tolerance */ N_Vector cv_Vabstol; /* vector absolute tolerance */ booleantype cv_atolmin0; /* flag indicating that min(abstol) = 0 */ booleantype cv_user_efun; /* SUNTRUE if user sets efun */ CVEwtFn cv_efun; /* function to set ewt */ void *cv_e_data; /* user pointer passed to efun */ booleantype cv_constraintsSet; /* constraints vector present: do constraints calc */ /*----------------------- Quadrature Related Data -----------------------*/ booleantype cv_quadr; /* SUNTRUE if integrating quadratures */ CVQuadRhsFn cv_fQ; /* q' = fQ(t, y(t)) */ booleantype cv_errconQ; /* SUNTRUE if quadrs. 
are included in error test */ int cv_itolQ; /* itolQ = CV_SS or CV_SV */ realtype cv_reltolQ; /* relative tolerance for quadratures */ realtype cv_SabstolQ; /* scalar absolute tolerance for quadratures */ N_Vector cv_VabstolQ; /* vector absolute tolerance for quadratures */ booleantype cv_atolQmin0; /* flag indicating that min(abstolQ) = 0 */ /*------------------------ Sensitivity Related Data ------------------------*/ booleantype cv_sensi; /* SUNTRUE if computing sensitivities */ int cv_Ns; /* Number of sensitivities */ int cv_ism; /* ism = SIMULTANEOUS or STAGGERED */ CVSensRhsFn cv_fS; /* fS = (df/dy)*yS + (df/dp) */ CVSensRhs1Fn cv_fS1; /* fS1 = (df/dy)*yS_i + (df/dp) */ void *cv_fS_data; /* data pointer passed to fS */ booleantype cv_fSDQ; /* SUNTRUE if using internal DQ functions */ int cv_ifS; /* ifS = ALLSENS or ONESENS */ realtype *cv_p; /* parameters in f(t,y,p) */ realtype *cv_pbar; /* scale factors for parameters */ int *cv_plist; /* list of sensitivities */ int cv_DQtype; /* central/forward finite differences */ realtype cv_DQrhomax; /* cut-off value for separate/simultaneous FD */ booleantype cv_errconS; /* SUNTRUE if yS are considered in err. control */ int cv_itolS; realtype cv_reltolS; /* relative tolerance for sensitivities */ realtype *cv_SabstolS; /* scalar absolute tolerances for sensi. */ N_Vector *cv_VabstolS; /* vector absolute tolerances for sensi. */ booleantype *cv_atolSmin0; /* flags indicating that min(abstolS[i]) = 0 */ /*----------------------------------- Quadrature Sensitivity Related Data -----------------------------------*/ booleantype cv_quadr_sensi; /* SUNTRUE if computing sensitivties of quadrs. */ CVQuadSensRhsFn cv_fQS; /* fQS = (dfQ/dy)*yS + (dfQ/dp) */ void *cv_fQS_data; /* data pointer passed to fQS */ booleantype cv_fQSDQ; /* SUNTRUE if using internal DQ functions */ booleantype cv_errconQS; /* SUNTRUE if yQS are considered in err. con. 
*/ int cv_itolQS; realtype cv_reltolQS; /* relative tolerance for yQS */ realtype *cv_SabstolQS; /* scalar absolute tolerances for yQS */ N_Vector *cv_VabstolQS; /* vector absolute tolerances for yQS */ booleantype *cv_atolQSmin0; /* flags indicating that min(abstolQS[i]) = 0 */ /*----------------------- Nordsieck History Array -----------------------*/ N_Vector cv_zn[L_MAX]; /* Nordsieck array, of size N x (q+1). zn[j] is a vector of length N (j=0,...,q) zn[j] = [1/factorial(j)] * h^j * (jth derivative of the interpolating polynomial) */ /*------------------- Vectors of length N -------------------*/ N_Vector cv_ewt; /* error weight vector */ N_Vector cv_y; /* y is used as temporary storage by the solver The memory is provided by the user to CVode where the vector is named yout. */ N_Vector cv_acor; /* In the context of the solution of the nonlinear equation, acor = y_n(m) - y_n(0). On return, this vector is scaled to give the estimated local error */ N_Vector cv_tempv; /* temporary storage vector */ N_Vector cv_ftemp; /* temporary storage vector */ N_Vector cv_vtemp1; /* temporary storage vector */ N_Vector cv_vtemp2; /* temporary storage vector */ N_Vector cv_vtemp3; /* temporary storage vector */ N_Vector cv_constraints; /* vector of inequality constraint options */ /*-------------------------- Quadrature Related Vectors --------------------------*/ N_Vector cv_znQ[L_MAX]; /* Nordsieck arrays for quadratures */ N_Vector cv_ewtQ; /* error weight vector for quadratures */ N_Vector cv_yQ; /* Unlike y, yQ is not allocated by the user */ N_Vector cv_acorQ; /* acorQ = yQ_n(m) - yQ_n(0) */ N_Vector cv_tempvQ; /* temporary storage vector (~ tempv) */ /*--------------------------- Sensitivity Related Vectors ---------------------------*/ N_Vector *cv_znS[L_MAX]; /* Nordsieck arrays for sensitivities */ N_Vector *cv_ewtS; /* error weight vectors for sensitivities */ N_Vector *cv_yS; /* yS=yS0 (allocated by the user) */ N_Vector *cv_acorS; /* acorS = yS_n(m) - yS_n(0) */ 
N_Vector *cv_tempvS; /* temporary storage vector (~ tempv) */ N_Vector *cv_ftempS; /* temporary storage vector (~ ftemp) */ booleantype cv_stgr1alloc; /* Did we allocate ncfS1, ncfnS1, and nniS1? */ /*-------------------------------------- Quadrature Sensitivity Related Vectors --------------------------------------*/ N_Vector *cv_znQS[L_MAX]; /* Nordsieck arrays for quadr. sensitivities */ N_Vector *cv_ewtQS; /* error weight vectors for sensitivities */ N_Vector *cv_yQS; /* Unlike yS, yQS is not allocated by the user */ N_Vector *cv_acorQS; /* acorQS = yQS_n(m) - yQS_n(0) */ N_Vector *cv_tempvQS; /* temporary storage vector (~ tempv) */ N_Vector cv_ftempQ; /* temporary storage vector (~ ftemp) */ /*----------------- Tstop information -----------------*/ booleantype cv_tstopset; realtype cv_tstop; /*--------- Step Data ---------*/ int cv_q; /* current order */ int cv_qprime; /* order to be used on the next step qprime = q-1, q, or q+1 */ int cv_next_q; /* order to be used on the next step */ int cv_qwait; /* number of internal steps to wait before considering a change in q */ int cv_L; /* L = q + 1 */ realtype cv_hin; /* initial step size */ realtype cv_h; /* current step size */ realtype cv_hprime; /* step size to be used on the next step */ realtype cv_next_h; /* step size to be used on the next step */ realtype cv_eta; /* eta = hprime / h */ realtype cv_hscale; /* value of h used in zn */ realtype cv_tn; /* current internal value of t */ realtype cv_tretlast; /* last value of t returned by CVode */ realtype cv_tau[L_MAX+1]; /* array of previous q+1 successful step sizes indexed from 1 to q+1 */ realtype cv_tq[NUM_TESTS+1]; /* array of test quantities indexed from 1 to NUM_TESTS(=5) */ realtype cv_l[L_MAX]; /* coefficients of l(x) (degree q poly) */ realtype cv_rl1; /* the scalar 1/l[1] */ realtype cv_gamma; /* gamma = h * rl1 */ realtype cv_gammap; /* gamma at the last setup call */ realtype cv_gamrat; /* gamma / gammap */ realtype cv_crate; /* estimated 
corrector convergence rate */ realtype cv_crateS; /* estimated corrector convergence rate (Stgr) */ realtype cv_delp; /* norm of previous nonlinear solver update */ realtype cv_acnrm; /* | acor | */ booleantype cv_acnrmcur; /* is | acor | current? */ realtype cv_acnrmQ; /* | acorQ | */ realtype cv_acnrmS; /* | acorS | */ booleantype cv_acnrmScur; /* is | acorS | current? */ realtype cv_acnrmQS; /* | acorQS | */ realtype cv_nlscoef; /* coeficient in nonlinear convergence test */ int *cv_ncfS1; /* Array of Ns local counters for conv. * failures (used in CVStep for STAGGERED1) */ /*------ Limits ------*/ int cv_qmax; /* q <= qmax */ long int cv_mxstep; /* maximum number of internal steps for one user call */ int cv_mxhnil; /* maximum number of warning messages issued to the user that t + h == t for the next internal step */ int cv_maxnef; /* maximum number of error test failures */ int cv_maxncf; /* maximum number of nonlinear convergence failures */ realtype cv_hmin; /* |h| >= hmin */ realtype cv_hmax_inv; /* |h| <= 1/hmax_inv */ realtype cv_etamax; /* eta <= etamax */ /*-------- Counters --------*/ long int cv_nst; /* number of internal steps taken */ long int cv_nfe; /* number of f calls */ long int cv_nfQe; /* number of fQ calls */ long int cv_nfSe; /* number of fS calls */ long int cv_nfeS; /* number of f calls from sensi DQ */ long int cv_nfQSe; /* number of fQS calls */ long int cv_nfQeS; /* number of fQ calls from sensi DQ */ long int cv_ncfn; /* number of corrector convergence failures */ long int cv_ncfnS; /* number of total sensi. corr. conv. failures */ long int *cv_ncfnS1; /* number of sensi. corrector conv. failures */ long int cv_nni; /* number of nonlinear iterations performed */ long int cv_nniS; /* number of total sensi. nonlinear iterations */ long int *cv_nniS1; /* number of sensi. nonlinear iterations */ long int cv_netf; /* number of error test failures */ long int cv_netfQ; /* number of quadr. 
error test failures */ long int cv_netfS; /* number of sensi. error test failures */ long int cv_netfQS; /* number of quadr. sensi. error test failures */ long int cv_nsetups; /* number of setup calls */ long int cv_nsetupsS; /* number of setup calls due to sensitivities */ int cv_nhnil; /* number of messages issued to the user that t + h == t for the next iternal step */ /*---------------- Step size ratios ----------------*/ realtype cv_etaqm1; /* ratio of new to old h for order q-1 */ realtype cv_etaq; /* ratio of new to old h for order q */ realtype cv_etaqp1; /* ratio of new to old h for order q+1 */ /*------------------ Space requirements ------------------*/ sunindextype cv_lrw1; /* no. of realtype words in 1 N_Vector y */ sunindextype cv_liw1; /* no. of integer words in 1 N_Vector y */ sunindextype cv_lrw1Q; /* no. of realtype words in 1 N_Vector yQ */ sunindextype cv_liw1Q; /* no. of integer words in 1 N_Vector yQ */ long int cv_lrw; /* no. of realtype words in CVODE work vectors */ long int cv_liw; /* no. 
of integer words in CVODE work vectors */ /*--------------------- Nonlinear Solver Data ---------------------*/ SUNNonlinearSolver NLS; /* nonlinear solver object */ booleantype ownNLS; /* flag indicating NLS ownership */ SUNNonlinearSolver NLSsim; /* NLS object for the simultaneous corrector */ booleantype ownNLSsim; /* flag indicating NLS ownership */ SUNNonlinearSolver NLSstg; /* NLS object for the staggered corrector */ booleantype ownNLSstg; /* flag indicating NLS ownership */ SUNNonlinearSolver NLSstg1; /* NLS object for the staggered1 corrector */ booleantype ownNLSstg1; /* flag indicating NLS ownership */ int sens_solve_idx; /* index of the current staggered1 solve */ long int nnip; /* previous total number of iterations */ booleantype sens_solve; /* flag indicating if the current solve is a staggered or staggered1 sensitivity solve */ CVRhsFn nls_f; /* f(t,y(t)) used in the nonlinear solver */ int convfail; /* flag to indicate when a Jacobian update may be needed */ /* The following vectors are NVector wrappers for use with the simultaneous and staggered corrector methods: Simultaneous: zn0Sim = [cv_zn[0], cv_znS[0]] ycorSim = [cv_acor, cv_acorS] ewtSim = [cv_ewt, cv_ewtS] Staggered: zn0Stg = cv_znS[0] ycorStg = cv_acorS ewtStg = cv_ewtS */ N_Vector zn0Sim, ycorSim, ewtSim; N_Vector zn0Stg, ycorStg, ewtStg; /* flags indicating if vector wrappers for the simultaneous and staggered correctors have been allocated */ booleantype simMallocDone; booleantype stgMallocDone; /*------------------ Linear Solver Data ------------------*/ /* Linear Solver functions to be called */ int (*cv_linit)(struct CVodeMemRec *cv_mem); int (*cv_lsetup)(struct CVodeMemRec *cv_mem, int convfail, N_Vector ypred, N_Vector fpred, booleantype *jcurPtr, N_Vector vtemp1, N_Vector vtemp2, N_Vector vtemp3); int (*cv_lsolve)(struct CVodeMemRec *cv_mem, N_Vector b, N_Vector weight, N_Vector ycur, N_Vector fcur); int (*cv_lfree)(struct CVodeMemRec *cv_mem); /* Linear Solver specific memory */ 
void *cv_lmem; /* linear solver interface memory structure */ long int cv_msbp; /* max number of steps between lsetip calls */ /* Flag to request a call to the setup routine */ booleantype cv_forceSetup; /*------------ Saved Values ------------*/ int cv_qu; /* last successful q value used */ long int cv_nstlp; /* step number of last setup call */ realtype cv_h0u; /* actual initial stepsize */ realtype cv_hu; /* last successful h value used */ realtype cv_saved_tq5; /* saved value of tq[5] */ booleantype cv_jcur; /* is Jacobian info for linear solver current? */ int cv_convfail; /* flag storing previous solver failure mode */ realtype cv_tolsf; /* tolerance scale factor */ int cv_qmax_alloc; /* value of qmax used when allocating mem */ int cv_qmax_allocQ; /* qmax used when allocating quad. mem */ int cv_qmax_allocS; /* qmax used when allocating sensi. mem */ int cv_qmax_allocQS; /* qmax used when allocating quad. sensi. mem */ int cv_indx_acor; /* index of the zn vector with saved acor */ /*-------------------------------------------------------------------- Flags turned ON by CVodeInit, CVodeSensMalloc, and CVodeQuadMalloc and read by CVodeReInit, CVodeSensReInit, and CVodeQuadReInit --------------------------------------------------------------------*/ booleantype cv_VabstolMallocDone; booleantype cv_MallocDone; booleantype cv_constraintsMallocDone; booleantype cv_VabstolQMallocDone; booleantype cv_QuadMallocDone; booleantype cv_VabstolSMallocDone; booleantype cv_SabstolSMallocDone; booleantype cv_SensMallocDone; booleantype cv_VabstolQSMallocDone; booleantype cv_SabstolQSMallocDone; booleantype cv_QuadSensMallocDone; /*------------------------------------------- Error handler function and error ouput file -------------------------------------------*/ CVErrHandlerFn cv_ehfun; /* error messages are handled by ehfun */ void *cv_eh_data; /* data pointer passed to ehfun */ FILE *cv_errfp; /* CVODE error messages are sent to errfp */ 
/*------------------------------------------- User access function -------------------------------------------*/ CVMonitorFn cv_monitorfun; /* func called with CVODE mem and user data */ long int cv_monitor_interval; /* step interval to call cv_monitorfun */ /*------------------------- Stability Limit Detection -------------------------*/ booleantype cv_sldeton; /* is Stability Limit Detection on? */ realtype cv_ssdat[6][4]; /* scaled data array for STALD */ int cv_nscon; /* counter for STALD method */ long int cv_nor; /* counter for number of order reductions */ /*---------------- Rootfinding Data ----------------*/ CVRootFn cv_gfun; /* function g for roots sought */ int cv_nrtfn; /* number of components of g */ int *cv_iroots; /* array for root information */ int *cv_rootdir; /* array specifying direction of zero-crossing */ realtype cv_tlo; /* nearest endpoint of interval in root search */ realtype cv_thi; /* farthest endpoint of interval in root search */ realtype cv_trout; /* t value returned by rootfinding routine */ realtype *cv_glo; /* saved array of g values at t = tlo */ realtype *cv_ghi; /* saved array of g values at t = thi */ realtype *cv_grout; /* array of g values at t = trout */ realtype cv_toutc; /* copy of tout (if NORMAL mode) */ realtype cv_ttol; /* tolerance on root location trout */ int cv_taskc; /* copy of parameter itask */ int cv_irfnd; /* flag showing whether last step had a root */ long int cv_nge; /* counter for g evaluations */ booleantype *cv_gactive; /* array with active/inactive event functions */ int cv_mxgnull; /* number of warning messages about possible g==0 */ /*----------------------- Fused Vector Operations -----------------------*/ realtype* cv_cvals; /* array of scalars */ N_Vector* cv_Xvecs; /* array of vectors */ N_Vector* cv_Zvecs; /* array of vectors */ /*------------------------ Adjoint sensitivity data ------------------------*/ booleantype cv_adj; /* SUNTRUE if performing ASA */ struct CVadjMemRec *cv_adj_mem; /* 
Pointer to adjoint memory structure */ booleantype cv_adjMallocDone; } *CVodeMem; /* * ================================================================= * A D J O I N T M O D U L E M E M O R Y B L O C K * ================================================================= */ /* * ----------------------------------------------------------------- * Types : struct CkpntMemRec, CkpntMem * ----------------------------------------------------------------- * The type CkpntMem is type pointer to struct CkpntMemRec. * This structure contains fields to store all information at a * check point that is needed to 'hot' start cvodes. * ----------------------------------------------------------------- */ struct CkpntMemRec { /* Integration limits */ realtype ck_t0; realtype ck_t1; /* Nordsieck History Array */ N_Vector ck_zn[L_MAX]; /* Do we need to carry quadratures? */ booleantype ck_quadr; /* Nordsieck History Array for quadratures */ N_Vector ck_znQ[L_MAX]; /* Do we need to carry sensitivities? */ booleantype ck_sensi; /* number of sensitivities */ int ck_Ns; /* Nordsieck History Array for sensitivities */ N_Vector *ck_znS[L_MAX]; /* Do we need to carry quadrature sensitivities? */ booleantype ck_quadr_sensi; /* Nordsieck History Array for quadrature sensitivities */ N_Vector *ck_znQS[L_MAX]; /* Was ck_zn[qmax] allocated? 
ck_zqm = 0 - no ck_zqm = qmax - yes */ int ck_zqm; /* Step data */ long int ck_nst; realtype ck_tretlast; int ck_q; int ck_qprime; int ck_qwait; int ck_L; realtype ck_gammap; realtype ck_h; realtype ck_hprime; realtype ck_hscale; realtype ck_eta; realtype ck_etamax; realtype ck_tau[L_MAX+1]; realtype ck_tq[NUM_TESTS+1]; realtype ck_l[L_MAX]; /* Saved values */ realtype ck_saved_tq5; /* Pointer to next structure in list */ struct CkpntMemRec *ck_next; }; /* * ----------------------------------------------------------------- * Types for functions provided by an interpolation module * ----------------------------------------------------------------- * cvaIMMallocFn: Type for a function that initializes the content * field of the structures in the dt array * cvaIMFreeFn: Type for a function that deallocates the content * field of the structures in the dt array * cvaIMGetYFn: Type for a function that returns the * interpolated forward solution. * cvaIMStorePnt: Type for a function that stores a new * point in the structure d * ----------------------------------------------------------------- */ typedef booleantype (*cvaIMMallocFn)(CVodeMem cv_mem); typedef void (*cvaIMFreeFn)(CVodeMem cv_mem); typedef int (*cvaIMGetYFn)(CVodeMem cv_mem, realtype t, N_Vector y, N_Vector *yS); typedef int (*cvaIMStorePntFn)(CVodeMem cv_mem, DtpntMem d); /* * ----------------------------------------------------------------- * Type : struct DtpntMemRec * ----------------------------------------------------------------- * This structure contains fields to store all information at a * data point that is needed to interpolate solution of forward * simulations. Its content field depends on IMtype. 
 * -----------------------------------------------------------------
 */

struct DtpntMemRec {
  realtype t;    /* time of this data point */
  void *content; /* IMtype-dependent content */
};

/* Data for cubic Hermite interpolation */
typedef struct HermiteDataMemRec {
  N_Vector y;    /* solution at this point */
  N_Vector yd;   /* derivative at this point */
  N_Vector *yS;  /* sensitivities at this point */
  N_Vector *ySd; /* sensitivity derivatives at this point */
} *HermiteDataMem;

/* Data for polynomial interpolation */
typedef struct PolynomialDataMemRec {
  N_Vector y;   /* solution at this point */
  N_Vector *yS; /* sensitivities at this point */
  int order;    /* order of the interpolating polynomial */
} *PolynomialDataMem;

/*
 * -----------------------------------------------------------------
 * Type : struct CVodeBMemRec
 * -----------------------------------------------------------------
 * The type CVodeBMem is a pointer to a structure which stores all
 * information for ONE backward problem.
 * The CVadjMem structure contains a linked list of CVodeBMem pointers
 * -----------------------------------------------------------------
 */

struct CVodeBMemRec {

  /* Index of this backward problem */
  int cv_index;

  /* Time at which the backward problem is initialized */
  realtype cv_t0;

  /* CVODES memory for this backward problem */
  CVodeMem cv_mem;

  /* Flags to indicate that this backward problem's RHS or quad RHS
   * require forward sensitivities */
  booleantype cv_f_withSensi;
  booleantype cv_fQ_withSensi;

  /* Right hand side function for backward run */
  CVRhsFnB cv_f;
  CVRhsFnBS cv_fs;

  /* Right hand side quadrature function for backward run */
  CVQuadRhsFnB cv_fQ;
  CVQuadRhsFnBS cv_fQs;

  /* User-supplied data pointer (user_data) */
  void *cv_user_data;

  /* Memory block for a linear solver's interface to CVODEA */
  void *cv_lmem;

  /* Function to free any memory allocated by the linear solver */
  int (*cv_lfree)(CVodeBMem cvB_mem);

  /* Memory block for a preconditioner's module interface to CVODEA */
  void *cv_pmem;

  /* Function to free any memory allocated by the preconditioner module */
  int (*cv_pfree)(CVodeBMem cvB_mem);

  /* Time at which to extract solution / quadratures */
  realtype cv_tout;

  /* Workspace Nvector */
  N_Vector cv_y;

  /* Pointer to next structure in list */
  struct CVodeBMemRec *cv_next;

};

/*
 * -----------------------------------------------------------------
 * Type : struct CVadjMemRec
 * -----------------------------------------------------------------
 * The type CVadjMem is a pointer to struct CVadjMemRec.
 * This structure contains fields to store all information
 * necessary for adjoint sensitivity analysis.
 * -----------------------------------------------------------------
 */

struct CVadjMemRec {

  /* --------------------
   * Forward problem data
   * -------------------- */

  /* Integration interval */
  realtype ca_tinitial, ca_tfinal;

  /* Flag for first call to CVodeF */
  booleantype ca_firstCVodeFcall;

  /* Flag if CVodeF was called with TSTOP */
  booleantype ca_tstopCVodeFcall;
  realtype ca_tstopCVodeF;

  /* Flag if CVodeF was called in CV_NORMAL_MODE and encountered
   * a root after tout */
  booleantype ca_rootret;
  realtype ca_troot;

  /* ----------------------
   * Backward problems data
   * ---------------------- */

  /* Storage for backward problems */
  struct CVodeBMemRec *cvB_mem;

  /* Number of backward problems */
  int ca_nbckpbs;

  /* Address of current backward problem */
  struct CVodeBMemRec *ca_bckpbCrt;

  /* Flag for first call to CVodeB */
  booleantype ca_firstCVodeBcall;

  /* ----------------
   * Check point data
   * ---------------- */

  /* Storage for check point information */
  struct CkpntMemRec *ck_mem;

  /* Number of check points */
  int ca_nckpnts;

  /* Address of the check point structure for which data is available */
  struct CkpntMemRec *ca_ckpntData;

  /* ------------------
   * Interpolation data
   * ------------------ */

  /* Number of steps between 2 check points */
  long int ca_nsteps;

  /* Last index used in CVAfindIndex */
  long int ca_ilast;

  /* Storage for data from forward runs */
  struct DtpntMemRec **dt_mem;

  /* Actual number of data points in dt_mem (typically np=nsteps+1) */
  long int ca_np;

  /* Interpolation type */
  int ca_IMtype;

  /* Functions set by the interpolation module */
  cvaIMMallocFn ca_IMmalloc;
  cvaIMFreeFn ca_IMfree;
  cvaIMStorePntFn ca_IMstore; /* store a new interpolation point */
  cvaIMGetYFn ca_IMget;       /* interpolate forward solution */

  /* Flags controlling the interpolation module */
  booleantype ca_IMmallocDone;  /* IM initialized? */
  booleantype ca_IMnewData;     /* new data available in dt_mem?*/
  booleantype ca_IMstoreSensi;  /* store sensitivities? */
  booleantype ca_IMinterpSensi; /* interpolate sensitivities? */

  /* Workspace for the interpolation module */
  N_Vector ca_Y[L_MAX];   /* pointers to zn[i] */
  N_Vector *ca_YS[L_MAX]; /* pointers to znS[i] */
  realtype ca_T[L_MAX];   /* times associated with ca_Y / ca_YS entries */

  /* -------------------------------
   * Workspace for wrapper functions
   * ------------------------------- */

  N_Vector ca_ytmp;
  N_Vector *ca_yStmp;

};

/*
 * =================================================================
 * I N T E R F A C E   T O   L I N E A R   S O L V E R S
 * =================================================================
 */

/*
 * -----------------------------------------------------------------
 * Communication between CVODE and a CVODE Linear Solver
 * -----------------------------------------------------------------
 * convfail (input to cv_lsetup)
 *
 * CV_NO_FAILURES : Either this is the first cv_setup call for this
 *                  step, or the local error test failed on the
 *                  previous attempt at this step (but the nonlinear
 *                  solver iteration converged).
 *
 * CV_FAIL_BAD_J : This value is passed to cv_lsetup if
 *
 *   (a) The previous nonlinear solver corrector iteration
 *       did not converge and the linear solver's
 *       setup routine indicated that its Jacobian-
 *       related data is not current
 *   or
 *   (b) During the previous nonlinear solver corrector
 *       iteration, the linear solver's solve routine
 *       failed in a recoverable manner and the
 *       linear solver's setup routine indicated that
 *       its Jacobian-related data is not current.
 *
 * CV_FAIL_OTHER : During the current internal step try, the
 *                 previous nonlinear solver iteration failed to converge
 *                 even though the linear solver was using current
 *                 Jacobian-related data.
* ----------------------------------------------------------------- */ /* Constants for convfail (input to cv_lsetup) */ #define CV_NO_FAILURES 0 #define CV_FAIL_BAD_J 1 #define CV_FAIL_OTHER 2 /* * ----------------------------------------------------------------- * int (*cv_linit)(CVodeMem cv_mem); * ----------------------------------------------------------------- * The purpose of cv_linit is to complete initializations for a * specific linear solver, such as counters and statistics. * An LInitFn should return 0 if it has successfully initialized the * CVODE linear solver and a negative value otherwise. * If an error does occur, an appropriate message should be sent to * the error handler function. * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * int (*cv_lsetup)(CVodeMem cv_mem, int convfail, N_Vector ypred, * N_Vector fpred, booleantype *jcurPtr, * N_Vector vtemp1, N_Vector vtemp2, * N_Vector vtemp3); * ----------------------------------------------------------------- * The job of cv_lsetup is to prepare the linear solver for * subsequent calls to cv_lsolve. It may recompute Jacobian- * related data is it deems necessary. Its parameters are as * follows: * * cv_mem - problem memory pointer of type CVodeMem. See the * typedef earlier in this file. * * convfail - a flag to indicate any problem that occurred during * the solution of the nonlinear equation on the * current time step for which the linear solver is * being used. This flag can be used to help decide * whether the Jacobian data kept by a CVODE linear * solver needs to be updated or not. * Its possible values have been documented above. * * ypred - the predicted y vector for the current CVODE internal * step. * * fpred - f(tn, ypred). * * jcurPtr - a pointer to a boolean to be filled in by cv_lsetup. 
 * The function should set *jcurPtr=SUNTRUE if its Jacobian
 * data is current after the call and should set
 * *jcurPtr=SUNFALSE if its Jacobian data is not current.
 * Note: If cv_lsetup calls for re-evaluation of
 * Jacobian data (based on convfail and CVODE state
 * data), it should return *jcurPtr=SUNTRUE always;
 * otherwise an infinite loop can result.
 *
 * vtemp1 - temporary N_Vector provided for use by cv_lsetup.
 *
 * vtemp2 - temporary N_Vector provided for use by cv_lsetup.
 *
 * vtemp3 - temporary N_Vector provided for use by cv_lsetup.
 *
 * The cv_lsetup routine should return 0 if successful, a positive
 * value for a recoverable error, and a negative value for an
 * unrecoverable error.
 * -----------------------------------------------------------------
 */

/*
 * -----------------------------------------------------------------
 * int (*cv_lsolve)(CVodeMem cv_mem, N_Vector b, N_Vector weight,
 *                  N_Vector ycur, N_Vector fcur);
 * -----------------------------------------------------------------
 * cv_lsolve must solve the linear equation P x = b, where
 * P is some approximation to (I - gamma J), J = (df/dy)(tn,ycur)
 * and the RHS vector b is input. The N-vector ycur contains
 * the solver's current approximation to y(tn) and the vector
 * fcur contains the N_Vector f(tn,ycur). The solution is to be
 * returned in the vector b. cv_lsolve returns a positive value
 * for a recoverable error and a negative value for an
 * unrecoverable error. Success is indicated by a 0 return value.
 * -----------------------------------------------------------------
 */

/*
 * -----------------------------------------------------------------
 * int (*cv_lfree)(CVodeMem cv_mem);
 * -----------------------------------------------------------------
 * cv_lfree should free up any memory allocated by the linear
 * solver. This routine is called once a problem has been
 * completed and the linear solver is no longer needed. It should
 * return 0 upon success, nonzero on failure.
* ----------------------------------------------------------------- */ /* * ================================================================= * I N T E R N A L F U N C T I O N S * ================================================================= */ /* Norm functions */ realtype cvSensNorm(CVodeMem cv_mem, N_Vector *xS, N_Vector *wS); realtype cvSensUpdateNorm(CVodeMem cv_mem, realtype old_nrm, N_Vector *xS, N_Vector *wS); /* Prototype of internal ewtSet function */ int cvEwtSet(N_Vector ycur, N_Vector weight, void *data); /* High level error handler */ void cvProcessError(CVodeMem cv_mem, int error_code, const char *module, const char *fname, const char *msgfmt, ...); /* Prototype of internal ErrHandler function */ void cvErrHandler(int error_code, const char *module, const char *function, char *msg, void *data); /* Nonlinear solver initialization */ int cvNlsInit(CVodeMem cv_mem); int cvNlsInitSensSim(CVodeMem cv_mem); int cvNlsInitSensStg(CVodeMem cv_mem); int cvNlsInitSensStg1(CVodeMem cv_mem); /* Prototypes for internal sensitivity rhs wrappers */ int cvSensRhsWrapper(CVodeMem cv_mem, realtype time, N_Vector ycur, N_Vector fcur, N_Vector *yScur, N_Vector *fScur, N_Vector temp1, N_Vector temp2); int cvSensRhs1Wrapper(CVodeMem cv_mem, realtype time, N_Vector ycur, N_Vector fcur, int is, N_Vector yScur, N_Vector fScur, N_Vector temp1, N_Vector temp2); /* Prototypes for internal sensitivity rhs DQ functions */ int cvSensRhsInternalDQ(int Ns, realtype t, N_Vector y, N_Vector ydot, N_Vector *yS, N_Vector *ySdot, void *fS_data, N_Vector tempv, N_Vector ftemp); int cvSensRhs1InternalDQ(int Ns, realtype t, N_Vector y, N_Vector ydot, int is, N_Vector yS, N_Vector ySdot, void *fS_data, N_Vector tempv, N_Vector ftemp); /* * ================================================================= * E R R O R M E S S A G E S * ================================================================= */ #if defined(SUNDIALS_EXTENDED_PRECISION) #define MSG_TIME "t = %Lg" #define MSG_TIME_H 
"t = %Lg and h = %Lg" #define MSG_TIME_INT "t = %Lg is not between tcur - hu = %Lg and tcur = %Lg." #define MSG_TIME_TOUT "tout = %Lg" #define MSG_TIME_TSTOP "tstop = %Lg" #elif defined(SUNDIALS_DOUBLE_PRECISION) #define MSG_TIME "t = %lg" #define MSG_TIME_H "t = %lg and h = %lg" #define MSG_TIME_INT "t = %lg is not between tcur - hu = %lg and tcur = %lg." #define MSG_TIME_TOUT "tout = %lg" #define MSG_TIME_TSTOP "tstop = %lg" #else #define MSG_TIME "t = %g" #define MSG_TIME_H "t = %g and h = %g" #define MSG_TIME_INT "t = %g is not between tcur - hu = %g and tcur = %g." #define MSG_TIME_TOUT "tout = %g" #define MSG_TIME_TSTOP "tstop = %g" #endif /* Initialization and I/O error messages */ #define MSGCV_NO_MEM "cvode_mem = NULL illegal." #define MSGCV_CVMEM_FAIL "Allocation of cvode_mem failed." #define MSGCV_MEM_FAIL "A memory request failed." #define MSGCV_BAD_LMM "Illegal value for lmm. The legal values are CV_ADAMS and CV_BDF." #define MSGCV_NULL_SUNCTX "sunctx = NULL illegal." #define MSGCV_NO_MALLOC "Attempt to call before CVodeInit." #define MSGCV_NEG_MAXORD "maxord <= 0 illegal." #define MSGCV_BAD_MAXORD "Illegal attempt to increase maximum method order." #define MSGCV_SET_SLDET "Attempt to use stability limit detection with the CV_ADAMS method illegal." #define MSGCV_NEG_HMIN "hmin < 0 illegal." #define MSGCV_NEG_HMAX "hmax < 0 illegal." #define MSGCV_BAD_HMIN_HMAX "Inconsistent step size limits: hmin > hmax." #define MSGCV_BAD_RELTOL "reltol < 0 illegal." #define MSGCV_BAD_ABSTOL "abstol has negative component(s) (illegal)." #define MSGCV_NULL_ABSTOL "abstol = NULL illegal." #define MSGCV_NULL_Y0 "y0 = NULL illegal." #define MSGCV_Y0_FAIL_CONSTR "y0 fails to satisfy constraints." #define MSGCV_BAD_ISM_CONSTR "Constraints can not be enforced while forward sensitivity is used with simultaneous method" #define MSGCV_NULL_F "f = NULL illegal." #define MSGCV_NULL_G "g = NULL illegal." #define MSGCV_BAD_NVECTOR "A required vector operation is not implemented." 
#define MSGCV_BAD_CONSTR "Illegal values in constraints vector." #define MSGCV_BAD_K "Illegal value for k." #define MSGCV_NULL_DKY "dky = NULL illegal." #define MSGCV_BAD_T "Illegal value for t." MSG_TIME_INT #define MSGCV_NO_ROOT "Rootfinding was not initialized." #define MSGCV_NLS_INIT_FAIL "The nonlinear solver's init routine failed." #define MSGCV_NO_QUAD "Quadrature integration not activated." #define MSGCV_BAD_ITOLQ "Illegal value for itolQ. The legal values are CV_SS and CV_SV." #define MSGCV_NULL_ABSTOLQ "abstolQ = NULL illegal." #define MSGCV_BAD_RELTOLQ "reltolQ < 0 illegal." #define MSGCV_BAD_ABSTOLQ "abstolQ has negative component(s) (illegal)." #define MSGCV_SENSINIT_2 "Sensitivity analysis already initialized." #define MSGCV_NO_SENSI "Forward sensitivity analysis not activated." #define MSGCV_BAD_ITOLS "Illegal value for itolS. The legal values are CV_SS, CV_SV, and CV_EE." #define MSGCV_NULL_ABSTOLS "abstolS = NULL illegal." #define MSGCV_BAD_RELTOLS "reltolS < 0 illegal." #define MSGCV_BAD_ABSTOLS "abstolS has negative component(s) (illegal)." #define MSGCV_BAD_PBAR "pbar has zero component(s) (illegal)." #define MSGCV_BAD_PLIST "plist has negative component(s) (illegal)." #define MSGCV_BAD_NS "NS <= 0 illegal." #define MSGCV_NULL_YS0 "yS0 = NULL illegal." #define MSGCV_BAD_ISM "Illegal value for ism. Legal values are: CV_SIMULTANEOUS, CV_STAGGERED and CV_STAGGERED1." #define MSGCV_BAD_IFS "Illegal value for ifS. Legal values are: CV_ALLSENS and CV_ONESENS." #define MSGCV_BAD_ISM_IFS "Illegal ism = CV_STAGGERED1 for CVodeSensInit." #define MSGCV_BAD_IS "Illegal value for is." #define MSGCV_NULL_DKYA "dkyA = NULL illegal." #define MSGCV_BAD_DQTYPE "Illegal value for DQtype. Legal values are: CV_CENTERED and CV_FORWARD." #define MSGCV_BAD_DQRHO "DQrhomax < 0 illegal." #define MSGCV_BAD_ITOLQS "Illegal value for itolQS. The legal values are CV_SS, CV_SV, and CV_EE." #define MSGCV_NULL_ABSTOLQS "abstolQS = NULL illegal." 
#define MSGCV_BAD_RELTOLQS "reltolQS < 0 illegal."
#define MSGCV_BAD_ABSTOLQS "abstolQS has negative component(s) (illegal)."
#define MSGCV_NO_QUADSENSI "Forward sensitivity analysis for quadrature variables not activated."
#define MSGCV_NULL_YQS0 "yQS0 = NULL illegal."

/* CVode Error Messages */

#define MSGCV_NO_TOL "No integration tolerances have been specified."
#define MSGCV_LSOLVE_NULL "The linear solver's solve routine is NULL."
#define MSGCV_YOUT_NULL "yout = NULL illegal."
#define MSGCV_TRET_NULL "tret = NULL illegal."
#define MSGCV_BAD_EWT "Initial ewt has component(s) equal to zero (illegal)."
#define MSGCV_EWT_NOW_BAD "At " MSG_TIME ", a component of ewt has become <= 0."
#define MSGCV_BAD_ITASK "Illegal value for itask."
#define MSGCV_BAD_H0 "h0 and tout - t0 inconsistent."
#define MSGCV_BAD_TOUT "Trouble interpolating at " MSG_TIME_TOUT ". tout too far back in direction of integration"
/* Typo fix: "user-provide" -> "user-provided" in the two EwtSet failure messages. */
#define MSGCV_EWT_FAIL "The user-provided EwtSet function failed."
#define MSGCV_EWT_NOW_FAIL "At " MSG_TIME ", the user-provided EwtSet function failed."
#define MSGCV_LINIT_FAIL "The linear solver's init routine failed."
#define MSGCV_HNIL_DONE "The above warning has been issued mxhnil times and will not be issued again for this problem."
#define MSGCV_TOO_CLOSE "tout too close to t0 to start integration."
#define MSGCV_MAX_STEPS "At " MSG_TIME ", mxstep steps taken before reaching tout."
#define MSGCV_TOO_MUCH_ACC "At " MSG_TIME ", too much accuracy requested."
#define MSGCV_HNIL "Internal " MSG_TIME_H " are such that t + h = t on the next step. The solver will continue anyway."
#define MSGCV_ERR_FAILS "At " MSG_TIME_H ", the error test failed repeatedly or with |h| = hmin."
#define MSGCV_CONV_FAILS "At " MSG_TIME_H ", the corrector convergence test failed repeatedly or with |h| = hmin."
#define MSGCV_SETUP_FAILED "At " MSG_TIME ", the setup routine failed in an unrecoverable manner."
#define MSGCV_SOLVE_FAILED "At " MSG_TIME ", the solve routine failed in an unrecoverable manner."
#define MSGCV_FAILED_CONSTR "At " MSG_TIME ", unable to satisfy inequality constraints."
#define MSGCV_RHSFUNC_FAILED "At " MSG_TIME ", the right-hand side routine failed in an unrecoverable manner."
#define MSGCV_RHSFUNC_UNREC "At " MSG_TIME ", the right-hand side failed in a recoverable manner, but no recovery is possible."
#define MSGCV_RHSFUNC_REPTD "At " MSG_TIME " repeated recoverable right-hand side function errors."
#define MSGCV_RHSFUNC_FIRST "The right-hand side routine failed at the first call."
#define MSGCV_RTFUNC_FAILED "At " MSG_TIME ", the rootfinding routine failed in an unrecoverable manner."
#define MSGCV_CLOSE_ROOTS "Root found at and very near " MSG_TIME "."
#define MSGCV_BAD_TSTOP "The value " MSG_TIME_TSTOP " is behind current " MSG_TIME " in the direction of integration."
#define MSGCV_INACTIVE_ROOTS "At the end of the first step, there are still some root functions identically 0. This warning will not be issued again."
#define MSGCV_NLS_SETUP_FAILED "At " MSG_TIME ", the nonlinear solver setup failed unrecoverably."
#define MSGCV_NLS_INPUT_NULL "At " MSG_TIME ", the nonlinear solver was passed a NULL input."
#define MSGCV_NLS_FAIL "At " MSG_TIME ", the nonlinear solver failed in an unrecoverable manner."

/* Quadrature error messages */

#define MSGCV_NO_TOLQ "No integration tolerances for quadrature variables have been specified."
#define MSGCV_BAD_EWTQ "Initial ewtQ has component(s) equal to zero (illegal)."
#define MSGCV_EWTQ_NOW_BAD "At " MSG_TIME ", a component of ewtQ has become <= 0."
#define MSGCV_QRHSFUNC_FAILED "At " MSG_TIME ", the quadrature right-hand side routine failed in an unrecoverable manner."
#define MSGCV_QRHSFUNC_UNREC "At " MSG_TIME ", the quadrature right-hand side failed in a recoverable manner, but no recovery is possible."
#define MSGCV_QRHSFUNC_REPTD "At " MSG_TIME " repeated recoverable quadrature right-hand side function errors."
#define MSGCV_QRHSFUNC_FIRST "The quadrature right-hand side routine failed at the first call."

/* Forward sensitivity error messages */

#define MSGCV_NO_TOLS "No integration tolerances for sensitivity variables have been specified."
#define MSGCV_NULL_P "p = NULL when using internal DQ for sensitivity RHS illegal."
#define MSGCV_BAD_EWTS "Initial ewtS has component(s) equal to zero (illegal)."
#define MSGCV_EWTS_NOW_BAD "At " MSG_TIME ", a component of ewtS has become <= 0."
#define MSGCV_SRHSFUNC_FAILED "At " MSG_TIME ", the sensitivity right-hand side routine failed in an unrecoverable manner."
#define MSGCV_SRHSFUNC_UNREC "At " MSG_TIME ", the sensitivity right-hand side failed in a recoverable manner, but no recovery is possible."
#define MSGCV_SRHSFUNC_REPTD "At " MSG_TIME " repeated recoverable sensitivity right-hand side function errors."
#define MSGCV_SRHSFUNC_FIRST "The sensitivity right-hand side routine failed at the first call."

/* Quadrature sensitivity error messages */

#define MSGCV_NULL_FQ "CVODES is expected to use DQ to evaluate the RHS of quad. sensi., but quadratures were not initialized."
#define MSGCV_NO_TOLQS "No integration tolerances for quadrature sensitivity variables have been specified."
#define MSGCV_BAD_EWTQS "Initial ewtQS has component(s) equal to zero (illegal)."
#define MSGCV_EWTQS_NOW_BAD "At " MSG_TIME ", a component of ewtQS has become <= 0."
#define MSGCV_QSRHSFUNC_FAILED "At " MSG_TIME ", the quadrature sensitivity right-hand side routine failed in an unrecoverable manner."
#define MSGCV_QSRHSFUNC_UNREC "At " MSG_TIME ", the quadrature sensitivity right-hand side failed in a recoverable manner, but no recovery is possible."
#define MSGCV_QSRHSFUNC_REPTD "At " MSG_TIME " repeated recoverable quadrature sensitivity right-hand side function errors."
#define MSGCV_QSRHSFUNC_FIRST "The quadrature sensitivity right-hand side routine failed at the first call."
/* * ================================================================= * A D J O I N T E R R O R M E S S A G E S * ================================================================= */ #define MSGCV_NO_ADJ "Illegal attempt to call before calling CVodeAdjMalloc." #define MSGCV_BAD_STEPS "Steps nonpositive illegal." #define MSGCV_BAD_INTERP "Illegal value for interp." #define MSGCV_BAD_WHICH "Illegal value for which." #define MSGCV_NO_BCK "No backward problems have been defined yet." #define MSGCV_NO_FWD "Illegal attempt to call before calling CVodeF." #define MSGCV_BAD_TB0 "The initial time tB0 for problem %d is outside the interval over which the forward problem was solved." #define MSGCV_BAD_SENSI "At least one backward problem requires sensitivities, but they were not stored for interpolation." #define MSGCV_BAD_ITASKB "Illegal value for itaskB. Legal values are CV_NORMAL and CV_ONE_STEP." #define MSGCV_BAD_TBOUT "The final time tBout is outside the interval over which the forward problem was solved." #define MSGCV_BACK_ERROR "Error occured while integrating backward problem # %d" #define MSGCV_BAD_TINTERP "Bad t = %g for interpolation." #define MSGCV_WRONG_INTERP "This function cannot be called for the specified interp type." #ifdef __cplusplus } #endif #endif StanHeaders/src/cvodes/cvodes_nls_stg1.c0000644000176200001440000002761714645137106020012 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------------------
* This is the implementation file for the CVODES nonlinear solver interface
* used with the CV_STAGGERED1 sensitivity corrector, i.e. one nonlinear solve
* per sensitivity system, indexed by cv_mem->sens_solve_idx.
* ---------------------------------------------------------------------------*/

#include "cvodes_impl.h"
#include "sundials/sundials_math.h"

/* constant macros */
#define ONE RCONST(1.0)

/* private functions */
static int cvNlsResidualSensStg1(N_Vector ycor, N_Vector res, void* cvode_mem);
static int cvNlsFPFunctionSensStg1(N_Vector ycor, N_Vector res, void* cvode_mem);
static int cvNlsLSetupSensStg1(booleantype jbad, booleantype* jcur, void* cvode_mem);
static int cvNlsLSolveSensStg1(N_Vector delta, void* cvode_mem);
static int cvNlsConvTestSensStg1(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del,
                                 realtype tol, N_Vector ewt, void* cvode_mem);

/* -----------------------------------------------------------------------------
 * Exported functions
 * ---------------------------------------------------------------------------*/

/* Attach a user-supplied SUNNonlinearSolver for the staggered-1 sensitivity
   corrector.  Validates the solver's capabilities, requires that sensitivities
   were initialized with the CV_STAGGERED1 method, frees any previously owned
   solver, and wires up the system, convergence-test, and max-iteration
   settings.  Returns CV_SUCCESS, CV_MEM_NULL, or CV_ILL_INPUT. */
int CVodeSetNonlinearSolverSensStg1(void *cvode_mem, SUNNonlinearSolver NLS)
{
  CVodeMem cv_mem;
  int retval;

  /* Return immediately if CVode memory is NULL */
  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Return immediately if NLS memory is NULL */
  if (NLS == NULL) {
    cvProcessError(NULL, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   "NLS must be non-NULL");
    return (CV_ILL_INPUT);
  }

  /* check for required nonlinear solver functions */
  if ( NLS->ops->gettype == NULL ||
       NLS->ops->solve == NULL ||
       NLS->ops->setsysfn == NULL ) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   "NLS does not support required operations");
    return(CV_ILL_INPUT);
  }

  /* check that sensitivities were initialized */
  if (!(cv_mem->cv_sensi)) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   MSGCV_NO_SENSI);
    return(CV_ILL_INPUT);
  }

  /* check that staggered corrector was selected */
  if (cv_mem->cv_ism != CV_STAGGERED1) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   "Sensitivity solution method is not CV_STAGGERED1");
    return(CV_ILL_INPUT);
  }

  /* free any existing nonlinear solver */
  if ((cv_mem->NLSstg1 != NULL) && (cv_mem->ownNLSstg1))
    retval = SUNNonlinSolFree(cv_mem->NLSstg1);

  /* set SUNNonlinearSolver pointer */
  cv_mem->NLSstg1 = NLS;

  /* Set NLS ownership flag. If this function was called to attach the default
     NLS, CVODE will set the flag to SUNTRUE after this function returns. */
  cv_mem->ownNLSstg1 = SUNFALSE;

  /* set the nonlinear system function (residual form for rootfinding solvers,
     fixed-point form otherwise) */
  if (SUNNonlinSolGetType(NLS) == SUNNONLINEARSOLVER_ROOTFIND) {
    retval = SUNNonlinSolSetSysFn(cv_mem->NLSstg1, cvNlsResidualSensStg1);
  } else if (SUNNonlinSolGetType(NLS) ==  SUNNONLINEARSOLVER_FIXEDPOINT) {
    retval = SUNNonlinSolSetSysFn(cv_mem->NLSstg1, cvNlsFPFunctionSensStg1);
  } else {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   "Invalid nonlinear solver type");
    return(CV_ILL_INPUT);
  }

  if (retval != CV_SUCCESS) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   "Setting nonlinear system function failed");
    return(CV_ILL_INPUT);
  }

  /* set convergence test function */
  retval = SUNNonlinSolSetConvTestFn(cv_mem->NLSstg1, cvNlsConvTestSensStg1,
                                     cvode_mem);
  if (retval != CV_SUCCESS) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   "Setting convergence test function failed");
    return(CV_ILL_INPUT);
  }

  /* set max allowed nonlinear iterations */
  retval = SUNNonlinSolSetMaxIters(cv_mem->NLSstg1, NLS_MAXCOR);
  if (retval != CV_SUCCESS) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES",
                   "CVodeSetNonlinearSolverSensStg1",
                   "Setting maximum number of nonlinear iterations failed");
    return(CV_ILL_INPUT);
  }

  /* Reset the acnrmScur flag to SUNFALSE (always false for stg1) */
  cv_mem->cv_acnrmScur = SUNFALSE;

  return(CV_SUCCESS);
}

/* -----------------------------------------------------------------------------
 * Private functions
 * ---------------------------------------------------------------------------*/

/* Attach the linear solver setup/solve wrappers to NLSstg1 (or NULL when the
   corresponding cv_lsetup/cv_lsolve routine is absent), initialize the
   nonlinear solver, and reset the previous iteration count nnip used to
   update nniS1.  Returns CV_SUCCESS or CV_NLS_INIT_FAIL. */
int cvNlsInitSensStg1(CVodeMem cvode_mem)
{
  int retval;

  /* set the linear solver setup wrapper function */
  if (cvode_mem->cv_lsetup)
    retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLSstg1, cvNlsLSetupSensStg1);
  else
    retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLSstg1, NULL);

  if (retval != CV_SUCCESS) {
    cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensStg1",
                   "Setting the linear solver setup function failed");
    return(CV_NLS_INIT_FAIL);
  }

  /* set the linear solver solve wrapper function */
  if (cvode_mem->cv_lsolve)
    retval = SUNNonlinSolSetLSolveFn(cvode_mem->NLSstg1, cvNlsLSolveSensStg1);
  else
    retval = SUNNonlinSolSetLSolveFn(cvode_mem->NLSstg1, NULL);

  if (retval != CV_SUCCESS) {
    cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensStg1",
                   "Setting linear solver solve function failed");
    return(CV_NLS_INIT_FAIL);
  }

  /* initialize nonlinear solver */
  retval = SUNNonlinSolInitialize(cvode_mem->NLSstg1);

  if (retval != CV_SUCCESS) {
    cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensStg1",
                   MSGCV_NLS_INIT_FAIL);
    return(CV_NLS_INIT_FAIL);
  }

  /* reset previous iteration count for updating nniS1 */
  cvode_mem->nnip = 0;

  return(CV_SUCCESS);
}

/* Linear solver setup wrapper: invokes cv_lsetup for the state Jacobian,
   bumps both setup counters (state and sensitivity), and resets the
   convergence-rate bookkeeping (gamrat, gammap, crate, crateS, nstlp). */
static int cvNlsLSetupSensStg1(booleantype jbad, booleantype* jcur,
                               void* cvode_mem)
{
  CVodeMem cv_mem;
  int retval;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES",
                   "cvNlsLSetupSensStg1", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* if the nonlinear solver marked the Jacobian as bad update convfail */
  if (jbad)
    cv_mem->convfail = CV_FAIL_BAD_J;

  /* setup the linear solver */
  retval = cv_mem->cv_lsetup(cv_mem, cv_mem->convfail, cv_mem->cv_y,
                             cv_mem->cv_ftemp, &(cv_mem->cv_jcur),
                             cv_mem->cv_vtemp1, cv_mem->cv_vtemp2,
                             cv_mem->cv_vtemp3);
  cv_mem->cv_nsetups++;
  cv_mem->cv_nsetupsS++;

  /* update Jacobian status */
  *jcur = cv_mem->cv_jcur;

  cv_mem->cv_gamrat = ONE;
  cv_mem->cv_gammap = cv_mem->cv_gamma;
  cv_mem->cv_crate  = ONE;
  cv_mem->cv_crateS = ONE;
  cv_mem->cv_nstlp  = cv_mem->cv_nst;

  if (retval < 0) return(CV_LSETUP_FAIL);
  if (retval > 0) return(SUN_NLS_CONV_RECVR);

  return(CV_SUCCESS);
}

/* Linear solver solve wrapper: solves the linear system for the sensitivity
   currently selected by cv_mem->sens_solve_idx, using that sensitivity's
   error weights. */
static int cvNlsLSolveSensStg1(N_Vector delta, void* cvode_mem)
{
  CVodeMem cv_mem;
  int retval, is;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES",
                   "cvNlsLSolveSensStg1", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* get index of current sensitivity solve */
  is = cv_mem->sens_solve_idx;

  /* solve the sensitivity linear systems */
  retval = cv_mem->cv_lsolve(cv_mem, delta, cv_mem->cv_ewtS[is],
                             cv_mem->cv_y, cv_mem->cv_ftemp);

  if (retval < 0) return(CV_LSOLVE_FAIL);
  if (retval > 0) return(SUN_NLS_CONV_RECVR);

  return(CV_SUCCESS);
}

/* Convergence test for the staggered-1 sensitivity corrector: compares the
   weighted norm of the current correction (scaled by the estimated
   convergence rate crateS) against tol, and signals a recoverable failure
   when the iteration appears to diverge. */
static int cvNlsConvTestSensStg1(SUNNonlinearSolver NLS, N_Vector ycor,
                                 N_Vector delta, realtype tol, N_Vector ewt,
                                 void* cvode_mem)
{
  CVodeMem cv_mem;
  int m, retval;
  realtype del;
  realtype dcon;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES",
                   "cvNlsConvTestSensStg1", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* compute the norm of the current sensitivity correction */
  del = N_VWrmsNorm(delta, ewt);

  /* get the current nonlinear solver iteration count */
  retval = SUNNonlinSolGetCurIter(NLS, &m);
  if (retval != CV_SUCCESS) return(CV_MEM_NULL);

  /* Test for convergence. If m > 0, an estimate of the convergence
     rate constant is stored in crate, and used in the test. */
  if (m > 0) {
    cv_mem->cv_crateS = SUNMAX(CRDOWN * cv_mem->cv_crateS, del/cv_mem->cv_delp);
  }
  dcon = del * SUNMIN(ONE, cv_mem->cv_crateS) / tol;

  /* check if nonlinear system was solved successfully */
  if (dcon <= ONE) return(CV_SUCCESS);

  /* check if the iteration seems to be diverging */
  if ((m >= 1) && (del > RDIV*cv_mem->cv_delp)) return(SUN_NLS_CONV_RECVR);

  /* Save norm of correction and loop again */
  cv_mem->cv_delp = del;

  /* Not yet converged */
  return(SUN_NLS_CONTINUE);
}

/* Nonlinear residual for the current (index is) sensitivity system:
   res = rl1 * znS[1][is] + ycor - gamma * fS(t, y, yS[is]).
   Also refreshes cv_yS[is] = znS[0][is] + ycor as a side effect. */
static int cvNlsResidualSensStg1(N_Vector ycor, N_Vector res, void* cvode_mem)
{
  CVodeMem cv_mem;
  int retval, is;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES",
                   "cvNlsResidualSensStg1", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* get index of current sensitivity solve */
  is = cv_mem->sens_solve_idx;

  /* update sensitivity based on the current correction */
  N_VLinearSum(ONE, cv_mem->cv_znS[0][is], ONE, ycor, cv_mem->cv_yS[is]);

  /* evaluate the sensitivity rhs function */
  retval = cvSensRhs1Wrapper(cv_mem, cv_mem->cv_tn, cv_mem->cv_y,
                             cv_mem->cv_ftemp, is, cv_mem->cv_yS[is],
                             cv_mem->cv_ftempS[is], cv_mem->cv_vtemp1,
                             cv_mem->cv_vtemp2);

  if (retval < 0) return(CV_SRHSFUNC_FAIL);
  if (retval > 0) return(SRHSFUNC_RECVR);

  /* compute the sensitivity residual */
  N_VLinearSum(cv_mem->cv_rl1, cv_mem->cv_znS[1][is], ONE, ycor, res);
  N_VLinearSum(-cv_mem->cv_gamma, cv_mem->cv_ftempS[is], ONE, res, res);

  return(CV_SUCCESS);
}

/* Fixed-point function for the current (index is) sensitivity system:
   res = rl1 * (h * fS(t, y, yS[is]) - znS[1][is]).
   Also refreshes cv_yS[is] = znS[0][is] + ycor as a side effect. */
static int cvNlsFPFunctionSensStg1(N_Vector ycor, N_Vector res, void* cvode_mem)
{
  CVodeMem cv_mem;
  int retval, is;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES",
                   "cvNlsFPFunctionSensStg1", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* get index of current sensitivity solve */
  is = cv_mem->sens_solve_idx;

  /* update the sensitivities based on the current correction */
  N_VLinearSum(ONE, cv_mem->cv_znS[0][is], ONE, ycor, cv_mem->cv_yS[is]);

  /* evaluate the sensitivity rhs function */
  retval = cvSensRhs1Wrapper(cv_mem, cv_mem->cv_tn, cv_mem->cv_y,
                             cv_mem->cv_ftemp, is, cv_mem->cv_yS[is],
                             res, cv_mem->cv_vtemp1, cv_mem->cv_vtemp2);

  if (retval < 0) return(CV_SRHSFUNC_FAIL);
  if (retval > 0) return(SRHSFUNC_RECVR);

  /* evaluate sensitivity fixed point function */
  N_VLinearSum(cv_mem->cv_h, res, -ONE, cv_mem->cv_znS[1][is], res);
  N_VScale(cv_mem->cv_rl1, res, res);

  return(CV_SUCCESS);
}
StanHeaders/src/cvodes/cvodes_nls.c0000644000176200001440000002756014645137106017051 0ustar liggesusers/* -----------------------------------------------------------------------------
 * Programmer(s): David J. Gardner @ LLNL
 * -----------------------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------------------
 * This is the implementation file for the CVODES nonlinear solver interface.
* ---------------------------------------------------------------------------*/ #include "cvodes_impl.h" #include "sundials/sundials_math.h" #include "sundials/sundials_nvector_senswrapper.h" /* constant macros */ #define ONE RCONST(1.0) /* private functions */ static int cvNlsResidual(N_Vector ycor, N_Vector res, void* cvode_mem); static int cvNlsFPFunction(N_Vector ycor, N_Vector res, void* cvode_mem); static int cvNlsLSetup(booleantype jbad, booleantype* jcur, void* cvode_mem); static int cvNlsLSolve(N_Vector delta, void* cvode_mem); static int cvNlsConvTest(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector del, realtype tol, N_Vector ewt, void* cvode_mem); /* ----------------------------------------------------------------------------- * Exported functions * ---------------------------------------------------------------------------*/ int CVodeSetNonlinearSolver(void *cvode_mem, SUNNonlinearSolver NLS) { CVodeMem cv_mem; int retval; /* Return immediately if CVode memory is NULL */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetNonlinearSolver", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Return immediately if NLS memory is NULL */ if (NLS == NULL) { cvProcessError(NULL, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolver", "NLS must be non-NULL"); return (CV_ILL_INPUT); } /* check for required nonlinear solver functions */ if ( NLS->ops->gettype == NULL || NLS->ops->solve == NULL || NLS->ops->setsysfn == NULL ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolver", "NLS does not support required operations"); return(CV_ILL_INPUT); } /* free any existing nonlinear solver */ if ((cv_mem->NLS != NULL) && (cv_mem->ownNLS)) retval = SUNNonlinSolFree(cv_mem->NLS); /* set SUNNonlinearSolver pointer */ cv_mem->NLS = NLS; /* Set NLS ownership flag. If this function was called to attach the default NLS, CVODE will set the flag to SUNTRUE after this function returns. 
*/ cv_mem->ownNLS = SUNFALSE; /* set the nonlinear system function */ if (SUNNonlinSolGetType(NLS) == SUNNONLINEARSOLVER_ROOTFIND) { retval = SUNNonlinSolSetSysFn(cv_mem->NLS, cvNlsResidual); } else if (SUNNonlinSolGetType(NLS) == SUNNONLINEARSOLVER_FIXEDPOINT) { retval = SUNNonlinSolSetSysFn(cv_mem->NLS, cvNlsFPFunction); } else { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolver", "Invalid nonlinear solver type"); return(CV_ILL_INPUT); } if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolver", "Setting nonlinear system function failed"); return(CV_ILL_INPUT); } /* set convergence test function */ retval = SUNNonlinSolSetConvTestFn(cv_mem->NLS, cvNlsConvTest, cvode_mem); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolver", "Setting convergence test function failed"); return(CV_ILL_INPUT); } /* set max allowed nonlinear iterations */ retval = SUNNonlinSolSetMaxIters(cv_mem->NLS, NLS_MAXCOR); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolver", "Setting maximum number of nonlinear iterations failed"); return(CV_ILL_INPUT); } /* Reset the acnrmcur flag to SUNFALSE */ cv_mem->cv_acnrmcur = SUNFALSE; /* Set the nonlinear system RHS function */ if (!(cv_mem->cv_f)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolver", "The ODE RHS function is NULL"); return(CV_ILL_INPUT); } cv_mem->nls_f = cv_mem->cv_f; return(CV_SUCCESS); } /*--------------------------------------------------------------- CVodeSetNlsRhsFn: This routine sets an alternative user-supplied ODE right-hand side function to use in the evaluation of nonlinear system functions. 
---------------------------------------------------------------*/ int CVodeSetNlsRhsFn(void *cvode_mem, CVRhsFn f) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetNlsRhsFn", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (f) cv_mem->nls_f = f; else cv_mem->nls_f = cv_mem->cv_f; return(CV_SUCCESS); } /*--------------------------------------------------------------- CVodeGetNonlinearSystemData: This routine provides access to the relevant data needed to compute the nonlinear system function. ---------------------------------------------------------------*/ int CVodeGetNonlinearSystemData(void *cvode_mem, realtype *tcur, N_Vector *ypred, N_Vector *yn, N_Vector *fn, realtype *gamma, realtype *rl1, N_Vector *zn1, void **user_data) { CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNonlinearSystemData", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *tcur = cv_mem->cv_tn; *ypred = cv_mem->cv_zn[0]; *yn = cv_mem->cv_y; *fn = cv_mem->cv_ftemp; *gamma = cv_mem->cv_gamma; *rl1 = cv_mem->cv_rl1; *zn1 = cv_mem->cv_zn[1]; *user_data = cv_mem->cv_user_data; return(CV_SUCCESS); } /* ----------------------------------------------------------------------------- * Private functions * ---------------------------------------------------------------------------*/ int cvNlsInit(CVodeMem cvode_mem) { int retval; /* set the linear solver setup wrapper function */ if (cvode_mem->cv_lsetup) retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLS, cvNlsLSetup); else retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLS, NULL); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODE", "cvNlsInit", "Setting the linear solver setup function failed"); return(CV_NLS_INIT_FAIL); } /* set the linear solver solve wrapper function */ if (cvode_mem->cv_lsolve) retval = SUNNonlinSolSetLSolveFn(cvode_mem->NLS, cvNlsLSolve); else retval = 
SUNNonlinSolSetLSolveFn(cvode_mem->NLS, NULL); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODE", "cvNlsInit", "Setting linear solver solve function failed"); return(CV_NLS_INIT_FAIL); } /* initialize nonlinear solver */ retval = SUNNonlinSolInitialize(cvode_mem->NLS); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODE", "cvNlsInit", MSGCV_NLS_INIT_FAIL); return(CV_NLS_INIT_FAIL); } return(CV_SUCCESS); } static int cvNlsLSetup(booleantype jbad, booleantype* jcur, void* cvode_mem) { CVodeMem cv_mem; int retval; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODE", "cvNlsLSetup", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* if the nonlinear solver marked the Jacobian as bad update convfail */ if (jbad) cv_mem->convfail = CV_FAIL_BAD_J; /* setup the linear solver */ retval = cv_mem->cv_lsetup(cv_mem, cv_mem->convfail, cv_mem->cv_y, cv_mem->cv_ftemp, &(cv_mem->cv_jcur), cv_mem->cv_vtemp1, cv_mem->cv_vtemp2, cv_mem->cv_vtemp3); cv_mem->cv_nsetups++; /* update Jacobian status */ *jcur = cv_mem->cv_jcur; cv_mem->cv_forceSetup = SUNFALSE; cv_mem->cv_gamrat = ONE; cv_mem->cv_gammap = cv_mem->cv_gamma; cv_mem->cv_crate = ONE; cv_mem->cv_crateS = ONE; cv_mem->cv_nstlp = cv_mem->cv_nst; if (retval < 0) return(CV_LSETUP_FAIL); if (retval > 0) return(SUN_NLS_CONV_RECVR); return(CV_SUCCESS); } static int cvNlsLSolve(N_Vector delta, void* cvode_mem) { CVodeMem cv_mem; int retval; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODE", "cvNlsLSolve", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; retval = cv_mem->cv_lsolve(cv_mem, delta, cv_mem->cv_ewt, cv_mem->cv_y, cv_mem->cv_ftemp); if (retval < 0) return(CV_LSOLVE_FAIL); if (retval > 0) return(SUN_NLS_CONV_RECVR); return(CV_SUCCESS); } static int cvNlsConvTest(SUNNonlinearSolver NLS, N_Vector ycor, N_Vector delta, realtype tol, N_Vector ewt, void* cvode_mem) { CVodeMem cv_mem; int m, retval; realtype del; 
realtype dcon; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODE", "cvNlsConvTest", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* compute the norm of the correction */ del = N_VWrmsNorm(delta, ewt); /* get the current nonlinear solver iteration count */ retval = SUNNonlinSolGetCurIter(NLS, &m); if (retval != CV_SUCCESS) return(CV_MEM_NULL); /* Test for convergence. If m > 0, an estimate of the convergence rate constant is stored in crate, and used in the test. */ if (m > 0) { cv_mem->cv_crate = SUNMAX(CRDOWN * cv_mem->cv_crate, del/cv_mem->cv_delp); } dcon = del * SUNMIN(ONE, cv_mem->cv_crate) / tol; if (dcon <= ONE) { cv_mem->cv_acnrm = (m==0) ? del : N_VWrmsNorm(ycor, ewt); cv_mem->cv_acnrmcur = SUNTRUE; return(CV_SUCCESS); /* Nonlinear system was solved successfully */ } /* check if the iteration seems to be diverging */ if ((m >= 1) && (del > RDIV*cv_mem->cv_delp)) return(SUN_NLS_CONV_RECVR); /* Save norm of correction and loop again */ cv_mem->cv_delp = del; /* Not yet converged */ return(SUN_NLS_CONTINUE); } static int cvNlsResidual(N_Vector ycor, N_Vector res, void* cvode_mem) { CVodeMem cv_mem; int retval; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODE", "cvNlsResidual", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* update the state based on the current correction */ N_VLinearSum(ONE, cv_mem->cv_zn[0], ONE, ycor, cv_mem->cv_y); /* evaluate the rhs function */ retval = cv_mem->nls_f(cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_ftemp, cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) return(CV_RHSFUNC_FAIL); if (retval > 0) return(RHSFUNC_RECVR); /* compute the resiudal */ N_VLinearSum(cv_mem->cv_rl1, cv_mem->cv_zn[1], ONE, ycor, res); N_VLinearSum(-cv_mem->cv_gamma, cv_mem->cv_ftemp, ONE, res, res); return(CV_SUCCESS); } static int cvNlsFPFunction(N_Vector ycor, N_Vector res, void* cvode_mem) { CVodeMem cv_mem; int retval; if (cvode_mem == NULL) { cvProcessError(NULL, 
CV_MEM_NULL, "CVODE", "cvNlsFPFunction", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* update the state based on the current correction */ N_VLinearSum(ONE, cv_mem->cv_zn[0], ONE, ycor, cv_mem->cv_y); /* evaluate the rhs function */ retval = cv_mem->nls_f(cv_mem->cv_tn, cv_mem->cv_y, res, cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) return(CV_RHSFUNC_FAIL); if (retval > 0) return(RHSFUNC_RECVR); N_VLinearSum(cv_mem->cv_h, res, -ONE, cv_mem->cv_zn[1], res); N_VScale(cv_mem->cv_rl1, res, res); return(CV_SUCCESS); } StanHeaders/src/cvodes/cvodes.c0000644000176200001440000103174314645137106016174 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Alan C. Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the main CVODES integrator * with sensitivity analysis capabilities. 
* ----------------------------------------------------------------- * * EXPORTED FUNCTIONS * ------------------ * * Creation, allocation and re-initialization functions * * CVodeCreate * * CVodeInit * CVodeReInit * CVodeSStolerances * CVodeSVtolerances * CVodeWFtolerances * * CVodeQuadInit * CVodeQuadReInit * CVodeQuadSStolerances * CVodeQuadSVtolerances * * CVodeSensInit * CVodeSensInit1 * CVodeSensReInit * CVodeSensSStolerances * CVodeSensSVtolerances * CVodeSensEEtolerances * * CVodeQuadSensInit * CVodeQuadSensReInit * * CVodeSensToggleOff * * CVodeRootInit * * Main solver function * CVode * * Interpolated output and extraction functions * CVodeGetDky * CVodeGetQuad * CVodeGetQuadDky * CVodeGetSens * CVodeGetSens1 * CVodeGetSensDky * CVodeGetSensDky1 * CVodeGetQuadSens * CVodeGetQuadSens1 * CVodeGetQuadSensDky * CVodeGetQuadSensDky1 * * Deallocation functions * CVodeFree * CVodeQuadFree * CVodeSensFree * CVodeQuadSensFree * * PRIVATE FUNCTIONS * ----------------- * * cvCheckNvector * * Memory allocation/deallocation * cvAllocVectors * cvFreeVectors * cvQuadAllocVectors * cvQuadFreeVectors * cvSensAllocVectors * cvSensFreeVectors * cvQuadSensAllocVectors * cvQuadSensFreeVectors * * Initial stepsize calculation * cvHin * cvUpperBoundH0 * cvYddNorm * * Initial setup * cvInitialSetup * cvEwtSet * cvEwtSetSS * cvEwtSetSV * cvQuadEwtSet * cvQuadEwtSetSS * cvQuadEwtSetSV * cvSensEwtSet * cvSensEwtSetEE * cvSensEwtSetSS * cvSensEwtSetSV * cvQuadSensEwtSet * cvQuadSensEwtSetEE * cvQuadSensEwtSetSS * cvQuadSensEwtSetSV * * Main cvStep function * cvStep * * Functions called at beginning of step * cvAdjustParams * cvAdjustOrder * cvAdjustAdams * cvAdjustBDF * cvIncreaseBDF * cvDecreaseBDF * cvRescale * cvPredict * cvSet * cvSetAdams * cvAdamsStart * cvAdamsFinish * cvAltSum * cvSetBDF * cvSetTqBDF * * Nonlinear solver functions * cvNls * cvQuadNls * cvStgrNls * cvStgr1Nls * cvQuadSensNls * cvHandleNFlag * cvRestore * * Error Test * cvDoErrorTest * * Functions called after a 
successful step * cvCompleteStep * cvPrepareNextStep * cvSetEta * cvComputeEtaqm1 * cvComputeEtaqp1 * cvChooseEta * * Function to handle failures * cvHandleFailure * * Functions for BDF Stability Limit Detection * cvBDFStab * cvSLdet * * Functions for rootfinding * cvRcheck1 * cvRcheck2 * cvRcheck3 * cvRootfind * * Functions for combined norms * cvQuadUpdateNorm * cvSensNorm * cvSensUpdateNorm * cvQuadSensNorm * cvQuadSensUpdateNorm * * Wrappers for sensitivity RHS * cvSensRhsWrapper * cvSensRhs1Wrapper * * Internal DQ approximations for sensitivity RHS * cvSensRhsInternalDQ * cvSensRhs1InternalDQ * cvQuadSensRhsDQ * * Error message handling functions * cvProcessError * cvErrHandler * * -----------------------------------------------------------------*/ /*=================================================================*/ /* Import Header Files */ /*=================================================================*/ #include #include #include #include #include "cvodes_impl.h" #include #include #include /*=================================================================*/ /* CVODE Private Constants */ /*=================================================================*/ #define ZERO RCONST(0.0) /* real 0.0 */ #define TINY RCONST(1.0e-10) /* small number */ #define PT1 RCONST(0.1) /* real 0.1 */ #define POINT2 RCONST(0.2) /* real 0.2 */ #define FOURTH RCONST(0.25) /* real 0.25 */ #define HALF RCONST(0.5) /* real 0.5 */ #define PT9 RCONST(0.9) /* real 0.9 */ #define ONE RCONST(1.0) /* real 1.0 */ #define ONEPT5 RCONST(1.50) /* real 1.5 */ #define TWO RCONST(2.0) /* real 2.0 */ #define THREE RCONST(3.0) /* real 3.0 */ #define FOUR RCONST(4.0) /* real 4.0 */ #define FIVE RCONST(5.0) /* real 5.0 */ #define TWELVE RCONST(12.0) /* real 12.0 */ #define HUNDRED RCONST(100.0) /* real 100.0 */ /*=================================================================*/ /* CVODE Routine-Specific Constants */ /*=================================================================*/ /* * 
Control constants for lower-level functions used by cvStep * ---------------------------------------------------------- * * cvHin return values: * CV_SUCCESS, * CV_RHSFUNC_FAIL, CV_RPTD_RHSFUNC_ERR, * CV_QRHSFUNC_FAIL, CV_RPTD_QRHSFUNC_ERR, * CV_SRHSFUNC_FAIL, CV_RPTD_SRHSFUNC_ERR, * CV_TOO_CLOSE * * cvStep control constants: * DO_ERROR_TEST * PREDICT_AGAIN * * cvStep return values: * CV_SUCCESS, * CV_CONV_FAILURE, CV_ERR_FAILURE, * CV_LSETUP_FAIL, CV_LSOLVE_FAIL, * CV_RTFUNC_FAIL, * CV_RHSFUNC_FAIL, CV_QRHSFUNC_FAIL, CV_SRHSFUNC_FAIL, CV_QSRHSFUNC_FAIL, * CV_FIRST_RHSFUNC_ERR, CV_FIRST_QRHSFUNC_ERR, CV_FIRST_SRHSFUNC_ERR, CV_FIRST_QSRHSFUNC_ERR, * CV_UNREC_RHSFUNC_ERR, CV_UNREC_QRHSFUNC_ERR, CV_UNREC_SRHSFUNC_ERR, CV_UNREC_QSRHSFUNC_ERR, * CV_REPTD_RHSFUNC_ERR, CV_REPTD_QRHSFUNC_ERR, CV_REPTD_SRHSFUNC_ERR, CV_REPTD_QSRHSFUNC_ERR, * * cvNls input nflag values: * FIRST_CALL * PREV_CONV_FAIL * PREV_ERR_FAIL * * cvNls return values: * CV_SUCCESS, * CV_LSETUP_FAIL, CV_LSOLVE_FAIL, * CV_RHSFUNC_FAIL, CV_SRHSFUNC_FAIL, * SUN_NLS_CONV_RECVR, * RHSFUNC_RECVR, SRHSFUNC_RECVR * */ #define DO_ERROR_TEST +2 #define PREDICT_AGAIN +3 #define CONV_FAIL +4 #define TRY_AGAIN +5 #define FIRST_CALL +6 #define PREV_CONV_FAIL +7 #define PREV_ERR_FAIL +8 #define CONSTR_RECVR +10 #define QRHSFUNC_RECVR +11 #define QSRHSFUNC_RECVR +13 /* * Control constants for lower-level rootfinding functions * ------------------------------------------------------- * * cvRcheck1 return values: * CV_SUCCESS * CV_RTFUNC_FAIL * cvRcheck2 return values: * CV_SUCCESS * CV_RTFUNC_FAIL * CLOSERT * RTFOUND * cvRcheck3 return values: * CV_SUCCESS * CV_RTFUNC_FAIL * RTFOUND * cvRootfind return values: * CV_SUCCESS * CV_RTFUNC_FAIL * RTFOUND */ #define RTFOUND +1 #define CLOSERT +3 /* * Control constants for sensitivity DQ * ------------------------------------ */ #define CENTERED1 +1 #define CENTERED2 +2 #define FORWARD1 +3 #define FORWARD2 +4 /* * Control constants for type of sensitivity RHS * 
--------------------------------------------- */ #define CV_ONESENS 1 #define CV_ALLSENS 2 /* * Control constants for tolerances * -------------------------------- */ #define CV_NN 0 #define CV_SS 1 #define CV_SV 2 #define CV_WF 3 #define CV_EE 4 /* * Algorithmic constants * --------------------- * * CVodeGetDky and cvStep * * FUZZ_FACTOR fuzz factor used to estimate infinitesimal time intervals * * cvHin * * HLB_FACTOR factor for upper bound on initial step size * HUB_FACTOR factor for lower bound on initial step size * H_BIAS bias factor in selection of initial step size * MAX_ITERS maximum attempts to compute the initial step size * * CVodeCreate * * CORTES constant in nonlinear iteration convergence test * * cvStep * * THRESH if eta < THRESH reject a change in step size or order * ETAMX1 -+ * ETAMX2 | * ETAMX3 |-> bounds for eta (step size change) * ETAMXF | * ETAMIN | * ETACF -+ * ADDON safety factor in computing eta * BIAS1 -+ * BIAS2 |-> bias factors in eta selection * BIAS3 -+ * ONEPSM (1+epsilon) used in testing if the step size is below its bound * * SMALL_NST nst > SMALL_NST => use ETAMX3 * MXNCF max no. of convergence failures during one step try * MXNEF max no. of error test failures during one step try * MXNEF1 max no. 
of error test failures before forcing a reduction of order * SMALL_NEF if an error failure occurs and SMALL_NEF <= nef <= MXNEF1, then * reset eta = SUNMIN(eta, ETAMXF) * LONG_WAIT number of steps to wait before considering an order change when * q==1 and MXNEF1 error test failures have occurred * * cvNls * * DGMAX |gamma/gammap-1| > DGMAX => call lsetup * */ #define FUZZ_FACTOR RCONST(100.0) #define HLB_FACTOR RCONST(100.0) #define HUB_FACTOR RCONST(0.1) #define H_BIAS HALF #define MAX_ITERS 4 #define CORTES RCONST(0.1) #define THRESH RCONST(1.5) #define ETAMX1 RCONST(10000.0) #define ETAMX2 RCONST(10.0) #define ETAMX3 RCONST(10.0) #define ETAMXF RCONST(0.2) #define ETAMIN RCONST(0.1) #define ETACF RCONST(0.25) #define ADDON RCONST(0.000001) #define BIAS1 RCONST(6.0) #define BIAS2 RCONST(6.0) #define BIAS3 RCONST(10.0) #define ONEPSM RCONST(1.000001) #define SMALL_NST 10 #define MXNCF 10 #define MXNEF 7 #define MXNEF1 3 #define SMALL_NEF 2 #define LONG_WAIT 10 #define DGMAX RCONST(0.3) /*=================================================================*/ /* Shortcuts */ /*=================================================================*/ #define CV_PROFILER cv_mem->cv_sunctx->profiler /*=================================================================*/ /* Private Helper Functions Prototypes */ /*=================================================================*/ static booleantype cvCheckNvector(N_Vector tmpl); /* Initial setup */ static int cvInitialSetup(CVodeMem cv_mem); /* Memory allocation/deallocation */ static booleantype cvAllocVectors(CVodeMem cv_mem, N_Vector tmpl); static void cvFreeVectors(CVodeMem cv_mem); static booleantype cvQuadAllocVectors(CVodeMem cv_mem, N_Vector tmpl); static void cvQuadFreeVectors(CVodeMem cv_mem); static booleantype cvSensAllocVectors(CVodeMem cv_mem, N_Vector tmpl); static void cvSensFreeVectors(CVodeMem cv_mem); static booleantype cvQuadSensAllocVectors(CVodeMem cv_mem, N_Vector tmpl); static void 
cvQuadSensFreeVectors(CVodeMem cv_mem); static int cvEwtSetSS(CVodeMem cv_mem, N_Vector ycur, N_Vector weight); static int cvEwtSetSV(CVodeMem cv_mem, N_Vector ycur, N_Vector weight); static int cvQuadEwtSet(CVodeMem cv_mem, N_Vector qcur, N_Vector weightQ); static int cvQuadEwtSetSS(CVodeMem cv_mem, N_Vector qcur, N_Vector weightQ); static int cvQuadEwtSetSV(CVodeMem cv_mem, N_Vector qcur, N_Vector weightQ); static int cvSensEwtSet(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS); static int cvSensEwtSetEE(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS); static int cvSensEwtSetSS(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS); static int cvSensEwtSetSV(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS); static int cvQuadSensEwtSet(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS); static int cvQuadSensEwtSetEE(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS); static int cvQuadSensEwtSetSS(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS); static int cvQuadSensEwtSetSV(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS); /* Initial stepsize calculation */ static int cvHin(CVodeMem cv_mem, realtype tout); static realtype cvUpperBoundH0(CVodeMem cv_mem, realtype tdist); static int cvYddNorm(CVodeMem cv_mem, realtype hg, realtype *yddnrm); /* Main cvStep function */ static int cvStep(CVodeMem cv_mem); /* Function called at beginning of step */ static void cvAdjustParams(CVodeMem cv_mem); static void cvAdjustOrder(CVodeMem cv_mem, int deltaq); static void cvAdjustAdams(CVodeMem cv_mem, int deltaq); static void cvAdjustBDF(CVodeMem cv_mem, int deltaq); static void cvIncreaseBDF(CVodeMem cv_mem); static void cvDecreaseBDF(CVodeMem cv_mem); static void cvRescale(CVodeMem cv_mem); static void cvPredict(CVodeMem cv_mem); static void cvSet(CVodeMem cv_mem); static void cvSetAdams(CVodeMem cv_mem); static realtype cvAdamsStart(CVodeMem cv_mem, realtype m[]); static void cvAdamsFinish(CVodeMem cv_mem, realtype m[], realtype M[], realtype 
hsum); static realtype cvAltSum(int iend, realtype a[], int k); static void cvSetBDF(CVodeMem cv_mem); static void cvSetTqBDF(CVodeMem cv_mem, realtype hsum, realtype alpha0, realtype alpha0_hat, realtype xi_inv, realtype xistar_inv); /* Nonlinear solver functions */ static int cvNls(CVodeMem cv_mem, int nflag); static int cvQuadNls(CVodeMem cv_mem); static int cvStgrNls(CVodeMem cv_mem); static int cvStgr1Nls(CVodeMem cv_mem, int is); static int cvQuadSensNls(CVodeMem cv_mem); static int cvCheckConstraints(CVodeMem cv_mem); static int cvHandleNFlag(CVodeMem cv_mem, int *nflagPtr, realtype saved_t, int *ncfPtr, long int *ncfnPtr); static void cvRestore(CVodeMem cv_mem, realtype saved_t); /* Error Test */ static int cvDoErrorTest(CVodeMem cv_mem, int *nflagPtr, realtype saved_t, realtype acor_nrm, int *nefPtr, long int *netfPtr, realtype *dsmPtr); /* Function called after a successful step */ static void cvCompleteStep(CVodeMem cv_mem); static void cvPrepareNextStep(CVodeMem cv_mem, realtype dsm); static void cvSetEta(CVodeMem cv_mem); static realtype cvComputeEtaqm1(CVodeMem cv_mem); static realtype cvComputeEtaqp1(CVodeMem cv_mem); static void cvChooseEta(CVodeMem cv_mem); /* Function to handle failures */ static int cvHandleFailure(CVodeMem cv_mem,int flag); /* Functions for BDF Stability Limit Detection */ static void cvBDFStab(CVodeMem cv_mem); static int cvSLdet(CVodeMem cv_mem); /* Functions for rootfinding */ static int cvRcheck1(CVodeMem cv_mem); static int cvRcheck2(CVodeMem cv_mem); static int cvRcheck3(CVodeMem cv_mem); static int cvRootfind(CVodeMem cv_mem); /* Function for combined norms */ static realtype cvQuadUpdateNorm(CVodeMem cv_mem, realtype old_nrm, N_Vector xQ, N_Vector wQ); static realtype cvQuadSensNorm(CVodeMem cv_mem, N_Vector *xQS, N_Vector *wQS); static realtype cvQuadSensUpdateNorm(CVodeMem cv_mem, realtype old_nrm, N_Vector *xQS, N_Vector *wQS); /* Internal sensitivity RHS DQ functions */ static int cvQuadSensRhsInternalDQ(int Ns, 
realtype t, N_Vector y, N_Vector *yS, N_Vector yQdot, N_Vector *yQSdot, void *cvode_mem, N_Vector tmp, N_Vector tmpQ); static int cvQuadSensRhs1InternalDQ(CVodeMem cv_mem, int is, realtype t, N_Vector y, N_Vector yS, N_Vector yQdot, N_Vector yQSdot, N_Vector tmp, N_Vector tmpQ); /* * ================================================================= * Exported Functions Implementation * ================================================================= */ /* * ----------------------------------------------------------------- * Creation, allocation and re-initialization functions * ----------------------------------------------------------------- */ /* * CVodeCreate * * CVodeCreate creates an internal memory block for a problem to * be solved by CVODES. * If successful, CVodeCreate returns a pointer to the problem memory. * This pointer should be passed to CVodeInit. * If an initialization error occurs, CVodeCreate prints an error * message to standard err and returns NULL. */ void *CVodeCreate(int lmm, SUNContext sunctx) { int maxord; CVodeMem cv_mem; /* Test inputs */ if ((lmm != CV_ADAMS) && (lmm != CV_BDF)) { cvProcessError(NULL, 0, "CVODES", "CVodeCreate", MSGCV_BAD_LMM); return(NULL); } if (sunctx == NULL) { cvProcessError(NULL, 0, "CVODES", "CVodeCreate", MSGCV_NULL_SUNCTX); return(NULL); } cv_mem = NULL; cv_mem = (CVodeMem) malloc(sizeof(struct CVodeMemRec)); if (cv_mem == NULL) { cvProcessError(NULL, 0, "CVODES", "CVodeCreate", MSGCV_CVMEM_FAIL); return(NULL); } /* Zero out cv_mem */ memset(cv_mem, 0, sizeof(struct CVodeMemRec)); maxord = (lmm == CV_ADAMS) ? 
ADAMS_Q_MAX : BDF_Q_MAX; /* copy input parameters into cv_mem */ cv_mem->cv_sunctx = sunctx; cv_mem->cv_lmm = lmm; /* Set uround */ cv_mem->cv_uround = UNIT_ROUNDOFF; /* Set default values for integrator optional inputs */ cv_mem->cv_f = NULL; cv_mem->cv_user_data = NULL; cv_mem->cv_itol = CV_NN; cv_mem->cv_atolmin0 = SUNTRUE; cv_mem->cv_user_efun = SUNFALSE; cv_mem->cv_efun = NULL; cv_mem->cv_e_data = NULL; cv_mem->cv_ehfun = cvErrHandler; cv_mem->cv_eh_data = cv_mem; cv_mem->cv_monitorfun = NULL; cv_mem->cv_monitor_interval = 0; cv_mem->cv_errfp = stderr; cv_mem->cv_qmax = maxord; cv_mem->cv_mxstep = MXSTEP_DEFAULT; cv_mem->cv_mxhnil = MXHNIL_DEFAULT; cv_mem->cv_sldeton = SUNFALSE; cv_mem->cv_hin = ZERO; cv_mem->cv_hmin = HMIN_DEFAULT; cv_mem->cv_hmax_inv = HMAX_INV_DEFAULT; cv_mem->cv_tstopset = SUNFALSE; cv_mem->cv_maxnef = MXNEF; cv_mem->cv_maxncf = MXNCF; cv_mem->cv_nlscoef = CORTES; cv_mem->cv_msbp = MSBP; cv_mem->convfail = CV_NO_FAILURES; cv_mem->cv_constraints = NULL; cv_mem->cv_constraintsSet = SUNFALSE; /* Initialize root finding variables */ cv_mem->cv_glo = NULL; cv_mem->cv_ghi = NULL; cv_mem->cv_grout = NULL; cv_mem->cv_iroots = NULL; cv_mem->cv_rootdir = NULL; cv_mem->cv_gfun = NULL; cv_mem->cv_nrtfn = 0; cv_mem->cv_gactive = NULL; cv_mem->cv_mxgnull = 1; /* Set default values for quad. optional inputs */ cv_mem->cv_quadr = SUNFALSE; cv_mem->cv_fQ = NULL; cv_mem->cv_errconQ = SUNFALSE; cv_mem->cv_itolQ = CV_NN; cv_mem->cv_atolQmin0 = SUNTRUE; /* Set default values for sensi. 
optional inputs */ cv_mem->cv_sensi = SUNFALSE; cv_mem->cv_fS_data = NULL; cv_mem->cv_fS = cvSensRhsInternalDQ; cv_mem->cv_fS1 = cvSensRhs1InternalDQ; cv_mem->cv_fSDQ = SUNTRUE; cv_mem->cv_ifS = CV_ONESENS; cv_mem->cv_DQtype = CV_CENTERED; cv_mem->cv_DQrhomax = ZERO; cv_mem->cv_p = NULL; cv_mem->cv_pbar = NULL; cv_mem->cv_plist = NULL; cv_mem->cv_errconS = SUNFALSE; cv_mem->cv_ncfS1 = NULL; cv_mem->cv_ncfnS1 = NULL; cv_mem->cv_nniS1 = NULL; cv_mem->cv_itolS = CV_NN; cv_mem->cv_atolSmin0 = NULL; /* Set default values for quad. sensi. optional inputs */ cv_mem->cv_quadr_sensi = SUNFALSE; cv_mem->cv_fQS = NULL; cv_mem->cv_fQS_data = NULL; cv_mem->cv_fQSDQ = SUNTRUE; cv_mem->cv_errconQS = SUNFALSE; cv_mem->cv_itolQS = CV_NN; cv_mem->cv_atolQSmin0 = NULL; /* Set default for ASA */ cv_mem->cv_adj = SUNFALSE; cv_mem->cv_adj_mem = NULL; /* Set the saved value for qmax_alloc */ cv_mem->cv_qmax_alloc = maxord; cv_mem->cv_qmax_allocQ = maxord; cv_mem->cv_qmax_allocS = maxord; /* Initialize lrw and liw */ cv_mem->cv_lrw = 65 + 2*L_MAX + NUM_TESTS; cv_mem->cv_liw = 52; /* No mallocs have been done yet */ cv_mem->cv_VabstolMallocDone = SUNFALSE; cv_mem->cv_MallocDone = SUNFALSE; cv_mem->cv_constraintsMallocDone = SUNFALSE; cv_mem->cv_VabstolQMallocDone = SUNFALSE; cv_mem->cv_QuadMallocDone = SUNFALSE; cv_mem->cv_VabstolSMallocDone = SUNFALSE; cv_mem->cv_SabstolSMallocDone = SUNFALSE; cv_mem->cv_SensMallocDone = SUNFALSE; cv_mem->cv_VabstolQSMallocDone = SUNFALSE; cv_mem->cv_SabstolQSMallocDone = SUNFALSE; cv_mem->cv_QuadSensMallocDone = SUNFALSE; cv_mem->cv_adjMallocDone = SUNFALSE; /* Initialize nonlinear solver variables */ cv_mem->NLS = NULL; cv_mem->ownNLS = SUNFALSE; cv_mem->NLSsim = NULL; cv_mem->ownNLSsim = SUNFALSE; cv_mem->zn0Sim = NULL; cv_mem->ycorSim = NULL; cv_mem->ewtSim = NULL; cv_mem->simMallocDone = SUNFALSE; cv_mem->NLSstg = NULL; cv_mem->ownNLSstg = SUNFALSE; cv_mem->zn0Stg = NULL; cv_mem->ycorStg = NULL; cv_mem->ewtStg = NULL; cv_mem->stgMallocDone = 
SUNFALSE; cv_mem->NLSstg1 = NULL; cv_mem->ownNLSstg1 = SUNFALSE; cv_mem->sens_solve = SUNFALSE; cv_mem->sens_solve_idx = -1; /* Return pointer to CVODES memory block */ return((void *)cv_mem); } /*-----------------------------------------------------------------*/ /* * CVodeInit * * CVodeInit allocates and initializes memory for a problem. All * problem inputs are checked for errors. If any error occurs during * initialization, it is reported to the file whose file pointer is * errfp and an error flag is returned. Otherwise, it returns CV_SUCCESS */ int CVodeInit(void *cvode_mem, CVRhsFn f, realtype t0, N_Vector y0) { CVodeMem cv_mem; booleantype nvectorOK, allocOK; sunindextype lrw1, liw1; int i,k, retval; SUNNonlinearSolver NLS; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Check for legal input parameters */ if (y0==NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeInit", MSGCV_NULL_Y0); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } if (f == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeInit", MSGCV_NULL_F); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Test if all required vector operations are implemented */ nvectorOK = cvCheckNvector(y0); if(!nvectorOK) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeInit", MSGCV_BAD_NVECTOR); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Set space requirements for one N_Vector */ if (y0->ops->nvspace != NULL) { N_VSpace(y0, &lrw1, &liw1); } else { lrw1 = 0; liw1 = 0; } cv_mem->cv_lrw1 = lrw1; cv_mem->cv_liw1 = liw1; /* Allocate the vectors (using y0 as a template) */ allocOK = cvAllocVectors(cv_mem, y0); if (!allocOK) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeInit", MSGCV_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } /* 
Allocate temporary work arrays for fused vector ops */ cv_mem->cv_cvals = NULL; cv_mem->cv_cvals = (realtype *) malloc(L_MAX*sizeof(realtype)); cv_mem->cv_Xvecs = NULL; cv_mem->cv_Xvecs = (N_Vector *) malloc(L_MAX*sizeof(N_Vector)); cv_mem->cv_Zvecs = NULL; cv_mem->cv_Zvecs = (N_Vector *) malloc(L_MAX*sizeof(N_Vector)); if ((cv_mem->cv_cvals == NULL) || (cv_mem->cv_Xvecs == NULL) || (cv_mem->cv_Zvecs == NULL)) { cvFreeVectors(cv_mem); cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeInit", MSGCV_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } /* Input checks complete at this point and history array allocated */ /* Copy the input parameters into CVODE state */ cv_mem->cv_f = f; cv_mem->cv_tn = t0; /* Initialize zn[0] in the history array */ N_VScale(ONE, y0, cv_mem->cv_zn[0]); /* create a Newton nonlinear solver object by default */ NLS = SUNNonlinSol_Newton(y0, cv_mem->cv_sunctx); /* check that nonlinear solver is non-NULL */ if (NLS == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeInit", MSGCV_MEM_FAIL); cvFreeVectors(cv_mem); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } /* attach the nonlinear solver to the CVODE memory */ retval = CVodeSetNonlinearSolver(cv_mem, NLS); /* check that the nonlinear solver was successfully attached */ if (retval != CV_SUCCESS) { cvProcessError(cv_mem, retval, "CVODES", "CVodeInit", "Setting the nonlinear solver failed"); cvFreeVectors(cv_mem); SUNNonlinSolFree(NLS); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } /* set ownership flag */ cv_mem->ownNLS = SUNTRUE; /* All error checking is complete at this point */ /* Set step parameters */ cv_mem->cv_q = 1; cv_mem->cv_L = 2; cv_mem->cv_qwait = cv_mem->cv_L; cv_mem->cv_etamax = ETAMX1; cv_mem->cv_qu = 0; cv_mem->cv_hu = ZERO; cv_mem->cv_tolsf = ONE; /* Set the linear solver addresses to NULL. 
(We check != NULL later, in CVode) */ cv_mem->cv_linit = NULL; cv_mem->cv_lsetup = NULL; cv_mem->cv_lsolve = NULL; cv_mem->cv_lfree = NULL; cv_mem->cv_lmem = NULL; /* Set forceSetup to SUNFALSE */ cv_mem->cv_forceSetup = SUNFALSE; /* Initialize all the counters */ cv_mem->cv_nst = 0; cv_mem->cv_nfe = 0; cv_mem->cv_ncfn = 0; cv_mem->cv_netf = 0; cv_mem->cv_nni = 0; cv_mem->cv_nsetups = 0; cv_mem->cv_nhnil = 0; cv_mem->cv_nstlp = 0; cv_mem->cv_nscon = 0; cv_mem->cv_nge = 0; cv_mem->cv_irfnd = 0; /* Initialize other integrator optional outputs */ cv_mem->cv_h0u = ZERO; cv_mem->cv_next_h = ZERO; cv_mem->cv_next_q = 0; /* Initialize Stablilty Limit Detection data */ /* NOTE: We do this even if stab lim det was not turned on yet. This way, the user can turn it on at any time */ cv_mem->cv_nor = 0; for (i = 1; i <= 5; i++) for (k = 1; k <= 3; k++) cv_mem->cv_ssdat[i-1][k-1] = ZERO; /* Problem has been successfully initialized */ cv_mem->cv_MallocDone = SUNTRUE; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeReInit * * CVodeReInit re-initializes CVODES's memory for a problem, assuming * it has already been allocated in a prior CVodeInit call. * All problem specification inputs are checked for errors. * If any error occurs during initialization, it is reported to the * file whose file pointer is errfp. * The return value is CV_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. 
*/

int CVodeReInit(void *cvode_mem, realtype t0, N_Vector y0)
{
  CVodeMem cv_mem;
  int i,k;

  /* Check cvode_mem */
  if (cvode_mem==NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeReInit",
                   MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER);

  /* Check if cvode_mem was allocated (CVodeInit must have been called) */
  if (cv_mem->cv_MallocDone == SUNFALSE) {
    cvProcessError(cv_mem, CV_NO_MALLOC, "CVODES", "CVodeReInit",
                   MSGCV_NO_MALLOC);
    SUNDIALS_MARK_FUNCTION_END(CV_PROFILER);
    return(CV_NO_MALLOC);
  }

  /* Check for legal input parameters */
  if (y0 == NULL) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeReInit",
                   MSGCV_NULL_Y0);
    SUNDIALS_MARK_FUNCTION_END(CV_PROFILER);
    return(CV_ILL_INPUT);
  }

  /* Copy the input parameters into CVODES state */
  cv_mem->cv_tn = t0;

  /* Set step parameters back to their CVodeInit defaults */
  cv_mem->cv_q      = 1;
  cv_mem->cv_L      = 2;
  cv_mem->cv_qwait  = cv_mem->cv_L;
  cv_mem->cv_etamax = ETAMX1;

  cv_mem->cv_qu    = 0;
  cv_mem->cv_hu    = ZERO;
  cv_mem->cv_tolsf = ONE;

  /* Set forceSetup to SUNFALSE */
  cv_mem->cv_forceSetup = SUNFALSE;

  /* Initialize zn[0] in the history array with the new initial condition */
  N_VScale(ONE, y0, cv_mem->cv_zn[0]);

  /* Initialize all the counters */
  cv_mem->cv_nst     = 0;
  cv_mem->cv_nfe     = 0;
  cv_mem->cv_ncfn    = 0;
  cv_mem->cv_netf    = 0;
  cv_mem->cv_nni     = 0;
  cv_mem->cv_nsetups = 0;
  cv_mem->cv_nhnil   = 0;
  cv_mem->cv_nstlp   = 0;
  cv_mem->cv_nscon   = 0;
  cv_mem->cv_nge     = 0;

  cv_mem->cv_irfnd   = 0;

  /* Initialize other integrator optional outputs */
  cv_mem->cv_h0u    = ZERO;
  cv_mem->cv_next_h = ZERO;
  cv_mem->cv_next_q = 0;

  /* Initialize Stability Limit Detection data */
  cv_mem->cv_nor = 0;
  for (i = 1; i <= 5; i++)
    for (k = 1; k <= 3; k++)
      cv_mem->cv_ssdat[i-1][k-1] = ZERO;

  /* Problem has been successfully re-initialized */

  SUNDIALS_MARK_FUNCTION_END(CV_PROFILER);
  return(CV_SUCCESS);
}

/*-----------------------------------------------------------------*/

/*
 * CVodeSStolerances
 * CVodeSVtolerances
 * CVodeWFtolerances
 *
 * These functions specify the integration tolerances.
One of them * MUST be called before the first call to CVode. * * CVodeSStolerances specifies scalar relative and absolute tolerances. * CVodeSVtolerances specifies scalar relative tolerance and a vector * absolute tolerance (a potentially different absolute tolerance * for each vector component). * CVodeWFtolerances specifies a user-provides function (of type CVEwtFn) * which will be called to set the error weight vector. */ int CVodeSStolerances(void *cvode_mem, realtype reltol, realtype abstol) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSStolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_MallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_MALLOC, "CVODES", "CVodeSStolerances", MSGCV_NO_MALLOC); return(CV_NO_MALLOC); } /* Check inputs */ if (reltol < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSStolerances", MSGCV_BAD_RELTOL); return(CV_ILL_INPUT); } if (abstol < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSStolerances", MSGCV_BAD_ABSTOL); return(CV_ILL_INPUT); } /* Copy tolerances into memory */ cv_mem->cv_reltol = reltol; cv_mem->cv_Sabstol = abstol; cv_mem->cv_atolmin0 = (abstol == ZERO); cv_mem->cv_itol = CV_SS; cv_mem->cv_user_efun = SUNFALSE; cv_mem->cv_efun = cvEwtSet; cv_mem->cv_e_data = NULL; /* will be set to cvode_mem in InitialSetup */ return(CV_SUCCESS); } int CVodeSVtolerances(void *cvode_mem, realtype reltol, N_Vector abstol) { CVodeMem cv_mem; realtype atolmin; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSVtolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_MallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_MALLOC, "CVODES", "CVodeSVtolerances", MSGCV_NO_MALLOC); return(CV_NO_MALLOC); } /* Check inputs */ if (reltol < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSVtolerances", MSGCV_BAD_RELTOL); return(CV_ILL_INPUT); } if 
(abstol->ops->nvmin == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSVtolerances", "Missing N_VMin routine from N_Vector"); return(CV_ILL_INPUT); } atolmin = N_VMin(abstol); if (atolmin < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSVtolerances", MSGCV_BAD_ABSTOL); return(CV_ILL_INPUT); } /* Copy tolerances into memory */ if ( !(cv_mem->cv_VabstolMallocDone) ) { cv_mem->cv_Vabstol = N_VClone(cv_mem->cv_ewt); cv_mem->cv_lrw += cv_mem->cv_lrw1; cv_mem->cv_liw += cv_mem->cv_liw1; cv_mem->cv_VabstolMallocDone = SUNTRUE; } cv_mem->cv_reltol = reltol; N_VScale(ONE, abstol, cv_mem->cv_Vabstol); cv_mem->cv_atolmin0 = (atolmin == ZERO); cv_mem->cv_itol = CV_SV; cv_mem->cv_user_efun = SUNFALSE; cv_mem->cv_efun = cvEwtSet; cv_mem->cv_e_data = NULL; /* will be set to cvode_mem in InitialSetup */ return(CV_SUCCESS); } int CVodeWFtolerances(void *cvode_mem, CVEwtFn efun) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeWFtolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_MallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_MALLOC, "CVODES", "CVodeWFtolerances", MSGCV_NO_MALLOC); return(CV_NO_MALLOC); } cv_mem->cv_itol = CV_WF; cv_mem->cv_user_efun = SUNTRUE; cv_mem->cv_efun = efun; cv_mem->cv_e_data = NULL; /* will be set to user_data in InitialSetup */ return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeQuadInit * * CVodeQuadInit allocates and initializes quadrature related * memory for a problem. All problem specification inputs are * checked for errors. If any error occurs during initialization, * it is reported to the file whose file pointer is errfp. * The return value is CV_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. 
*/ int CVodeQuadInit(void *cvode_mem, CVQuadRhsFn fQ, N_Vector yQ0) { CVodeMem cv_mem; booleantype allocOK; sunindextype lrw1Q, liw1Q; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Set space requirements for one N_Vector */ N_VSpace(yQ0, &lrw1Q, &liw1Q); cv_mem->cv_lrw1Q = lrw1Q; cv_mem->cv_liw1Q = liw1Q; /* Allocate the vectors (using yQ0 as a template) */ allocOK = cvQuadAllocVectors(cv_mem, yQ0); if (!allocOK) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeQuadInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } /* Initialize znQ[0] in the history array */ N_VScale(ONE, yQ0, cv_mem->cv_znQ[0]); /* Copy the input parameters into CVODES state */ cv_mem->cv_fQ = fQ; /* Initialize counters */ cv_mem->cv_nfQe = 0; cv_mem->cv_netfQ = 0; /* Quadrature integration turned ON */ cv_mem->cv_quadr = SUNTRUE; cv_mem->cv_QuadMallocDone = SUNTRUE; /* Quadrature initialization was successfull */ return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeQuadReInit * * CVodeQuadReInit re-initializes CVODES's quadrature related memory * for a problem, assuming it has already been allocated in prior * calls to CVodeInit and CVodeQuadInit. * All problem specification inputs are checked for errors. * If any error occurs during initialization, it is reported to the * file whose file pointer is errfp. * The return value is CV_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. */ int CVodeQuadReInit(void *cvode_mem, N_Vector yQ0) { CVodeMem cv_mem; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadReInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Ckeck if quadrature was initialized? 
*/ if (cv_mem->cv_QuadMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeQuadReInit", MSGCV_NO_QUAD); return(CV_NO_QUAD); } /* Initialize znQ[0] in the history array */ N_VScale(ONE, yQ0, cv_mem->cv_znQ[0]); /* Initialize counters */ cv_mem->cv_nfQe = 0; cv_mem->cv_netfQ = 0; /* Quadrature integration turned ON */ cv_mem->cv_quadr = SUNTRUE; /* Quadrature re-initialization was successfull */ return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeQuadSStolerances * CVodeQuadSVtolerances * * These functions specify the integration tolerances for sensitivity * variables. One of them MUST be called before the first call to * CVode IF error control on the quadrature variables is enabled * (see CVodeSetQuadErrCon). * * CVodeQuadSStolerances specifies scalar relative and absolute tolerances. * CVodeQuadSVtolerances specifies scalar relative tolerance and a vector * absolute toleranc (a potentially different absolute tolerance for each * vector component). */ int CVodeQuadSStolerances(void *cvode_mem, realtype reltolQ, realtype abstolQ) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadSStolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Ckeck if quadrature was initialized? 
*/ if (cv_mem->cv_QuadMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeQuadSStolerances", MSGCV_NO_QUAD); return(CV_NO_QUAD); } /* Test user-supplied tolerances */ if (reltolQ < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSStolerances", MSGCV_BAD_RELTOLQ); return(CV_ILL_INPUT); } if (abstolQ < 0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSStolerances", MSGCV_BAD_ABSTOLQ); return(CV_ILL_INPUT); } /* Copy tolerances into memory */ cv_mem->cv_itolQ = CV_SS; cv_mem->cv_reltolQ = reltolQ; cv_mem->cv_SabstolQ = abstolQ; cv_mem->cv_atolQmin0 = (abstolQ == ZERO); return(CV_SUCCESS); } int CVodeQuadSVtolerances(void *cvode_mem, realtype reltolQ, N_Vector abstolQ) { CVodeMem cv_mem; realtype atolmin; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadSVtolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Ckeck if quadrature was initialized? */ if (cv_mem->cv_QuadMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeQuadSVtolerances", MSGCV_NO_QUAD); return(CV_NO_QUAD); } /* Test user-supplied tolerances */ if (reltolQ < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSVtolerances", MSGCV_BAD_RELTOLQ); return(CV_ILL_INPUT); } if (abstolQ == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSVtolerances", MSGCV_NULL_ABSTOLQ); return(CV_ILL_INPUT); } if (abstolQ->ops->nvmin == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSVtolerances", "Missing N_VMin routine from N_Vector"); return(CV_ILL_INPUT); } atolmin = N_VMin(abstolQ); if (atolmin < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSVtolerances", MSGCV_BAD_ABSTOLQ); return(CV_ILL_INPUT); } /* Copy tolerances into memory */ cv_mem->cv_itolQ = CV_SV; cv_mem->cv_reltolQ = reltolQ; if ( !(cv_mem->cv_VabstolQMallocDone) ) { cv_mem->cv_VabstolQ = N_VClone(cv_mem->cv_tempvQ); cv_mem->cv_lrw += cv_mem->cv_lrw1Q; 
cv_mem->cv_liw += cv_mem->cv_liw1Q; cv_mem->cv_VabstolQMallocDone = SUNTRUE; } N_VScale(ONE, abstolQ, cv_mem->cv_VabstolQ); cv_mem->cv_atolQmin0 = (atolmin == ZERO); return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeSensInit * * CVodeSensInit allocates and initializes sensitivity related * memory for a problem (using a sensitivity RHS function of type * CVSensRhsFn). All problem specification inputs are checked for * errors. * The return value is CV_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. */ int CVodeSensInit(void *cvode_mem, int Ns, int ism, CVSensRhsFn fS, N_Vector *yS0) { CVodeMem cv_mem; booleantype allocOK; int is, retval; SUNNonlinearSolver NLS; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSensInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Check if CVodeSensInit or CVodeSensInit1 was already called */ if (cv_mem->cv_SensMallocDone) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit", MSGCV_SENSINIT_2); return(CV_ILL_INPUT); } /* Check if Ns is legal */ if (Ns<=0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit", MSGCV_BAD_NS); return(CV_ILL_INPUT); } cv_mem->cv_Ns = Ns; /* Check if ism is compatible */ if (ism==CV_STAGGERED1) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit", MSGCV_BAD_ISM_IFS); return(CV_ILL_INPUT); } /* Check if ism is legal */ if ((ism!=CV_SIMULTANEOUS) && (ism!=CV_STAGGERED)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit", MSGCV_BAD_ISM); return(CV_ILL_INPUT); } cv_mem->cv_ism = ism; /* Check if yS0 is non-null */ if (yS0 == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit", MSGCV_NULL_YS0); return(CV_ILL_INPUT); } /* Store sensitivity RHS-related data */ cv_mem->cv_ifS = CV_ALLSENS; cv_mem->cv_fS1 = NULL; if (fS == NULL) { cv_mem->cv_fSDQ = SUNTRUE; cv_mem->cv_fS = cvSensRhsInternalDQ; 
cv_mem->cv_fS_data = cvode_mem; } else { cv_mem->cv_fSDQ = SUNFALSE; cv_mem->cv_fS = fS; cv_mem->cv_fS_data = cv_mem->cv_user_data; } /* No memory allocation for STAGGERED1 */ cv_mem->cv_stgr1alloc = SUNFALSE; /* Allocate the vectors (using yS0[0] as a template) */ allocOK = cvSensAllocVectors(cv_mem, yS0[0]); if (!allocOK) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } /* Check if larger temporary work arrays are needed for fused vector ops */ if (Ns*L_MAX > L_MAX) { free(cv_mem->cv_cvals); cv_mem->cv_cvals = NULL; free(cv_mem->cv_Xvecs); cv_mem->cv_Xvecs = NULL; free(cv_mem->cv_Zvecs); cv_mem->cv_Zvecs = NULL; cv_mem->cv_cvals = (realtype *) malloc((Ns*L_MAX)*sizeof(realtype)); cv_mem->cv_Xvecs = (N_Vector *) malloc((Ns*L_MAX)*sizeof(N_Vector)); cv_mem->cv_Zvecs = (N_Vector *) malloc((Ns*L_MAX)*sizeof(N_Vector)); if ((cv_mem->cv_cvals == NULL) || (cv_mem->cv_Xvecs == NULL) || (cv_mem->cv_Zvecs == NULL)) { cvSensFreeVectors(cv_mem); cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } } /*---------------------------------------------- All error checking is complete at this point -----------------------------------------------*/ /* Initialize znS[0] in the history array */ for (is=0; iscv_cvals[is] = ONE; retval = N_VScaleVectorArray(Ns, cv_mem->cv_cvals, yS0, cv_mem->cv_znS[0]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* Initialize all sensitivity related counters */ cv_mem->cv_nfSe = 0; cv_mem->cv_nfeS = 0; cv_mem->cv_ncfnS = 0; cv_mem->cv_netfS = 0; cv_mem->cv_nniS = 0; cv_mem->cv_nsetupsS = 0; /* Set default values for plist and pbar */ for (is=0; iscv_plist[is] = is; cv_mem->cv_pbar[is] = ONE; } /* Sensitivities will be computed */ cv_mem->cv_sensi = SUNTRUE; cv_mem->cv_SensMallocDone = SUNTRUE; /* create a Newton nonlinear solver object by default */ if (ism == CV_SIMULTANEOUS) NLS = SUNNonlinSol_NewtonSens(Ns+1, cv_mem->cv_acor, 
cv_mem->cv_sunctx); else NLS = SUNNonlinSol_NewtonSens(Ns, cv_mem->cv_acor, cv_mem->cv_sunctx); /* check that the nonlinear solver is non-NULL */ if (NLS == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensInit", MSGCV_MEM_FAIL); cvSensFreeVectors(cv_mem); return(CV_MEM_FAIL); } /* attach the nonlinear solver to the CVODE memory */ if (ism == CV_SIMULTANEOUS) retval = CVodeSetNonlinearSolverSensSim(cv_mem, NLS); else retval = CVodeSetNonlinearSolverSensStg(cv_mem, NLS); /* check that the nonlinear solver was successfully attached */ if (retval != CV_SUCCESS) { cvProcessError(cv_mem, retval, "CVODES", "CVodeSensInit", "Setting the nonlinear solver failed"); cvSensFreeVectors(cv_mem); SUNNonlinSolFree(NLS); return(CV_MEM_FAIL); } /* set ownership flag */ if (ism == CV_SIMULTANEOUS) cv_mem->ownNLSsim = SUNTRUE; else cv_mem->ownNLSstg = SUNTRUE; /* Sensitivity initialization was successfull */ return(CV_SUCCESS); } /* * CVodeSensInit1 * * CVodeSensInit1 allocates and initializes sensitivity related * memory for a problem (using a sensitivity RHS function of type * CVSensRhs1Fn). All problem specification inputs are checked for * errors. * The return value is CV_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. 
*/ int CVodeSensInit1(void *cvode_mem, int Ns, int ism, CVSensRhs1Fn fS1, N_Vector *yS0) { CVodeMem cv_mem; booleantype allocOK; int is, retval; SUNNonlinearSolver NLS; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSensInit1", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Check if CVodeSensInit or CVodeSensInit1 was already called */ if (cv_mem->cv_SensMallocDone) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit1", MSGCV_SENSINIT_2); return(CV_ILL_INPUT); } /* Check if Ns is legal */ if (Ns<=0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit1", MSGCV_BAD_NS); return(CV_ILL_INPUT); } cv_mem->cv_Ns = Ns; /* Check if ism is legal */ if ((ism!=CV_SIMULTANEOUS) && (ism!=CV_STAGGERED) && (ism!=CV_STAGGERED1)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit1", MSGCV_BAD_ISM); return(CV_ILL_INPUT); } cv_mem->cv_ism = ism; /* Check if yS0 is non-null */ if (yS0 == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensInit1", MSGCV_NULL_YS0); return(CV_ILL_INPUT); } /* Store sensitivity RHS-related data */ cv_mem->cv_ifS = CV_ONESENS; cv_mem->cv_fS = NULL; if (fS1 == NULL) { cv_mem->cv_fSDQ = SUNTRUE; cv_mem->cv_fS1 = cvSensRhs1InternalDQ; cv_mem->cv_fS_data = cvode_mem; } else { cv_mem->cv_fSDQ = SUNFALSE; cv_mem->cv_fS1 = fS1; cv_mem->cv_fS_data = cv_mem->cv_user_data; } /* Allocate ncfS1, ncfnS1, and nniS1 if needed */ if (ism == CV_STAGGERED1) { cv_mem->cv_stgr1alloc = SUNTRUE; cv_mem->cv_ncfS1 = NULL; cv_mem->cv_ncfS1 = (int*)malloc(Ns*sizeof(int)); cv_mem->cv_ncfnS1 = NULL; cv_mem->cv_ncfnS1 = (long int*)malloc(Ns*sizeof(long int)); cv_mem->cv_nniS1 = NULL; cv_mem->cv_nniS1 = (long int*)malloc(Ns*sizeof(long int)); if ( (cv_mem->cv_ncfS1 == NULL) || (cv_mem->cv_ncfnS1 == NULL) || (cv_mem->cv_nniS1 == NULL) ) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensInit1", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } } else { 
cv_mem->cv_stgr1alloc = SUNFALSE; } /* Allocate the vectors (using yS0[0] as a template) */ allocOK = cvSensAllocVectors(cv_mem, yS0[0]); if (!allocOK) { if (cv_mem->cv_stgr1alloc) { free(cv_mem->cv_ncfS1); cv_mem->cv_ncfS1 = NULL; free(cv_mem->cv_ncfnS1); cv_mem->cv_ncfnS1 = NULL; free(cv_mem->cv_nniS1); cv_mem->cv_nniS1 = NULL; } cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensInit1", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } /* Check if larger temporary work arrays are needed for fused vector ops */ if (Ns*L_MAX > L_MAX) { free(cv_mem->cv_cvals); cv_mem->cv_cvals = NULL; free(cv_mem->cv_Xvecs); cv_mem->cv_Xvecs = NULL; free(cv_mem->cv_Zvecs); cv_mem->cv_Zvecs = NULL; cv_mem->cv_cvals = (realtype *) malloc((Ns*L_MAX)*sizeof(realtype)); cv_mem->cv_Xvecs = (N_Vector *) malloc((Ns*L_MAX)*sizeof(N_Vector)); cv_mem->cv_Zvecs = (N_Vector *) malloc((Ns*L_MAX)*sizeof(N_Vector)); if ((cv_mem->cv_cvals == NULL) || (cv_mem->cv_Xvecs == NULL) || (cv_mem->cv_Zvecs == NULL)) { if (cv_mem->cv_stgr1alloc) { free(cv_mem->cv_ncfS1); cv_mem->cv_ncfS1 = NULL; free(cv_mem->cv_ncfnS1); cv_mem->cv_ncfnS1 = NULL; free(cv_mem->cv_nniS1); cv_mem->cv_nniS1 = NULL; } cvSensFreeVectors(cv_mem); cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensInit1", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } } /*---------------------------------------------- All error checking is complete at this point -----------------------------------------------*/ /* Initialize znS[0] in the history array */ for (is=0; iscv_cvals[is] = ONE; retval = N_VScaleVectorArray(Ns, cv_mem->cv_cvals, yS0, cv_mem->cv_znS[0]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* Initialize all sensitivity related counters */ cv_mem->cv_nfSe = 0; cv_mem->cv_nfeS = 0; cv_mem->cv_ncfnS = 0; cv_mem->cv_netfS = 0; cv_mem->cv_nniS = 0; cv_mem->cv_nsetupsS = 0; if (ism==CV_STAGGERED1) for (is=0; iscv_ncfnS1[is] = 0; cv_mem->cv_nniS1[is] = 0; } /* Set default values for plist and pbar */ for (is=0; iscv_plist[is] = is; 
cv_mem->cv_pbar[is] = ONE; } /* Sensitivities will be computed */ cv_mem->cv_sensi = SUNTRUE; cv_mem->cv_SensMallocDone = SUNTRUE; /* create a Newton nonlinear solver object by default */ if (ism == CV_SIMULTANEOUS) NLS = SUNNonlinSol_NewtonSens(Ns+1, cv_mem->cv_acor, cv_mem->cv_sunctx); else if (ism == CV_STAGGERED) NLS = SUNNonlinSol_NewtonSens(Ns, cv_mem->cv_acor, cv_mem->cv_sunctx); else NLS = SUNNonlinSol_Newton(cv_mem->cv_acor, cv_mem->cv_sunctx); /* check that the nonlinear solver is non-NULL */ if (NLS == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensInit1", MSGCV_MEM_FAIL); cvSensFreeVectors(cv_mem); return(CV_MEM_FAIL); } /* attach the nonlinear solver to the CVODE memory */ if (ism == CV_SIMULTANEOUS) retval = CVodeSetNonlinearSolverSensSim(cv_mem, NLS); else if (ism == CV_STAGGERED) retval = CVodeSetNonlinearSolverSensStg(cv_mem, NLS); else retval = CVodeSetNonlinearSolverSensStg1(cv_mem, NLS); /* check that the nonlinear solver was successfully attached */ if (retval != CV_SUCCESS) { cvProcessError(cv_mem, retval, "CVODES", "CVodeSensInit1", "Setting the nonlinear solver failed"); cvSensFreeVectors(cv_mem); SUNNonlinSolFree(NLS); return(CV_MEM_FAIL); } /* set ownership flag */ if (ism == CV_SIMULTANEOUS) cv_mem->ownNLSsim = SUNTRUE; else if (ism == CV_STAGGERED) cv_mem->ownNLSstg = SUNTRUE; else cv_mem->ownNLSstg1 = SUNTRUE; /* Sensitivity initialization was successfull */ return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeSensReInit * * CVodeSensReInit re-initializes CVODES's sensitivity related memory * for a problem, assuming it has already been allocated in prior * calls to CVodeInit and CVodeSensInit/CVodeSensInit1. * All problem specification inputs are checked for errors. * The number of sensitivities Ns is assumed to be unchanged since * the previous call to CVodeSensInit. * If any error occurs during initialization, it is reported to the * file whose file pointer is errfp. 
* The return value is CV_SUCCESS = 0 if no errors occurred, or * a negative value otherwise. */ int CVodeSensReInit(void *cvode_mem, int ism, N_Vector *yS0) { CVodeMem cv_mem; int is, retval; SUNNonlinearSolver NLS; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSensReInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was sensitivity initialized? */ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeSensReInit", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Check if ism is compatible */ if ((cv_mem->cv_ifS==CV_ALLSENS) && (ism==CV_STAGGERED1)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensReInit", MSGCV_BAD_ISM_IFS); return(CV_ILL_INPUT); } /* Check if ism is legal */ if ((ism!=CV_SIMULTANEOUS) && (ism!=CV_STAGGERED) && (ism!=CV_STAGGERED1)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensReInit", MSGCV_BAD_ISM); return(CV_ILL_INPUT); } cv_mem->cv_ism = ism; /* Check if yS0 is non-null */ if (yS0 == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensReInit", MSGCV_NULL_YS0); return(CV_ILL_INPUT); } /* Allocate ncfS1, ncfnS1, and nniS1 if needed */ if ( (ism==CV_STAGGERED1) && (cv_mem->cv_stgr1alloc==SUNFALSE) ) { cv_mem->cv_stgr1alloc = SUNTRUE; cv_mem->cv_ncfS1 = NULL; cv_mem->cv_ncfS1 = (int*)malloc(cv_mem->cv_Ns*sizeof(int)); cv_mem->cv_ncfnS1 = NULL; cv_mem->cv_ncfnS1 = (long int*)malloc(cv_mem->cv_Ns*sizeof(long int)); cv_mem->cv_nniS1 = NULL; cv_mem->cv_nniS1 = (long int*)malloc(cv_mem->cv_Ns*sizeof(long int)); if ( (cv_mem->cv_ncfS1==NULL) || (cv_mem->cv_ncfnS1==NULL) || (cv_mem->cv_nniS1==NULL) ) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensReInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } } /*---------------------------------------------- All error checking is complete at this point -----------------------------------------------*/ /* Initialize znS[0] in the history array */ for (is=0; 
iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, yS0, cv_mem->cv_znS[0]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* Initialize all sensitivity related counters */ cv_mem->cv_nfSe = 0; cv_mem->cv_nfeS = 0; cv_mem->cv_ncfnS = 0; cv_mem->cv_netfS = 0; cv_mem->cv_nniS = 0; cv_mem->cv_nsetupsS = 0; if (ism==CV_STAGGERED1) for (is=0; iscv_Ns; is++) { cv_mem->cv_ncfnS1[is] = 0; cv_mem->cv_nniS1[is] = 0; } /* Problem has been successfully re-initialized */ cv_mem->cv_sensi = SUNTRUE; /* Check if the NLS exists, create the default NLS if needed */ if ( (ism == CV_SIMULTANEOUS && cv_mem->NLSsim == NULL) || (ism == CV_STAGGERED && cv_mem->NLSstg == NULL) || (ism == CV_STAGGERED1 && cv_mem->NLSstg1 == NULL) ) { /* create a Newton nonlinear solver object by default */ if (ism == CV_SIMULTANEOUS) NLS = SUNNonlinSol_NewtonSens(cv_mem->cv_Ns+1, cv_mem->cv_acor, cv_mem->cv_sunctx); else if (ism == CV_STAGGERED) NLS = SUNNonlinSol_NewtonSens(cv_mem->cv_Ns, cv_mem->cv_acor, cv_mem->cv_sunctx); else NLS = SUNNonlinSol_Newton(cv_mem->cv_acor, cv_mem->cv_sunctx); /* check that the nonlinear solver is non-NULL */ if (NLS == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSensReInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } /* attach the nonlinear solver to the CVODES memory */ if (ism == CV_SIMULTANEOUS) retval = CVodeSetNonlinearSolverSensSim(cv_mem, NLS); else if (ism == CV_STAGGERED) retval = CVodeSetNonlinearSolverSensStg(cv_mem, NLS); else retval = CVodeSetNonlinearSolverSensStg1(cv_mem, NLS); /* check that the nonlinear solver was successfully attached */ if (retval != CV_SUCCESS) { cvProcessError(cv_mem, retval, "CVODES", "CVodeSensReInit", "Setting the nonlinear solver failed"); SUNNonlinSolFree(NLS); return(CV_MEM_FAIL); } /* set ownership flag */ if (ism == CV_SIMULTANEOUS) cv_mem->ownNLSsim = SUNTRUE; else if (ism == CV_STAGGERED) cv_mem->ownNLSstg = SUNTRUE; else cv_mem->ownNLSstg1 = SUNTRUE; /* 
initialize the NLS object, this assumes that the linear solver has already been initialized in CVodeInit */ if (ism == CV_SIMULTANEOUS) retval = cvNlsInitSensSim(cv_mem); else if (ism == CV_STAGGERED) retval = cvNlsInitSensStg(cv_mem); else retval = cvNlsInitSensStg1(cv_mem); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_NLS_INIT_FAIL, "CVODES", "CVodeSensReInit", MSGCV_NLS_INIT_FAIL); return(CV_NLS_INIT_FAIL); } } /* Sensitivity re-initialization was successfull */ return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeSensSStolerances * CVodeSensSVtolerances * CVodeSensEEtolerances * * These functions specify the integration tolerances for sensitivity * variables. One of them MUST be called before the first call to CVode. * * CVodeSensSStolerances specifies scalar relative and absolute tolerances. * CVodeSensSVtolerances specifies scalar relative tolerance and a vector * absolute tolerance for each sensitivity vector (a potentially different * absolute tolerance for each vector component). * CVodeEEtolerances specifies that tolerances for sensitivity variables * should be estimated from those provided for the state variables. */ int CVodeSensSStolerances(void *cvode_mem, realtype reltolS, realtype *abstolS) { CVodeMem cv_mem; int is; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSensSStolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was sensitivity initialized? 
*/ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeSensSStolerances", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Test user-supplied tolerances */ if (reltolS < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensSStolerances", MSGCV_BAD_RELTOLS); return(CV_ILL_INPUT); } if (abstolS == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensSStolerances", MSGCV_NULL_ABSTOLS); return(CV_ILL_INPUT); } for (is=0; iscv_Ns; is++) if (abstolS[is] < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensSStolerances", MSGCV_BAD_ABSTOLS); return(CV_ILL_INPUT); } /* Copy tolerances into memory */ cv_mem->cv_itolS = CV_SS; cv_mem->cv_reltolS = reltolS; if ( !(cv_mem->cv_SabstolSMallocDone) ) { cv_mem->cv_SabstolS = NULL; cv_mem->cv_SabstolS = (realtype *)malloc(cv_mem->cv_Ns*sizeof(realtype)); cv_mem->cv_atolSmin0 = (booleantype *)malloc(cv_mem->cv_Ns*sizeof(booleantype)); cv_mem->cv_lrw += cv_mem->cv_Ns; cv_mem->cv_SabstolSMallocDone = SUNTRUE; } for (is=0; iscv_Ns; is++) { cv_mem->cv_SabstolS[is] = abstolS[is]; cv_mem->cv_atolSmin0[is] = (abstolS[is] == ZERO); } return(CV_SUCCESS); } int CVodeSensSVtolerances(void *cvode_mem, realtype reltolS, N_Vector *abstolS) { CVodeMem cv_mem; int is, retval; realtype *atolmin; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSensSVtolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was sensitivity initialized? 
*/ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeSensSVtolerances", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Test user-supplied tolerances */ if (reltolS < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensSVtolerances", MSGCV_BAD_RELTOLS); return(CV_ILL_INPUT); } if (abstolS == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensSVtolerances", MSGCV_NULL_ABSTOLS); return(CV_ILL_INPUT); } if (cv_mem->cv_tempv->ops->nvmin == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensSVtolerances", "Missing N_VMin routine from N_Vector"); return(CV_ILL_INPUT); } atolmin = (realtype *)malloc(cv_mem->cv_Ns*sizeof(realtype)); for (is=0; iscv_Ns; is++) { atolmin[is] = N_VMin(abstolS[is]); if (atolmin[is] < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSensSVtolerances", MSGCV_BAD_ABSTOLS); free(atolmin); return(CV_ILL_INPUT); } } /* Copy tolerances into memory */ cv_mem->cv_itolS = CV_SV; cv_mem->cv_reltolS = reltolS; if ( !(cv_mem->cv_VabstolSMallocDone) ) { cv_mem->cv_VabstolS = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); cv_mem->cv_atolSmin0 = (booleantype *)malloc(cv_mem->cv_Ns*sizeof(booleantype)); cv_mem->cv_lrw += cv_mem->cv_Ns*cv_mem->cv_lrw1; cv_mem->cv_liw += cv_mem->cv_Ns*cv_mem->cv_liw1; cv_mem->cv_VabstolSMallocDone = SUNTRUE; } for (is=0; iscv_Ns; is++) { cv_mem->cv_cvals[is] = ONE; cv_mem->cv_atolSmin0[is] = (atolmin[is] == ZERO); } free(atolmin); retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, abstolS, cv_mem->cv_VabstolS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); return(CV_SUCCESS); } int CVodeSensEEtolerances(void *cvode_mem) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSensEEtolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was sensitivity initialized? 
*/ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeSensEEtolerances", MSGCV_NO_SENSI); return(CV_NO_SENS); } cv_mem->cv_itolS = CV_EE; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeQuadSensInit * */ int CVodeQuadSensInit(void *cvode_mem, CVQuadSensRhsFn fQS, N_Vector *yQS0) { CVodeMem cv_mem; booleantype allocOK; int is, retval; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadSensInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Check if sensitivity analysis is active */ if (!cv_mem->cv_sensi) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensInit", MSGCV_NO_SENSI); return(CV_ILL_INPUT); } /* Check if yQS0 is non-null */ if (yQS0 == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensInit", MSGCV_NULL_YQS0); return(CV_ILL_INPUT); } /* Allocate the vectors (using yQS0[0] as a template) */ allocOK = cvQuadSensAllocVectors(cv_mem, yQS0[0]); if (!allocOK) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeQuadSensInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } /*---------------------------------------------- All error checking is complete at this point -----------------------------------------------*/ /* Set fQS */ if (fQS == NULL) { cv_mem->cv_fQSDQ = SUNTRUE; cv_mem->cv_fQS = cvQuadSensRhsInternalDQ; cv_mem->cv_fQS_data = cvode_mem; } else { cv_mem->cv_fQSDQ = SUNFALSE; cv_mem->cv_fQS = fQS; cv_mem->cv_fQS_data = cv_mem->cv_user_data; } /* Initialize znQS[0] in the history array */ for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, yQS0, cv_mem->cv_znQS[0]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* Initialize all sensitivity related counters */ cv_mem->cv_nfQSe = 0; cv_mem->cv_nfQeS = 0; cv_mem->cv_netfQS = 0; /* Quadrature sensitivities will be computed */ 
cv_mem->cv_quadr_sensi = SUNTRUE; cv_mem->cv_QuadSensMallocDone = SUNTRUE; /* Sensitivity initialization was successfull */ return(CV_SUCCESS); } /* * CVodeQuadSensReInit * */ int CVodeQuadSensReInit(void *cvode_mem, N_Vector *yQS0) { CVodeMem cv_mem; int is, retval; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadSensReInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Check if sensitivity analysis is active */ if (!cv_mem->cv_sensi) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensReInit", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Was quadrature sensitivity initialized? */ if (cv_mem->cv_QuadSensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeQuadSensReInit", MSGCV_NO_QUADSENSI); return(CV_NO_QUADSENS); } /* Check if yQS0 is non-null */ if (yQS0 == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensReInit", MSGCV_NULL_YQS0); return(CV_ILL_INPUT); } /*---------------------------------------------- All error checking is complete at this point -----------------------------------------------*/ /* Initialize znQS[0] in the history array */ for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, yQS0, cv_mem->cv_znQS[0]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* Initialize all sensitivity related counters */ cv_mem->cv_nfQSe = 0; cv_mem->cv_nfQeS = 0; cv_mem->cv_netfQS = 0; /* Quadrature sensitivities will be computed */ cv_mem->cv_quadr_sensi = SUNTRUE; /* Problem has been successfully re-initialized */ return(CV_SUCCESS); } /* * CVodeQuadSensSStolerances * CVodeQuadSensSVtolerances * CVodeQuadSensEEtolerances * * These functions specify the integration tolerances for quadrature * sensitivity variables. One of them MUST be called before the first * call to CVode IF these variables are included in the error test. 
* * CVodeQuadSensSStolerances specifies scalar relative and absolute tolerances. * CVodeQuadSensSVtolerances specifies scalar relative tolerance and a vector * absolute tolerance for each quadrature sensitivity vector (a potentially * different absolute tolerance for each vector component). * CVodeQuadSensEEtolerances specifies that tolerances for sensitivity variables * should be estimated from those provided for the quadrature variables. * In this case, tolerances for the quadrature variables must be * specified through a call to one of CVodeQuad**tolerances. */ int CVodeQuadSensSStolerances(void *cvode_mem, realtype reltolQS, realtype *abstolQS) { CVodeMem cv_mem; int is; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadSensSStolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Check if sensitivity was initialized */ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeQuadSensSStolerances", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Ckeck if quadrature sensitivity was initialized? 
*/ if (cv_mem->cv_QuadSensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeQuadSSensSStolerances", MSGCV_NO_QUADSENSI); return(CV_NO_QUAD); } /* Test user-supplied tolerances */ if (reltolQS < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensSStolerances", MSGCV_BAD_RELTOLQS); return(CV_ILL_INPUT); } if (abstolQS == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensSStolerances", MSGCV_NULL_ABSTOLQS); return(CV_ILL_INPUT); } for (is=0; iscv_Ns; is++) if (abstolQS[is] < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensSStolerances", MSGCV_BAD_ABSTOLQS); return(CV_ILL_INPUT); } /* Copy tolerances into memory */ cv_mem->cv_itolQS = CV_SS; cv_mem->cv_reltolQS = reltolQS; if ( !(cv_mem->cv_SabstolQSMallocDone) ) { cv_mem->cv_SabstolQS = NULL; cv_mem->cv_SabstolQS = (realtype *)malloc(cv_mem->cv_Ns*sizeof(realtype)); cv_mem->cv_atolQSmin0 = (booleantype *)malloc(cv_mem->cv_Ns*sizeof(booleantype)); cv_mem->cv_lrw += cv_mem->cv_Ns; cv_mem->cv_SabstolQSMallocDone = SUNTRUE; } for (is=0; iscv_Ns; is++) { cv_mem->cv_SabstolQS[is] = abstolQS[is]; cv_mem->cv_atolQSmin0[is] = (abstolQS[is] == ZERO); } return(CV_SUCCESS); } int CVodeQuadSensSVtolerances(void *cvode_mem, realtype reltolQS, N_Vector *abstolQS) { CVodeMem cv_mem; int is, retval; realtype *atolmin; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadSensSVtolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* check if sensitivity was initialized */ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeQuadSensSVtolerances", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Ckeck if quadrature sensitivity was initialized? 
*/ if (cv_mem->cv_QuadSensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeQuadSensSVtolerances", MSGCV_NO_QUADSENSI); return(CV_NO_QUAD); } /* Test user-supplied tolerances */ if (reltolQS < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensSVtolerances", MSGCV_BAD_RELTOLQS); return(CV_ILL_INPUT); } if (abstolQS == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensSVtolerances", MSGCV_NULL_ABSTOLQS); return(CV_ILL_INPUT); } if (cv_mem->cv_tempv->ops->nvmin == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensSVtolerances", "Missing N_VMin routine from N_Vector"); return(CV_ILL_INPUT); } atolmin = (realtype *)malloc(cv_mem->cv_Ns*sizeof(realtype)); for (is=0; iscv_Ns; is++) { atolmin[is] = N_VMin(abstolQS[is]); if (atolmin[is] < ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeQuadSensSVtolerances", MSGCV_BAD_ABSTOLQS); free(atolmin); return(CV_ILL_INPUT); } } /* Copy tolerances into memory */ cv_mem->cv_itolQS = CV_SV; cv_mem->cv_reltolQS = reltolQS; if ( !(cv_mem->cv_VabstolQSMallocDone) ) { cv_mem->cv_VabstolQS = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempvQ); cv_mem->cv_atolQSmin0 = (booleantype *)malloc(cv_mem->cv_Ns*sizeof(booleantype)); cv_mem->cv_lrw += cv_mem->cv_Ns*cv_mem->cv_lrw1Q; cv_mem->cv_liw += cv_mem->cv_Ns*cv_mem->cv_liw1Q; cv_mem->cv_VabstolQSMallocDone = SUNTRUE; } for (is=0; iscv_Ns; is++) { cv_mem->cv_cvals[is] = ONE; cv_mem->cv_atolQSmin0[is] = (atolmin[is] == ZERO); } free(atolmin); retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, abstolQS, cv_mem->cv_VabstolQS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); return(CV_SUCCESS); } int CVodeQuadSensEEtolerances(void *cvode_mem) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeQuadSensEEtolerances", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* check if sensitivity was initialized */ if 
(cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeQuadSensEEtolerances", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Ckeck if quadrature sensitivity was initialized? */ if (cv_mem->cv_QuadSensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeQuadSensEEtolerances", MSGCV_NO_QUADSENSI); return(CV_NO_QUAD); } cv_mem->cv_itolQS = CV_EE; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeSensToggleOff * * CVodeSensToggleOff deactivates sensitivity calculations. * It does NOT deallocate sensitivity-related memory. */ int CVodeSensToggleOff(void *cvode_mem) { CVodeMem cv_mem; /* Check cvode_mem */ if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSensToggleOff", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Disable sensitivities */ cv_mem->cv_sensi = SUNFALSE; cv_mem->cv_quadr_sensi = SUNFALSE; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ /* * CVodeRootInit * * CVodeRootInit initializes a rootfinding problem to be solved * during the integration of the ODE system. It loads the root * function pointer and the number of root functions, and allocates * workspace memory. The return value is CV_SUCCESS = 0 if no errors * occurred, or a negative value otherwise. */ int CVodeRootInit(void *cvode_mem, int nrtfn, CVRootFn g) { CVodeMem cv_mem; int i, nrt; /* Check cvode_mem pointer */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeRootInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; nrt = (nrtfn < 0) ? 
0 : nrtfn; /* If rerunning CVodeRootInit() with a different number of root functions (changing number of gfun components), then free currently held memory resources */ if ((nrt != cv_mem->cv_nrtfn) && (cv_mem->cv_nrtfn > 0)) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; free(cv_mem->cv_ghi); cv_mem->cv_ghi = NULL; free(cv_mem->cv_grout); cv_mem->cv_grout = NULL; free(cv_mem->cv_iroots); cv_mem->cv_iroots = NULL; free(cv_mem->cv_rootdir); cv_mem->cv_rootdir = NULL; free(cv_mem->cv_gactive); cv_mem->cv_gactive = NULL; cv_mem->cv_lrw -= 3 * (cv_mem->cv_nrtfn); cv_mem->cv_liw -= 3 * (cv_mem->cv_nrtfn); } /* If CVodeRootInit() was called with nrtfn == 0, then set cv_nrtfn to zero and cv_gfun to NULL before returning */ if (nrt == 0) { cv_mem->cv_nrtfn = nrt; cv_mem->cv_gfun = NULL; return(CV_SUCCESS); } /* If rerunning CVodeRootInit() with the same number of root functions (not changing number of gfun components), then check if the root function argument has changed */ /* If g != NULL then return as currently reserved memory resources will suffice */ if (nrt == cv_mem->cv_nrtfn) { if (g != cv_mem->cv_gfun) { if (g == NULL) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; free(cv_mem->cv_ghi); cv_mem->cv_ghi = NULL; free(cv_mem->cv_grout); cv_mem->cv_grout = NULL; free(cv_mem->cv_iroots); cv_mem->cv_iroots = NULL; free(cv_mem->cv_rootdir); cv_mem->cv_rootdir = NULL; free(cv_mem->cv_gactive); cv_mem->cv_gactive = NULL; cv_mem->cv_lrw -= 3*nrt; cv_mem->cv_liw -= 3*nrt; cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeRootInit", MSGCV_NULL_G); return(CV_ILL_INPUT); } else { cv_mem->cv_gfun = g; return(CV_SUCCESS); } } else return(CV_SUCCESS); } /* Set variable values in CVode memory block */ cv_mem->cv_nrtfn = nrt; if (g == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeRootInit", MSGCV_NULL_G); return(CV_ILL_INPUT); } else cv_mem->cv_gfun = g; /* Allocate necessary memory and return */ cv_mem->cv_glo = NULL; cv_mem->cv_glo = (realtype *) 
malloc(nrt*sizeof(realtype)); if (cv_mem->cv_glo == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeRootInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->cv_ghi = NULL; cv_mem->cv_ghi = (realtype *) malloc(nrt*sizeof(realtype)); if (cv_mem->cv_ghi == NULL) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeRootInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->cv_grout = NULL; cv_mem->cv_grout = (realtype *) malloc(nrt*sizeof(realtype)); if (cv_mem->cv_grout == NULL) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; free(cv_mem->cv_ghi); cv_mem->cv_ghi = NULL; cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeRootInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->cv_iroots = NULL; cv_mem->cv_iroots = (int *) malloc(nrt*sizeof(int)); if (cv_mem->cv_iroots == NULL) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; free(cv_mem->cv_ghi); cv_mem->cv_ghi = NULL; free(cv_mem->cv_grout); cv_mem->cv_grout = NULL; cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeRootInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->cv_rootdir = NULL; cv_mem->cv_rootdir = (int *) malloc(nrt*sizeof(int)); if (cv_mem->cv_rootdir == NULL) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; free(cv_mem->cv_ghi); cv_mem->cv_ghi = NULL; free(cv_mem->cv_grout); cv_mem->cv_grout = NULL; free(cv_mem->cv_iroots); cv_mem->cv_iroots = NULL; cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeRootInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->cv_gactive = NULL; cv_mem->cv_gactive = (booleantype *) malloc(nrt*sizeof(booleantype)); if (cv_mem->cv_gactive == NULL) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; free(cv_mem->cv_ghi); cv_mem->cv_ghi = NULL; free(cv_mem->cv_grout); cv_mem->cv_grout = NULL; free(cv_mem->cv_iroots); cv_mem->cv_iroots = NULL; free(cv_mem->cv_rootdir); cv_mem->cv_rootdir = NULL; cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeRootInit", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } /* Set default values 
for rootdir (both directions) */ for(i=0; icv_rootdir[i] = 0; /* Set default values for gactive (all active) */ for(i=0; icv_gactive[i] = SUNTRUE; cv_mem->cv_lrw += 3*nrt; cv_mem->cv_liw += 3*nrt; return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Main solver function * ----------------------------------------------------------------- */ /* * CVode * * This routine is the main driver of the CVODES package. * * It integrates over a time interval defined by the user, by calling * cvStep to do internal time steps. * * The first time that CVode is called for a successfully initialized * problem, it computes a tentative initial step size h. * * CVode supports two modes, specified by itask: CV_NORMAL, CV_ONE_STEP. * In the CV_NORMAL mode, the solver steps until it reaches or passes tout * and then interpolates to obtain y(tout). * In the CV_ONE_STEP mode, it takes one internal step and returns. */ int CVode(void *cvode_mem, realtype tout, N_Vector yout, realtype *tret, int itask) { CVodeMem cv_mem; long int nstloc; int retval, hflag, kflag, istate, is, ir, ier, irfndp; realtype troundoff, tout_hin, rh, nrm; booleantype inactive_roots; /* * ------------------------------------- * 1. 
Check and process inputs * ------------------------------------- */ /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVode", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Check if cvode_mem was allocated */ if (cv_mem->cv_MallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_MALLOC, "CVODES", "CVode", MSGCV_NO_MALLOC); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_MALLOC); } /* Check for yout != NULL */ if ((cv_mem->cv_y = yout) == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_YOUT_NULL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Check for tret != NULL */ if (tret == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_TRET_NULL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Check for valid itask */ if ( (itask != CV_NORMAL) && (itask != CV_ONE_STEP) ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_BAD_ITASK); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } if (itask == CV_NORMAL) cv_mem->cv_toutc = tout; cv_mem->cv_taskc = itask; /* * ---------------------------------------- * 2. Initializations performed only at * the first step (nst=0): * - initial setup * - initialize Nordsieck history array * - compute initial step size * - check for approach to tstop * - check for approach to a root * ---------------------------------------- */ if (cv_mem->cv_nst == 0) { cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; /* Check inputs for corectness */ ier = cvInitialSetup(cv_mem); if (ier != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(ier); } /* * Call f at (t0,y0), set zn[1] = y'(t0). * If computing any quadratures, call fQ at (t0,y0), set znQ[1] = yQ'(t0) * If computing sensitivities, call fS at (t0,y0,yS0), set znS[1][is] = yS'(t0), is=1,...,Ns. * If computing quadr. 
sensi., call fQS at (t0,y0,yS0), set znQS[1][is] = yQS'(t0), is=1,...,Ns. */ retval = cv_mem->cv_f(cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_zn[1], cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) { cvProcessError(cv_mem, CV_RHSFUNC_FAIL, "CVODES", "CVode", MSGCV_RHSFUNC_FAILED, cv_mem->cv_tn); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_RHSFUNC_FAIL); } if (retval > 0) { cvProcessError(cv_mem, CV_FIRST_RHSFUNC_ERR, "CVODES", "CVode", MSGCV_RHSFUNC_FIRST); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_FIRST_RHSFUNC_ERR); } if (cv_mem->cv_quadr) { retval = cv_mem->cv_fQ(cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_znQ[1], cv_mem->cv_user_data); cv_mem->cv_nfQe++; if (retval < 0) { cvProcessError(cv_mem, CV_QRHSFUNC_FAIL, "CVODES", "CVode", MSGCV_QRHSFUNC_FAILED, cv_mem->cv_tn); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_QRHSFUNC_FAIL); } if (retval > 0) { cvProcessError(cv_mem, CV_FIRST_QRHSFUNC_ERR, "CVODES", "CVode", MSGCV_QRHSFUNC_FIRST); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_FIRST_QRHSFUNC_ERR); } } if (cv_mem->cv_sensi) { retval = cvSensRhsWrapper(cv_mem, cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_zn[1], cv_mem->cv_znS[0], cv_mem->cv_znS[1], cv_mem->cv_tempv, cv_mem->cv_ftemp); if (retval < 0) { cvProcessError(cv_mem, CV_SRHSFUNC_FAIL, "CVODES", "CVode", MSGCV_SRHSFUNC_FAILED, cv_mem->cv_tn); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SRHSFUNC_FAIL); } if (retval > 0) { cvProcessError(cv_mem, CV_FIRST_SRHSFUNC_ERR, "CVODES", "CVode", MSGCV_SRHSFUNC_FIRST); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_FIRST_SRHSFUNC_ERR); } } if (cv_mem->cv_quadr_sensi) { retval = cv_mem->cv_fQS(cv_mem->cv_Ns, cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_znS[0], cv_mem->cv_znQ[1], cv_mem->cv_znQS[1], cv_mem->cv_fQS_data, cv_mem->cv_tempv, cv_mem->cv_tempvQ); cv_mem->cv_nfQSe++; if (retval < 0) { cvProcessError(cv_mem, CV_QSRHSFUNC_FAIL, "CVODES", "CVode", MSGCV_QSRHSFUNC_FAILED, cv_mem->cv_tn); 
SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_QSRHSFUNC_FAIL); } if (retval > 0) { cvProcessError(cv_mem, CV_FIRST_QSRHSFUNC_ERR, "CVODES", "CVode", MSGCV_QSRHSFUNC_FIRST); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_FIRST_QSRHSFUNC_ERR); } } /* Test input tstop for legality. */ if (cv_mem->cv_tstopset) { if ( (cv_mem->cv_tstop - cv_mem->cv_tn)*(tout - cv_mem->cv_tn) <= ZERO ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_BAD_TSTOP, cv_mem->cv_tstop, cv_mem->cv_tn); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } } /* Set initial h (from H0 or cvHin). */ cv_mem->cv_h = cv_mem->cv_hin; if ( (cv_mem->cv_h != ZERO) && ((tout-cv_mem->cv_tn)*cv_mem->cv_h < ZERO) ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_BAD_H0); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } if (cv_mem->cv_h == ZERO) { tout_hin = tout; if ( cv_mem->cv_tstopset && (tout-cv_mem->cv_tn)*(tout-cv_mem->cv_tstop) > ZERO ) tout_hin = cv_mem->cv_tstop; hflag = cvHin(cv_mem, tout_hin); if (hflag != CV_SUCCESS) { istate = cvHandleFailure(cv_mem, hflag); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(istate); } } rh = SUNRabs(cv_mem->cv_h)*cv_mem->cv_hmax_inv; if (rh > ONE) cv_mem->cv_h /= rh; if (SUNRabs(cv_mem->cv_h) < cv_mem->cv_hmin) cv_mem->cv_h *= cv_mem->cv_hmin/SUNRabs(cv_mem->cv_h); /* Check for approach to tstop */ if (cv_mem->cv_tstopset) { if ( (cv_mem->cv_tn + cv_mem->cv_h - cv_mem->cv_tstop)*cv_mem->cv_h > ZERO ) cv_mem->cv_h = (cv_mem->cv_tstop - cv_mem->cv_tn)*(ONE-FOUR*cv_mem->cv_uround); } /* * Scale zn[1] by h. * If computing any quadratures, scale znQ[1] by h. * If computing sensitivities, scale znS[1][is] by h. * If computing quadrature sensitivities, scale znQS[1][is] by h. 
*/ cv_mem->cv_hscale = cv_mem->cv_h; cv_mem->cv_h0u = cv_mem->cv_h; cv_mem->cv_hprime = cv_mem->cv_h; N_VScale(cv_mem->cv_h, cv_mem->cv_zn[1], cv_mem->cv_zn[1]); if (cv_mem->cv_quadr) N_VScale(cv_mem->cv_h, cv_mem->cv_znQ[1], cv_mem->cv_znQ[1]); if (cv_mem->cv_sensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = cv_mem->cv_h; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znS[1], cv_mem->cv_znS[1]); if (retval != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_VECTOROP_ERR); } } if (cv_mem->cv_quadr_sensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = cv_mem->cv_h; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znQS[1], cv_mem->cv_znQS[1]); if (retval != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_VECTOROP_ERR); } } /* Check for zeros of root function g at and near t0. */ if (cv_mem->cv_nrtfn > 0) { retval = cvRcheck1(cv_mem); if (retval == CV_RTFUNC_FAIL) { cvProcessError(cv_mem, CV_RTFUNC_FAIL, "CVODES", "cvRcheck1", MSGCV_RTFUNC_FAILED, cv_mem->cv_tn); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_RTFUNC_FAIL); } } } /* end of first call block */ /* * ------------------------------------------------------ * 3. At following steps, perform stop tests: * - check for root in last step * - check if we passed tstop * - check if we passed tout (NORMAL mode) * - check if current tn was returned (ONE_STEP mode) * - check if we are close to tstop * (adjust step size if needed) * ------------------------------------------------------- */ if (cv_mem->cv_nst > 0) { /* Estimate an infinitesimal time interval to be used as a roundoff for time quantities (based on current time and step size) */ troundoff = FUZZ_FACTOR * cv_mem->cv_uround * (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_h)); /* First, check for a root in the last step taken, other than the last root found, if any. 
If itask = CV_ONE_STEP and y(tn) was not returned because of an intervening root, return y(tn) now. */ if (cv_mem->cv_nrtfn > 0) { irfndp = cv_mem->cv_irfnd; retval = cvRcheck2(cv_mem); if (retval == CLOSERT) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvRcheck2", MSGCV_CLOSE_ROOTS, cv_mem->cv_tlo); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } else if (retval == CV_RTFUNC_FAIL) { cvProcessError(cv_mem, CV_RTFUNC_FAIL, "CVODES", "cvRcheck2", MSGCV_RTFUNC_FAILED, cv_mem->cv_tlo); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_RTFUNC_FAIL); } else if (retval == RTFOUND) { cv_mem->cv_tretlast = *tret = cv_mem->cv_tlo; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ROOT_RETURN); } /* If tn is distinct from tretlast (within roundoff), check remaining interval for roots */ if ( SUNRabs(cv_mem->cv_tn - cv_mem->cv_tretlast) > troundoff ) { retval = cvRcheck3(cv_mem); if (retval == CV_SUCCESS) { /* no root found */ cv_mem->cv_irfnd = 0; if ((irfndp == 1) && (itask == CV_ONE_STEP)) { cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } } else if (retval == RTFOUND) { /* a new root was found */ cv_mem->cv_irfnd = 1; cv_mem->cv_tretlast = *tret = cv_mem->cv_tlo; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ROOT_RETURN); } else if (retval == CV_RTFUNC_FAIL) { /* g failed */ cvProcessError(cv_mem, CV_RTFUNC_FAIL, "CVODES", "cvRcheck3", MSGCV_RTFUNC_FAILED, cv_mem->cv_tlo); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_RTFUNC_FAIL); } } } /* end of root stop check */ /* In CV_NORMAL mode, test if tout was reached */ if ( (itask == CV_NORMAL) && ((cv_mem->cv_tn-tout)*cv_mem->cv_h >= ZERO) ) { cv_mem->cv_tretlast = *tret = tout; ier = CVodeGetDky(cv_mem, tout, 0, yout); if (ier != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_BAD_TOUT, tout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } 
SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* In CV_ONE_STEP mode, test if tn was returned */ if ( itask == CV_ONE_STEP && SUNRabs(cv_mem->cv_tn - cv_mem->cv_tretlast) > troundoff ) { cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* Test for tn at tstop or near tstop */ if ( cv_mem->cv_tstopset ) { if ( SUNRabs(cv_mem->cv_tn - cv_mem->cv_tstop) <= troundoff ) { ier = CVodeGetDky(cv_mem, cv_mem->cv_tstop, 0, yout); if (ier != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_BAD_TSTOP, cv_mem->cv_tstop, cv_mem->cv_tn); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } cv_mem->cv_tretlast = *tret = cv_mem->cv_tstop; cv_mem->cv_tstopset = SUNFALSE; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_TSTOP_RETURN); } /* If next step would overtake tstop, adjust stepsize */ if ( (cv_mem->cv_tn + cv_mem->cv_hprime - cv_mem->cv_tstop)*cv_mem->cv_h > ZERO ) { cv_mem->cv_hprime = (cv_mem->cv_tstop - cv_mem->cv_tn)*(ONE-FOUR*cv_mem->cv_uround); cv_mem->cv_eta = cv_mem->cv_hprime / cv_mem->cv_h; } } } /* end stopping tests block */ /* * -------------------------------------------------- * 4. Looping point for internal steps * * 4.1. check for errors (too many steps, too much * accuracy requested, step size too small) * 4.2. take a new step (call cvStep) * 4.3. stop on error * 4.4. 
perform stop tests: * - check for root in last step * - check if tout was passed * - check if close to tstop * - check if in ONE_STEP mode (must return) * -------------------------------------------------- */ nstloc = 0; for(;;) { cv_mem->cv_next_h = cv_mem->cv_h; cv_mem->cv_next_q = cv_mem->cv_q; /* Reset and check ewt, ewtQ, ewtS */ if (cv_mem->cv_nst > 0) { ier = cv_mem->cv_efun(cv_mem->cv_zn[0], cv_mem->cv_ewt, cv_mem->cv_e_data); if(ier != 0) { if (cv_mem->cv_itol == CV_WF) cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_EWT_NOW_FAIL, cv_mem->cv_tn); else cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_EWT_NOW_BAD, cv_mem->cv_tn); istate = CV_ILL_INPUT; cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); break; } if (cv_mem->cv_quadr && cv_mem->cv_errconQ) { ier = cvQuadEwtSet(cv_mem, cv_mem->cv_znQ[0], cv_mem->cv_ewtQ); if(ier != 0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_EWTQ_NOW_BAD, cv_mem->cv_tn); istate = CV_ILL_INPUT; cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); break; } } if (cv_mem->cv_sensi) { ier = cvSensEwtSet(cv_mem, cv_mem->cv_znS[0], cv_mem->cv_ewtS); if (ier != 0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_EWTS_NOW_BAD, cv_mem->cv_tn); istate = CV_ILL_INPUT; cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); break; } } if (cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS) { ier = cvQuadSensEwtSet(cv_mem, cv_mem->cv_znQS[0], cv_mem->cv_ewtQS); if (ier != 0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVode", MSGCV_EWTQS_NOW_BAD, cv_mem->cv_tn); istate = CV_ILL_INPUT; cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); break; } } } /* Check for too many steps */ if ( (cv_mem->cv_mxstep>0) && (nstloc >= cv_mem->cv_mxstep) ) { cvProcessError(cv_mem, CV_TOO_MUCH_WORK, "CVODES", "CVode", MSGCV_MAX_STEPS, cv_mem->cv_tn); istate = 
CV_TOO_MUCH_WORK; cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); break; } /* Check for too much accuracy requested */ nrm = N_VWrmsNorm(cv_mem->cv_zn[0], cv_mem->cv_ewt); if (cv_mem->cv_quadr && cv_mem->cv_errconQ) { nrm = cvQuadUpdateNorm(cv_mem, nrm, cv_mem->cv_znQ[0], cv_mem->cv_ewtQ); } if (cv_mem->cv_sensi && cv_mem->cv_errconS) { nrm = cvSensUpdateNorm(cv_mem, nrm, cv_mem->cv_znS[0], cv_mem->cv_ewtS); } if (cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS) { nrm = cvQuadSensUpdateNorm(cv_mem, nrm, cv_mem->cv_znQS[0], cv_mem->cv_ewtQS); } cv_mem->cv_tolsf = cv_mem->cv_uround * nrm; if (cv_mem->cv_tolsf > ONE) { cvProcessError(cv_mem, CV_TOO_MUCH_ACC, "CVODES", "CVode", MSGCV_TOO_MUCH_ACC, cv_mem->cv_tn); istate = CV_TOO_MUCH_ACC; cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); cv_mem->cv_tolsf *= TWO; break; } else { cv_mem->cv_tolsf = ONE; } /* Check for h below roundoff level in tn */ if (cv_mem->cv_tn + cv_mem->cv_h == cv_mem->cv_tn) { cv_mem->cv_nhnil++; if (cv_mem->cv_nhnil <= cv_mem->cv_mxhnil) cvProcessError(cv_mem, CV_WARNING, "CVODES", "CVode", MSGCV_HNIL, cv_mem->cv_tn, cv_mem->cv_h); if (cv_mem->cv_nhnil == cv_mem->cv_mxhnil) cvProcessError(cv_mem, CV_WARNING, "CVODES", "CVode", MSGCV_HNIL_DONE); } /* Call cvStep to take a step */ kflag = cvStep(cv_mem); /* Process failed step cases, and exit loop */ if (kflag != CV_SUCCESS) { istate = cvHandleFailure(cv_mem, kflag); cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); break; } nstloc++; /* If tstop is set and was reached, reset tn = tstop */ if ( cv_mem->cv_tstopset ) { troundoff = FUZZ_FACTOR * cv_mem->cv_uround * (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_h)); if ( SUNRabs(cv_mem->cv_tn - cv_mem->cv_tstop) <= troundoff) cv_mem->cv_tn = cv_mem->cv_tstop; } /* Check for root in last step taken. 
*/ if (cv_mem->cv_nrtfn > 0) { retval = cvRcheck3(cv_mem); if (retval == RTFOUND) { /* A new root was found */ cv_mem->cv_irfnd = 1; istate = CV_ROOT_RETURN; cv_mem->cv_tretlast = *tret = cv_mem->cv_tlo; break; } else if (retval == CV_RTFUNC_FAIL) { /* g failed */ cvProcessError(cv_mem, CV_RTFUNC_FAIL, "CVODES", "cvRcheck3", MSGCV_RTFUNC_FAILED, cv_mem->cv_tlo); istate = CV_RTFUNC_FAIL; break; } /* If we are at the end of the first step and we still have * some event functions that are inactive, issue a warning * as this may indicate a user error in the implementation * of the root function. */ if (cv_mem->cv_nst==1) { inactive_roots = SUNFALSE; for (ir=0; ircv_nrtfn; ir++) { if (!cv_mem->cv_gactive[ir]) { inactive_roots = SUNTRUE; break; } } if ((cv_mem->cv_mxgnull > 0) && inactive_roots) { cvProcessError(cv_mem, CV_WARNING, "CVODES", "CVode", MSGCV_INACTIVE_ROOTS); } } } /* In NORMAL mode, check if tout reached */ if ( (itask == CV_NORMAL) && (cv_mem->cv_tn-tout)*cv_mem->cv_h >= ZERO ) { istate = CV_SUCCESS; cv_mem->cv_tretlast = *tret = tout; (void) CVodeGetDky(cv_mem, tout, 0, yout); cv_mem->cv_next_q = cv_mem->cv_qprime; cv_mem->cv_next_h = cv_mem->cv_hprime; break; } /* Check if tn is at tstop or near tstop */ if ( cv_mem->cv_tstopset ) { troundoff = FUZZ_FACTOR * cv_mem->cv_uround * (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_h)); if ( SUNRabs(cv_mem->cv_tn - cv_mem->cv_tstop) <= troundoff) { (void) CVodeGetDky(cv_mem, cv_mem->cv_tstop, 0, yout); cv_mem->cv_tretlast = *tret = cv_mem->cv_tstop; cv_mem->cv_tstopset = SUNFALSE; istate = CV_TSTOP_RETURN; break; } if ( (cv_mem->cv_tn + cv_mem->cv_hprime - cv_mem->cv_tstop)*cv_mem->cv_h > ZERO ) { cv_mem->cv_hprime = (cv_mem->cv_tstop - cv_mem->cv_tn)*(ONE-FOUR*cv_mem->cv_uround); cv_mem->cv_eta = cv_mem->cv_hprime / cv_mem->cv_h; } } /* In ONE_STEP mode, copy y and exit loop */ if (itask == CV_ONE_STEP) { istate = CV_SUCCESS; cv_mem->cv_tretlast = *tret = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], yout); 
cv_mem->cv_next_q = cv_mem->cv_qprime; cv_mem->cv_next_h = cv_mem->cv_hprime; break; } } /* end looping for internal steps */ /* Load optional output */ if (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_STAGGERED1)) { cv_mem->cv_nniS = 0; cv_mem->cv_ncfnS = 0; for (is=0; iscv_Ns; is++) { cv_mem->cv_nniS += cv_mem->cv_nniS1[is]; cv_mem->cv_ncfnS += cv_mem->cv_ncfnS1[is]; } } SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(istate); } /* * CVodeComputeState * * Computes y based on the current prediction and given correction. */ int CVodeComputeState(void *cvode_mem, N_Vector ycor, N_Vector y) { CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeComputeState", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); N_VLinearSum(ONE, cv_mem->cv_zn[0], ONE, ycor, y); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* * CVodeComputeStateSens * * Computes yS based on the current prediction and given correction. */ int CVodeComputeStateSens(void *cvode_mem, N_Vector *ycorS, N_Vector *yS) { int retval; CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeComputeStateSens", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); retval = N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[0], ONE, ycorS, yS); if (retval != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_VECTOROP_ERR); } SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* * CVodeComputeStateSens1 * * Computes yS[idx] based on the current prediction and given correction. 
*/ int CVodeComputeStateSens1(void *cvode_mem, int idx, N_Vector ycorS1, N_Vector yS1) { CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeComputeStateSens1", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); N_VLinearSum(ONE, cv_mem->cv_znS[0][idx], ONE, ycorS1, yS1); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Interpolated output and extraction functions * ----------------------------------------------------------------- */ /* * CVodeGetDky * * This routine computes the k-th derivative of the interpolating * polynomial at the time t and stores the result in the vector dky. * The formula is: * q * dky = SUM c(j,k) * (t - tn)^(j-k) * h^(-j) * zn[j] , * j=k * where c(j,k) = j*(j-1)*...*(j-k+1), q is the current order, and * zn[j] is the j-th column of the Nordsieck history array. * * This function is called by CVode with k = 0 and t = tout, but * may also be called directly by the user. 
*/ int CVodeGetDky(void *cvode_mem, realtype t, int k, N_Vector dky) { realtype s, r; realtype tfuzz, tp, tn1; int i, j, nvec, ier; CVodeMem cv_mem; /* Check all inputs for legality */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetDky", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); if (dky == NULL) { cvProcessError(cv_mem, CV_BAD_DKY, "CVODES", "CVodeGetDky", MSGCV_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_DKY); } if ((k < 0) || (k > cv_mem->cv_q)) { cvProcessError(cv_mem, CV_BAD_K, "CVODES", "CVodeGetDky", MSGCV_BAD_K); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_K); } /* Allow for some slack */ tfuzz = FUZZ_FACTOR * cv_mem->cv_uround * (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_hu)); if (cv_mem->cv_hu < ZERO) tfuzz = -tfuzz; tp = cv_mem->cv_tn - cv_mem->cv_hu - tfuzz; tn1 = cv_mem->cv_tn + tfuzz; if ((t-tp)*(t-tn1) > ZERO) { cvProcessError(cv_mem, CV_BAD_T, "CVODES", "CVodeGetDky", MSGCV_BAD_T, t, cv_mem->cv_tn-cv_mem->cv_hu, cv_mem->cv_tn); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_T); } /* Sum the differentiated interpolating polynomial */ nvec = 0; s = (t - cv_mem->cv_tn) / cv_mem->cv_h; for (j=cv_mem->cv_q; j >= k; j--) { cv_mem->cv_cvals[nvec] = ONE; for (i=j; i >= j-k+1; i--) cv_mem->cv_cvals[nvec] *= i; for (i=0; i < j-k; i++) cv_mem->cv_cvals[nvec] *= s; cv_mem->cv_Xvecs[nvec] = cv_mem->cv_zn[j]; nvec += 1; } ier = N_VLinearCombination(nvec, cv_mem->cv_cvals, cv_mem->cv_Xvecs, dky); if (ier != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_VECTOROP_ERR); } if (k == 0) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } r = SUNRpowerI(cv_mem->cv_h, -k); N_VScale(r, dky, dky); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* * CVodeGetQuad * * This routine extracts quadrature solution into yQout at the * time which CVode returned the solution. 
* This is just a wrapper that calls CVodeGetQuadDky with k=0. */ int CVodeGetQuad(void *cvode_mem, realtype *tret, N_Vector yQout) { CVodeMem cv_mem; int flag; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuad", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); *tret = cv_mem->cv_tretlast; flag = CVodeGetQuadDky(cvode_mem,cv_mem->cv_tretlast,0,yQout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* * CVodeGetQuadDky * * CVodeQuadDky computes the kth derivative of the yQ function at * time t, where tn-hu <= t <= tn, tn denotes the current * internal time reached, and hu is the last internal step size * successfully used by the solver. The user may request * k=0, 1, ..., qu, where qu is the current order. * The derivative vector is returned in dky. This vector * must be allocated by the caller. It is only legal to call this * function after a successful return from CVode with quadrature * computation enabled. 
*/ int CVodeGetQuadDky(void *cvode_mem, realtype t, int k, N_Vector dkyQ) { realtype s, r; realtype tfuzz, tp, tn1; int i, j, nvec, ier; CVodeMem cv_mem; /* Check all inputs for legality */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadDky", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); if(cv_mem->cv_quadr != SUNTRUE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeGetQuadDky", MSGCV_NO_QUAD); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_QUAD); } if (dkyQ == NULL) { cvProcessError(cv_mem, CV_BAD_DKY, "CVODES", "CVodeGetQuadDky", MSGCV_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_DKY); } if ((k < 0) || (k > cv_mem->cv_q)) { cvProcessError(cv_mem, CV_BAD_K, "CVODES", "CVodeGetQuadDky", MSGCV_BAD_K); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_K); } /* Allow for some slack */ tfuzz = FUZZ_FACTOR * cv_mem->cv_uround * (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_hu)); if (cv_mem->cv_hu < ZERO) tfuzz = -tfuzz; tp = cv_mem->cv_tn - cv_mem->cv_hu - tfuzz; tn1 = cv_mem->cv_tn + tfuzz; if ((t-tp)*(t-tn1) > ZERO) { cvProcessError(cv_mem, CV_BAD_T, "CVODES", "CVodeGetQuadDky", MSGCV_BAD_T); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_T); } /* Sum the differentiated interpolating polynomial */ nvec = 0; s = (t - cv_mem->cv_tn) / cv_mem->cv_h; for (j=cv_mem->cv_q; j >= k; j--) { cv_mem->cv_cvals[nvec] = ONE; for (i=j; i >= j-k+1; i--) cv_mem->cv_cvals[nvec] *= i; for (i=0; i < j-k; i++) cv_mem->cv_cvals[nvec] *= s; cv_mem->cv_Xvecs[nvec] = cv_mem->cv_znQ[j]; nvec += 1; } ier = N_VLinearCombination(nvec, cv_mem->cv_cvals, cv_mem->cv_Xvecs, dkyQ); if (ier != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_VECTOROP_ERR); } if (k == 0) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } r = SUNRpowerI(cv_mem->cv_h, -k); N_VScale(r, dkyQ, dkyQ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); 
return(CV_SUCCESS); } /* * CVodeGetSens * * This routine extracts sensitivity solution into ySout at the * time at which CVode returned the solution. * This is just a wrapper that calls CVodeSensDky with k=0. */ int CVodeGetSens(void *cvode_mem, realtype *tret, N_Vector *ySout) { CVodeMem cv_mem; int flag; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSens", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); *tret = cv_mem->cv_tretlast; flag = CVodeGetSensDky(cvode_mem,cv_mem->cv_tretlast,0,ySout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* * CVodeGetSens1 * * This routine extracts the is-th sensitivity solution into ySout * at the time at which CVode returned the solution. * This is just a wrapper that calls CVodeSensDky1 with k=0. */ int CVodeGetSens1(void *cvode_mem, realtype *tret, int is, N_Vector ySout) { CVodeMem cv_mem; int flag; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSens1", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); *tret = cv_mem->cv_tretlast; flag = CVodeGetSensDky1(cvode_mem,cv_mem->cv_tretlast,0,is,ySout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* * CVodeGetSensDky * * If the user calls directly CVodeSensDky then s must be allocated * prior to this call. When CVodeSensDky is called by * CVodeGetSens, only ier=CV_SUCCESS, ier=CV_NO_SENS, or * ier=CV_BAD_T are possible. 
*/ int CVodeGetSensDky(void *cvode_mem, realtype t, int k, N_Vector *dkyS) { int ier=CV_SUCCESS; int is; CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensDky", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); if (dkyS == NULL) { cvProcessError(cv_mem, CV_BAD_DKY, "CVODES", "CVodeGetSensDky", MSGCV_NULL_DKYA); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_DKY); } for (is=0; iscv_Ns; is++) { ier = CVodeGetSensDky1(cvode_mem,t,k,is,dkyS[is]); if (ier!=CV_SUCCESS) break; } SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(ier); } /* * CVodeGetSensDky1 * * CVodeSensDky1 computes the kth derivative of the yS[is] function at * time t, where tn-hu <= t <= tn, tn denotes the current * internal time reached, and hu is the last internal step size * successfully used by the solver. The user may request * is=0, 1, ..., Ns-1 and k=0, 1, ..., qu, where qu is the current * order. The derivative vector is returned in dky. This vector * must be allocated by the caller. It is only legal to call this * function after a successful return from CVode with sensitivity * computation enabled. 
*/ int CVodeGetSensDky1(void *cvode_mem, realtype t, int k, int is, N_Vector dkyS) { realtype s, r; realtype tfuzz, tp, tn1; int i, j, nvec, ier; CVodeMem cv_mem; /* Check all inputs for legality */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensDky1", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); if(cv_mem->cv_sensi != SUNTRUE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensDky1", MSGCV_NO_SENSI); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_SENS); } if (dkyS == NULL) { cvProcessError(cv_mem, CV_BAD_DKY, "CVODES", "CVodeGetSensDky1", MSGCV_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_DKY); } if ((k < 0) || (k > cv_mem->cv_q)) { cvProcessError(cv_mem, CV_BAD_K, "CVODES", "CVodeGetSensDky1", MSGCV_BAD_K); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_K); } if ((is < 0) || (is > cv_mem->cv_Ns-1)) { cvProcessError(cv_mem, CV_BAD_IS, "CVODES", "CVodeGetSensDky1", MSGCV_BAD_IS); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_IS); } /* Allow for some slack */ tfuzz = FUZZ_FACTOR * cv_mem->cv_uround * (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_hu)); if (cv_mem->cv_hu < ZERO) tfuzz = -tfuzz; tp = cv_mem->cv_tn - cv_mem->cv_hu - tfuzz; tn1 = cv_mem->cv_tn + tfuzz; if ((t-tp)*(t-tn1) > ZERO) { cvProcessError(cv_mem, CV_BAD_T, "CVODES", "CVodeGetSensDky1", MSGCV_BAD_T); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_T); } /* Sum the differentiated interpolating polynomial */ nvec = 0; s = (t - cv_mem->cv_tn) / cv_mem->cv_h; for (j=cv_mem->cv_q; j >= k; j--) { cv_mem->cv_cvals[nvec] = ONE; for (i=j; i >= j-k+1; i--) cv_mem->cv_cvals[nvec] *= i; for (i=0; i < j-k; i++) cv_mem->cv_cvals[nvec] *= s; cv_mem->cv_Xvecs[nvec] = cv_mem->cv_znS[j][is]; nvec += 1; } ier = N_VLinearCombination(nvec, cv_mem->cv_cvals, cv_mem->cv_Xvecs, dkyS); if (ier != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); 
return(CV_VECTOROP_ERR); } if (k == 0) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } r = SUNRpowerI(cv_mem->cv_h, -k); N_VScale(r, dkyS, dkyS); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* * CVodeGetQuadSens and CVodeGetQuadSens1 * * Extraction functions for all or only one of the quadrature sensitivity * vectors at the time at which CVode returned the ODE solution. */ int CVodeGetQuadSens(void *cvode_mem, realtype *tret, N_Vector *yQSout) { CVodeMem cv_mem; int flag; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSens", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); *tret = cv_mem->cv_tretlast; flag = CVodeGetQuadSensDky(cvode_mem,cv_mem->cv_tretlast,0,yQSout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } int CVodeGetQuadSens1(void *cvode_mem, realtype *tret, int is, N_Vector yQSout) { CVodeMem cv_mem; int flag; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSens1", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); *tret = cv_mem->cv_tretlast; flag = CVodeGetQuadSensDky1(cvode_mem,cv_mem->cv_tretlast,0,is,yQSout); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* * CVodeGetQuadSensDky and CVodeGetQuadSensDky1 * * Dense output functions for all or only one of the quadrature sensitivity * vectors (or derivative thereof). 
*/ int CVodeGetQuadSensDky(void *cvode_mem, realtype t, int k, N_Vector *dkyQS_all) { int ier=CV_SUCCESS; int is; CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSensDky", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); if (dkyQS_all == NULL) { cvProcessError(cv_mem, CV_BAD_DKY, "CVODES", "CVodeGetSensDky", MSGCV_NULL_DKYA); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_DKY); } for (is=0; iscv_Ns; is++) { ier = CVodeGetQuadSensDky1(cvode_mem,t,k,is,dkyQS_all[is]); if (ier!=CV_SUCCESS) break; } SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(ier); } int CVodeGetQuadSensDky1(void *cvode_mem, realtype t, int k, int is, N_Vector dkyQS) { realtype s, r; realtype tfuzz, tp, tn1; int i, j, nvec, ier; CVodeMem cv_mem; /* Check all inputs for legality */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSensDky1", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); if(cv_mem->cv_quadr_sensi != SUNTRUE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeGetQuadSensDky1", MSGCV_NO_QUADSENSI); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_QUADSENS); } if (dkyQS == NULL) { cvProcessError(cv_mem, CV_BAD_DKY, "CVODES", "CVodeGetQuadSensDky1", MSGCV_NULL_DKY); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_DKY); } if ((k < 0) || (k > cv_mem->cv_q)) { cvProcessError(cv_mem, CV_BAD_K, "CVODES", "CVodeGetQuadSensDky1", MSGCV_BAD_K); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_K); } if ((is < 0) || (is > cv_mem->cv_Ns-1)) { cvProcessError(cv_mem, CV_BAD_IS, "CVODES", "CVodeGetQuadSensDky1", MSGCV_BAD_IS); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_IS); } /* Allow for some slack */ tfuzz = FUZZ_FACTOR * cv_mem->cv_uround * (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_hu)); if (cv_mem->cv_hu < ZERO) tfuzz = -tfuzz; tp = 
cv_mem->cv_tn - cv_mem->cv_hu - tfuzz; tn1 = cv_mem->cv_tn + tfuzz; if ((t-tp)*(t-tn1) > ZERO) { cvProcessError(cv_mem, CV_BAD_T, "CVODES", "CVodeGetQuadSensDky1", MSGCV_BAD_T); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_T); } /* Sum the differentiated interpolating polynomial */ nvec = 0; s = (t - cv_mem->cv_tn) / cv_mem->cv_h; for (j=cv_mem->cv_q; j >= k; j--) { cv_mem->cv_cvals[nvec] = ONE; for (i=j; i >= j-k+1; i--) cv_mem->cv_cvals[nvec] *= i; for (i=0; i < j-k; i++) cv_mem->cv_cvals[nvec] *= s; cv_mem->cv_Xvecs[nvec] = cv_mem->cv_znQS[j][is]; nvec += 1; } ier = N_VLinearCombination(nvec, cv_mem->cv_cvals, cv_mem->cv_Xvecs, dkyQS); if (ier != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_VECTOROP_ERR); } if (k == 0) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } r = SUNRpowerI(cv_mem->cv_h, -k); N_VScale(r, dkyQS, dkyQS); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Deallocation functions * ----------------------------------------------------------------- */ /* * CVodeFree * * This routine frees the problem memory allocated by CVodeInit. * Such memory includes all the vectors allocated by cvAllocVectors, * and the memory lmem for the linear solver (deallocated by a call * to lfree), as well as (if Ns!=0) all memory allocated for * sensitivity computations by CVodeSensInit. 
*/ void CVodeFree(void **cvode_mem) { CVodeMem cv_mem; if (*cvode_mem == NULL) return; cv_mem = (CVodeMem) (*cvode_mem); cvFreeVectors(cv_mem); /* if CVODE created the nonlinear solver object then free it */ if (cv_mem->ownNLS) { SUNNonlinSolFree(cv_mem->NLS); cv_mem->ownNLS = SUNFALSE; cv_mem->NLS = NULL; } CVodeQuadFree(cv_mem); CVodeSensFree(cv_mem); CVodeQuadSensFree(cv_mem); CVodeAdjFree(cv_mem); if (cv_mem->cv_lfree != NULL) cv_mem->cv_lfree(cv_mem); if (cv_mem->cv_nrtfn > 0) { free(cv_mem->cv_glo); cv_mem->cv_glo = NULL; free(cv_mem->cv_ghi); cv_mem->cv_ghi = NULL; free(cv_mem->cv_grout); cv_mem->cv_grout = NULL; free(cv_mem->cv_iroots); cv_mem->cv_iroots = NULL; free(cv_mem->cv_rootdir); cv_mem->cv_rootdir = NULL; free(cv_mem->cv_gactive); cv_mem->cv_gactive = NULL; } free(cv_mem->cv_cvals); cv_mem->cv_cvals = NULL; free(cv_mem->cv_Xvecs); cv_mem->cv_Xvecs = NULL; free(cv_mem->cv_Zvecs); cv_mem->cv_Zvecs = NULL; free(*cvode_mem); *cvode_mem = NULL; } /* * CVodeQuadFree * * CVodeQuadFree frees the problem memory in cvode_mem allocated * for quadrature integration. Its only argument is the pointer * cvode_mem returned by CVodeCreate. */ void CVodeQuadFree(void *cvode_mem) { CVodeMem cv_mem; if (cvode_mem == NULL) return; cv_mem = (CVodeMem) cvode_mem; if(cv_mem->cv_QuadMallocDone) { cvQuadFreeVectors(cv_mem); cv_mem->cv_QuadMallocDone = SUNFALSE; cv_mem->cv_quadr = SUNFALSE; } } /* * CVodeSensFree * * CVodeSensFree frees the problem memory in cvode_mem allocated * for sensitivity analysis. Its only argument is the pointer * cvode_mem returned by CVodeCreate. 
*/

void CVodeSensFree(void *cvode_mem)
{
  CVodeMem cv_mem;

  if (cvode_mem == NULL) return;
  cv_mem = (CVodeMem) cvode_mem;

  if(cv_mem->cv_SensMallocDone) {
    /* counter arrays are allocated only for the STAGGERED1 method */
    if (cv_mem->cv_stgr1alloc) {
      free(cv_mem->cv_ncfS1); cv_mem->cv_ncfS1 = NULL;
      free(cv_mem->cv_ncfnS1); cv_mem->cv_ncfnS1 = NULL;
      free(cv_mem->cv_nniS1); cv_mem->cv_nniS1 = NULL;
      cv_mem->cv_stgr1alloc = SUNFALSE;
    }
    cvSensFreeVectors(cv_mem);
    cv_mem->cv_SensMallocDone = SUNFALSE;
    cv_mem->cv_sensi = SUNFALSE;
  }

  /* free any vector wrappers (state+sensitivity bundles used by the
     SIMULTANEOUS and STAGGERED nonlinear solvers) */
  if (cv_mem->simMallocDone) {
    N_VDestroy(cv_mem->zn0Sim); cv_mem->zn0Sim = NULL;
    N_VDestroy(cv_mem->ycorSim); cv_mem->ycorSim = NULL;
    N_VDestroy(cv_mem->ewtSim); cv_mem->ewtSim = NULL;
    cv_mem->simMallocDone = SUNFALSE;
  }
  if (cv_mem->stgMallocDone) {
    N_VDestroy(cv_mem->zn0Stg); cv_mem->zn0Stg = NULL;
    N_VDestroy(cv_mem->ycorStg); cv_mem->ycorStg = NULL;
    N_VDestroy(cv_mem->ewtStg); cv_mem->ewtStg = NULL;
    cv_mem->stgMallocDone = SUNFALSE;
  }

  /* if CVODES created a NLS object then free it */
  if (cv_mem->ownNLSsim) {
    SUNNonlinSolFree(cv_mem->NLSsim);
    cv_mem->ownNLSsim = SUNFALSE;
    cv_mem->NLSsim = NULL;
  }
  if (cv_mem->ownNLSstg) {
    SUNNonlinSolFree(cv_mem->NLSstg);
    cv_mem->ownNLSstg = SUNFALSE;
    cv_mem->NLSstg = NULL;
  }
  if (cv_mem->ownNLSstg1) {
    SUNNonlinSolFree(cv_mem->NLSstg1);
    cv_mem->ownNLSstg1 = SUNFALSE;
    cv_mem->NLSstg1 = NULL;
  }

  /* free min atol array if necessary */
  if (cv_mem->cv_atolSmin0) {
    free(cv_mem->cv_atolSmin0);
    cv_mem->cv_atolSmin0 = NULL;
  }
}

/*
 * CVodeQuadSensFree
 *
 * CVodeQuadSensFree frees the problem memory in cvode_mem allocated
 * for quadrature sensitivity analysis. Its only argument is the pointer
 * cvode_mem returned by CVodeCreate.
*/

void CVodeQuadSensFree(void *cvode_mem)
{
  CVodeMem mem;

  if (cvode_mem == NULL) return;
  mem = (CVodeMem) cvode_mem;

  /* Release the quadrature-sensitivity workspace, if ever allocated */
  if (mem->cv_QuadSensMallocDone) {
    cvQuadSensFreeVectors(mem);
    mem->cv_QuadSensMallocDone = SUNFALSE;
    mem->cv_quadr_sensi = SUNFALSE;
  }

  /* Release the minimum-abstol flag array, if present */
  if (mem->cv_atolQSmin0) {
    free(mem->cv_atolQSmin0);
    mem->cv_atolQSmin0 = NULL;
  }
}

/*
 * =================================================================
 * Private Functions Implementation
 * =================================================================
 */

/*
 * cvCheckNvector
 * Verifies that the template N_Vector implements every operation the
 * integrator needs; returns SUNFALSE if any is missing.
 */
static booleantype cvCheckNvector(N_Vector tmpl)
{
  booleantype ok;

  ok = (tmpl->ops->nvclone     != NULL) &&
       (tmpl->ops->nvdestroy   != NULL) &&
       (tmpl->ops->nvlinearsum != NULL) &&
       (tmpl->ops->nvconst     != NULL) &&
       (tmpl->ops->nvprod      != NULL) &&
       (tmpl->ops->nvdiv       != NULL) &&
       (tmpl->ops->nvscale     != NULL) &&
       (tmpl->ops->nvabs       != NULL) &&
       (tmpl->ops->nvinv       != NULL) &&
       (tmpl->ops->nvaddconst  != NULL) &&
       (tmpl->ops->nvmaxnorm   != NULL) &&
       (tmpl->ops->nvwrmsnorm  != NULL);

  return(ok ? SUNTRUE : SUNFALSE);
}

/*
 * -----------------------------------------------------------------
 * Memory allocation/deallocation
 * -----------------------------------------------------------------
 */

/*
 * cvAllocVectors
 *
 * This routine allocates the CVODES vectors ewt, acor, tempv, ftemp, and
 * zn[0], ..., zn[maxord].
 * If all memory allocations are successful, cvAllocVectors returns SUNTRUE.
 * Otherwise all allocated memory is freed and cvAllocVectors returns SUNFALSE.
 * This routine also sets the optional outputs lrw and liw, which are
 * (respectively) the lengths of the real and integer work spaces
 * allocated here.
*/

/* Each clone failure below unwinds everything allocated so far, so a
   failed call leaves no memory behind. */
static booleantype cvAllocVectors(CVodeMem cv_mem, N_Vector tmpl)
{
  int i, j;

  /* Allocate ewt, acor, tempv, ftemp */

  cv_mem->cv_ewt = N_VClone(tmpl);
  if (cv_mem->cv_ewt == NULL) return(SUNFALSE);

  cv_mem->cv_acor = N_VClone(tmpl);
  if (cv_mem->cv_acor == NULL) {
    N_VDestroy(cv_mem->cv_ewt);
    return(SUNFALSE);
  }

  cv_mem->cv_tempv = N_VClone(tmpl);
  if (cv_mem->cv_tempv == NULL) {
    N_VDestroy(cv_mem->cv_ewt);
    N_VDestroy(cv_mem->cv_acor);
    return(SUNFALSE);
  }

  cv_mem->cv_ftemp = N_VClone(tmpl);
  if (cv_mem->cv_ftemp == NULL) {
    N_VDestroy(cv_mem->cv_tempv);
    N_VDestroy(cv_mem->cv_ewt);
    N_VDestroy(cv_mem->cv_acor);
    return(SUNFALSE);
  }

  cv_mem->cv_vtemp1 = N_VClone(tmpl);
  if (cv_mem->cv_vtemp1 == NULL) {
    N_VDestroy(cv_mem->cv_ftemp);
    N_VDestroy(cv_mem->cv_tempv);
    N_VDestroy(cv_mem->cv_ewt);
    N_VDestroy(cv_mem->cv_acor);
    return(SUNFALSE);
  }

  cv_mem->cv_vtemp2 = N_VClone(tmpl);
  if (cv_mem->cv_vtemp2 == NULL) {
    N_VDestroy(cv_mem->cv_vtemp1);
    N_VDestroy(cv_mem->cv_ftemp);
    N_VDestroy(cv_mem->cv_tempv);
    N_VDestroy(cv_mem->cv_ewt);
    N_VDestroy(cv_mem->cv_acor);
    return(SUNFALSE);
  }

  cv_mem->cv_vtemp3 = N_VClone(tmpl);
  if (cv_mem->cv_vtemp3 == NULL) {
    N_VDestroy(cv_mem->cv_vtemp2);
    N_VDestroy(cv_mem->cv_vtemp1);
    N_VDestroy(cv_mem->cv_ftemp);
    N_VDestroy(cv_mem->cv_tempv);
    N_VDestroy(cv_mem->cv_ewt);
    N_VDestroy(cv_mem->cv_acor);
    return(SUNFALSE);
  }

  /* Allocate zn[0] ... zn[qmax] */

  for (j=0; j <= cv_mem->cv_qmax; j++) {
    cv_mem->cv_zn[j] = N_VClone(tmpl);
    if (cv_mem->cv_zn[j] == NULL) {
      N_VDestroy(cv_mem->cv_ewt);
      N_VDestroy(cv_mem->cv_acor);
      N_VDestroy(cv_mem->cv_tempv);
      N_VDestroy(cv_mem->cv_ftemp);
      N_VDestroy(cv_mem->cv_vtemp1);
      N_VDestroy(cv_mem->cv_vtemp2);
      N_VDestroy(cv_mem->cv_vtemp3);
      for (i=0; i < j; i++) N_VDestroy(cv_mem->cv_zn[i]);
      return(SUNFALSE);
    }
  }

  /* Update solver workspace lengths
     (qmax+1 zn vectors + 7 scratch vectors = qmax+8) */
  cv_mem->cv_lrw += (cv_mem->cv_qmax + 8)*cv_mem->cv_lrw1;
  cv_mem->cv_liw += (cv_mem->cv_qmax + 8)*cv_mem->cv_liw1;

  /* Store the value of qmax used here */
  cv_mem->cv_qmax_alloc = cv_mem->cv_qmax;

  return(SUNTRUE);
}

/*
 * cvFreeVectors
 *
 * This routine frees the vectors allocated in cvAllocVectors.
 */
static void cvFreeVectors(CVodeMem cv_mem)
{
  int j, maxord;

  /* free against the qmax in effect at allocation time, which may
     differ from the current cv_qmax */
  maxord = cv_mem->cv_qmax_alloc;

  N_VDestroy(cv_mem->cv_ewt);
  N_VDestroy(cv_mem->cv_acor);
  N_VDestroy(cv_mem->cv_tempv);
  N_VDestroy(cv_mem->cv_ftemp);
  N_VDestroy(cv_mem->cv_vtemp1);
  N_VDestroy(cv_mem->cv_vtemp2);
  N_VDestroy(cv_mem->cv_vtemp3);
  for (j=0; j <= maxord; j++) N_VDestroy(cv_mem->cv_zn[j]);

  cv_mem->cv_lrw -= (maxord + 8)*cv_mem->cv_lrw1;
  cv_mem->cv_liw -= (maxord + 8)*cv_mem->cv_liw1;

  if (cv_mem->cv_VabstolMallocDone) {
    N_VDestroy(cv_mem->cv_Vabstol);
    cv_mem->cv_lrw -= cv_mem->cv_lrw1;
    cv_mem->cv_liw -= cv_mem->cv_liw1;
  }

  if (cv_mem->cv_constraintsMallocDone) {
    N_VDestroy(cv_mem->cv_constraints);
    cv_mem->cv_lrw -= cv_mem->cv_lrw1;
    cv_mem->cv_liw -= cv_mem->cv_liw1;
  }
}

/*
 * cvQuadAllocVectors
 *
 * NOTE: Space for ewtQ is allocated even when errconQ=SUNFALSE,
 * although in this case, ewtQ is never used. The reason for this
 * decision is to allow the user to re-initialize the quadrature
 * computation with errconQ=SUNTRUE, after an initialization with
 * errconQ=SUNFALSE, without new memory allocation within
 * CVodeQuadReInit.
*/

/* Allocates the quadrature workspace (ewtQ, acorQ, yQ, tempvQ and the
   znQ history array); unwinds on any clone failure. */
static booleantype cvQuadAllocVectors(CVodeMem cv_mem, N_Vector tmpl)
{
  int i, j;

  /* Allocate ewtQ */
  cv_mem->cv_ewtQ = N_VClone(tmpl);
  if (cv_mem->cv_ewtQ == NULL) {
    return(SUNFALSE);
  }

  /* Allocate acorQ */
  cv_mem->cv_acorQ = N_VClone(tmpl);
  if (cv_mem->cv_acorQ == NULL) {
    N_VDestroy(cv_mem->cv_ewtQ);
    return(SUNFALSE);
  }

  /* Allocate yQ */
  cv_mem->cv_yQ = N_VClone(tmpl);
  if (cv_mem->cv_yQ == NULL) {
    N_VDestroy(cv_mem->cv_ewtQ);
    N_VDestroy(cv_mem->cv_acorQ);
    return(SUNFALSE);
  }

  /* Allocate tempvQ */
  cv_mem->cv_tempvQ = N_VClone(tmpl);
  if (cv_mem->cv_tempvQ == NULL) {
    N_VDestroy(cv_mem->cv_ewtQ);
    N_VDestroy(cv_mem->cv_acorQ);
    N_VDestroy(cv_mem->cv_yQ);
    return(SUNFALSE);
  }

  /* Allocate znQ[0] ... znQ[maxord] */
  for (j=0; j <= cv_mem->cv_qmax; j++) {
    cv_mem->cv_znQ[j] = N_VClone(tmpl);
    if (cv_mem->cv_znQ[j] == NULL) {
      N_VDestroy(cv_mem->cv_ewtQ);
      N_VDestroy(cv_mem->cv_acorQ);
      N_VDestroy(cv_mem->cv_yQ);
      N_VDestroy(cv_mem->cv_tempvQ);
      for (i=0; i < j; i++) N_VDestroy(cv_mem->cv_znQ[i]);
      return(SUNFALSE);
    }
  }

  /* Store the value of qmax used here */
  cv_mem->cv_qmax_allocQ = cv_mem->cv_qmax;

  /* Update solver workspace lengths
     (qmax+1 znQ vectors + 4 scratch vectors = qmax+5) */
  cv_mem->cv_lrw += (cv_mem->cv_qmax + 5)*cv_mem->cv_lrw1Q;
  cv_mem->cv_liw += (cv_mem->cv_qmax + 5)*cv_mem->cv_liw1Q;

  return(SUNTRUE);
}

/*
 * cvQuadFreeVectors
 *
 * This routine frees the CVODES vectors allocated in cvQuadAllocVectors.
*/ static void cvQuadFreeVectors(CVodeMem cv_mem) { int j, maxord; maxord = cv_mem->cv_qmax_allocQ; N_VDestroy(cv_mem->cv_ewtQ); N_VDestroy(cv_mem->cv_acorQ); N_VDestroy(cv_mem->cv_yQ); N_VDestroy(cv_mem->cv_tempvQ); for (j=0; j<=maxord; j++) N_VDestroy(cv_mem->cv_znQ[j]); cv_mem->cv_lrw -= (maxord + 5)*cv_mem->cv_lrw1Q; cv_mem->cv_liw -= (maxord + 5)*cv_mem->cv_liw1Q; if (cv_mem->cv_VabstolQMallocDone) { N_VDestroy(cv_mem->cv_VabstolQ); cv_mem->cv_lrw -= cv_mem->cv_lrw1Q; cv_mem->cv_liw -= cv_mem->cv_liw1Q; } cv_mem->cv_VabstolQMallocDone = SUNFALSE; } /* * cvSensAllocVectors * * Create (through duplication) N_Vectors used for sensitivity analysis, * using the N_Vector 'tmpl' as a template. */ static booleantype cvSensAllocVectors(CVodeMem cv_mem, N_Vector tmpl) { int i, j; /* Allocate yS */ cv_mem->cv_yS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl); if (cv_mem->cv_yS == NULL) { return(SUNFALSE); } /* Allocate ewtS */ cv_mem->cv_ewtS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl); if (cv_mem->cv_ewtS == NULL) { N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns); return(SUNFALSE); } /* Allocate acorS */ cv_mem->cv_acorS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl); if (cv_mem->cv_acorS == NULL) { N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ewtS, cv_mem->cv_Ns); return(SUNFALSE); } /* Allocate tempvS */ cv_mem->cv_tempvS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl); if (cv_mem->cv_tempvS == NULL) { N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ewtS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_acorS, cv_mem->cv_Ns); return(SUNFALSE); } /* Allocate ftempS */ cv_mem->cv_ftempS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl); if (cv_mem->cv_ftempS == NULL) { N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ewtS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_acorS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_tempvS, cv_mem->cv_Ns); return(SUNFALSE); } /* 
Allocate znS */ for (j=0; j<=cv_mem->cv_qmax; j++) { cv_mem->cv_znS[j] = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl); if (cv_mem->cv_znS[j] == NULL) { N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ewtS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_acorS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_tempvS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ftempS, cv_mem->cv_Ns); for (i=0; icv_znS[i], cv_mem->cv_Ns); return(SUNFALSE); } } /* Allocate space for pbar and plist */ cv_mem->cv_pbar = NULL; cv_mem->cv_pbar = (realtype *)malloc(cv_mem->cv_Ns*sizeof(realtype)); if (cv_mem->cv_pbar == NULL) { N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ewtS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_acorS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_tempvS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ftempS, cv_mem->cv_Ns); for (i=0; i<=cv_mem->cv_qmax; i++) N_VDestroyVectorArray(cv_mem->cv_znS[i], cv_mem->cv_Ns); return(SUNFALSE); } cv_mem->cv_plist = NULL; cv_mem->cv_plist = (int *)malloc(cv_mem->cv_Ns*sizeof(int)); if (cv_mem->cv_plist == NULL) { N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ewtS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_acorS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_tempvS, cv_mem->cv_Ns); N_VDestroyVectorArray(cv_mem->cv_ftempS, cv_mem->cv_Ns); for (i=0; i<=cv_mem->cv_qmax; i++) N_VDestroyVectorArray(cv_mem->cv_znS[i], cv_mem->cv_Ns); free(cv_mem->cv_pbar); cv_mem->cv_pbar = NULL; return(SUNFALSE); } /* Update solver workspace lengths */ cv_mem->cv_lrw += (cv_mem->cv_qmax + 6)*cv_mem->cv_Ns*cv_mem->cv_lrw1 + cv_mem->cv_Ns; cv_mem->cv_liw += (cv_mem->cv_qmax + 6)*cv_mem->cv_Ns*cv_mem->cv_liw1 + cv_mem->cv_Ns; /* Store the value of qmax used here */ cv_mem->cv_qmax_allocS = cv_mem->cv_qmax; return(SUNTRUE); } /* * cvSensFreeVectors * * This routine frees the CVODES vectors allocated in 
cvSensAllocVectors.
 */
static void cvSensFreeVectors(CVodeMem cv_mem)
{
  int j, maxord;

  /* free against the qmax in effect at allocation time */
  maxord = cv_mem->cv_qmax_allocS;

  N_VDestroyVectorArray(cv_mem->cv_yS, cv_mem->cv_Ns);
  N_VDestroyVectorArray(cv_mem->cv_ewtS, cv_mem->cv_Ns);
  N_VDestroyVectorArray(cv_mem->cv_acorS, cv_mem->cv_Ns);
  N_VDestroyVectorArray(cv_mem->cv_tempvS, cv_mem->cv_Ns);
  N_VDestroyVectorArray(cv_mem->cv_ftempS, cv_mem->cv_Ns);

  for (j=0; j<=maxord; j++)
    N_VDestroyVectorArray(cv_mem->cv_znS[j], cv_mem->cv_Ns);

  free(cv_mem->cv_pbar); cv_mem->cv_pbar = NULL;
  free(cv_mem->cv_plist); cv_mem->cv_plist = NULL;

  cv_mem->cv_lrw -= (maxord + 6)*cv_mem->cv_Ns*cv_mem->cv_lrw1 + cv_mem->cv_Ns;
  cv_mem->cv_liw -= (maxord + 6)*cv_mem->cv_Ns*cv_mem->cv_liw1 + cv_mem->cv_Ns;

  if (cv_mem->cv_VabstolSMallocDone) {
    N_VDestroyVectorArray(cv_mem->cv_VabstolS, cv_mem->cv_Ns);
    cv_mem->cv_lrw -= cv_mem->cv_Ns*cv_mem->cv_lrw1;
    cv_mem->cv_liw -= cv_mem->cv_Ns*cv_mem->cv_liw1;
  }
  if (cv_mem->cv_SabstolSMallocDone) {
    free(cv_mem->cv_SabstolS); cv_mem->cv_SabstolS = NULL;
    cv_mem->cv_lrw -= cv_mem->cv_Ns;
  }
  cv_mem->cv_VabstolSMallocDone = SUNFALSE;
  cv_mem->cv_SabstolSMallocDone = SUNFALSE;
}

/*
 * cvQuadSensAllocVectors
 *
 * Create (through duplication) N_Vectors used for quadrature sensitivity analysis,
 * using the N_Vector 'tmpl' as a template.
*/

/* Allocates the quadrature-sensitivity workspace (ftempQ plus Ns-sized
   vector arrays yQS, ewtQS, acorQS, tempvQS and the znQS history);
   unwinds on any allocation failure. */
static booleantype cvQuadSensAllocVectors(CVodeMem cv_mem, N_Vector tmpl)
{
  int i, j;

  /* Allocate ftempQ */
  cv_mem->cv_ftempQ = N_VClone(tmpl);
  if (cv_mem->cv_ftempQ == NULL) {
    return(SUNFALSE);
  }

  /* Allocate yQS */
  cv_mem->cv_yQS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl);
  if (cv_mem->cv_yQS == NULL) {
    N_VDestroy(cv_mem->cv_ftempQ);
    return(SUNFALSE);
  }

  /* Allocate ewtQS */
  cv_mem->cv_ewtQS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl);
  if (cv_mem->cv_ewtQS == NULL) {
    N_VDestroy(cv_mem->cv_ftempQ);
    N_VDestroyVectorArray(cv_mem->cv_yQS, cv_mem->cv_Ns);
    return(SUNFALSE);
  }

  /* Allocate acorQS */
  cv_mem->cv_acorQS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl);
  if (cv_mem->cv_acorQS == NULL) {
    N_VDestroy(cv_mem->cv_ftempQ);
    N_VDestroyVectorArray(cv_mem->cv_yQS, cv_mem->cv_Ns);
    N_VDestroyVectorArray(cv_mem->cv_ewtQS, cv_mem->cv_Ns);
    return(SUNFALSE);
  }

  /* Allocate tempvQS */
  cv_mem->cv_tempvQS = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl);
  if (cv_mem->cv_tempvQS == NULL) {
    N_VDestroy(cv_mem->cv_ftempQ);
    N_VDestroyVectorArray(cv_mem->cv_yQS, cv_mem->cv_Ns);
    N_VDestroyVectorArray(cv_mem->cv_ewtQS, cv_mem->cv_Ns);
    N_VDestroyVectorArray(cv_mem->cv_acorQS, cv_mem->cv_Ns);
    return(SUNFALSE);
  }

  /* Allocate znQS */
  for (j=0; j<=cv_mem->cv_qmax; j++) {
    cv_mem->cv_znQS[j] = N_VCloneVectorArray(cv_mem->cv_Ns, tmpl);
    if (cv_mem->cv_znQS[j] == NULL) {
      N_VDestroy(cv_mem->cv_ftempQ);
      N_VDestroyVectorArray(cv_mem->cv_yQS, cv_mem->cv_Ns);
      N_VDestroyVectorArray(cv_mem->cv_ewtQS, cv_mem->cv_Ns);
      N_VDestroyVectorArray(cv_mem->cv_acorQS, cv_mem->cv_Ns);
      N_VDestroyVectorArray(cv_mem->cv_tempvQS, cv_mem->cv_Ns);
      /* Fix: restored loop header lost in transmission ("for (i=0; icv_znQS...") */
      for (i=0; i<j; i++)
        N_VDestroyVectorArray(cv_mem->cv_znQS[i], cv_mem->cv_Ns);
      return(SUNFALSE);
    }
  }

  /* Update solver workspace lengths */
  cv_mem->cv_lrw += (cv_mem->cv_qmax + 5)*cv_mem->cv_Ns*cv_mem->cv_lrw1Q;
  cv_mem->cv_liw += (cv_mem->cv_qmax + 5)*cv_mem->cv_Ns*cv_mem->cv_liw1Q;

  /* Store the value of qmax used here */
  cv_mem->cv_qmax_allocQS = cv_mem->cv_qmax;

  return(SUNTRUE);
}

/*
 * cvQuadSensFreeVectors
 *
 * This routine frees
the CVODES vectors allocated in cvQuadSensAllocVectors.
 */
static void cvQuadSensFreeVectors(CVodeMem cv_mem)
{
  int j, maxord;

  /* free against the qmax in effect at allocation time */
  maxord = cv_mem->cv_qmax_allocQS;

  N_VDestroy(cv_mem->cv_ftempQ);

  N_VDestroyVectorArray(cv_mem->cv_yQS, cv_mem->cv_Ns);
  N_VDestroyVectorArray(cv_mem->cv_ewtQS, cv_mem->cv_Ns);
  N_VDestroyVectorArray(cv_mem->cv_acorQS, cv_mem->cv_Ns);
  N_VDestroyVectorArray(cv_mem->cv_tempvQS, cv_mem->cv_Ns);

  for (j=0; j<=maxord; j++)
    N_VDestroyVectorArray(cv_mem->cv_znQS[j], cv_mem->cv_Ns);

  cv_mem->cv_lrw -= (maxord + 5)*cv_mem->cv_Ns*cv_mem->cv_lrw1Q;
  cv_mem->cv_liw -= (maxord + 5)*cv_mem->cv_Ns*cv_mem->cv_liw1Q;

  if (cv_mem->cv_VabstolQSMallocDone) {
    N_VDestroyVectorArray(cv_mem->cv_VabstolQS, cv_mem->cv_Ns);
    cv_mem->cv_lrw -= cv_mem->cv_Ns*cv_mem->cv_lrw1Q;
    cv_mem->cv_liw -= cv_mem->cv_Ns*cv_mem->cv_liw1Q;
  }
  if (cv_mem->cv_SabstolQSMallocDone) {
    free(cv_mem->cv_SabstolQS); cv_mem->cv_SabstolQS = NULL;
    cv_mem->cv_lrw -= cv_mem->cv_Ns;
  }
  cv_mem->cv_VabstolQSMallocDone = SUNFALSE;
  cv_mem->cv_SabstolQSMallocDone = SUNFALSE;
}

/*
 * -----------------------------------------------------------------
 * Initial setup
 * -----------------------------------------------------------------
 */

/*
 * cvInitialSetup
 *
 * This routine performs input consistency checks at the first step.
 * If needed, it also checks the linear solver module and calls the
 * linear solver initialization routine.
 */
static int cvInitialSetup(CVodeMem cv_mem)
{
  int ier;
  booleantype conOK;

  /* Did the user specify tolerances? */
  if (cv_mem->cv_itol == CV_NN) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NO_TOL);
    return(CV_ILL_INPUT);
  }

  /* If using a built-in routine for error weights with abstol==0,
     ensure that N_VMin is available */
  if ((!cv_mem->cv_user_efun) && (cv_mem->cv_atolmin0) && (!cv_mem->cv_tempv->ops->nvmin)) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup",
                   "Missing N_VMin routine from N_Vector");
    return(CV_ILL_INPUT);
  }

  /* Set data for efun */
  if (cv_mem->cv_user_efun) cv_mem->cv_e_data = cv_mem->cv_user_data;
  else cv_mem->cv_e_data = cv_mem;

  /* Check to see if y0 satisfies constraints */
  if (cv_mem->cv_constraintsSet) {
    /* constraints are incompatible with the SIMULTANEOUS corrector */
    if (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_SIMULTANEOUS)) {
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_BAD_ISM_CONSTR);
      return(CV_ILL_INPUT);
    }
    conOK = N_VConstrMask(cv_mem->cv_constraints, cv_mem->cv_zn[0], cv_mem->cv_tempv);
    if (!conOK) {
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_Y0_FAIL_CONSTR);
      return(CV_ILL_INPUT);
    }
  }

  /* Load initial error weights */
  ier = cv_mem->cv_efun(cv_mem->cv_zn[0], cv_mem->cv_ewt, cv_mem->cv_e_data);
  if (ier != 0) {
    if (cv_mem->cv_itol == CV_WF)
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_EWT_FAIL);
    else
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_BAD_EWT);
    return(CV_ILL_INPUT);
  }

  /* Quadrature initial setup */
  if (cv_mem->cv_quadr && cv_mem->cv_errconQ) {
    /* Did the user specify tolerances? */
    if (cv_mem->cv_itolQ == CV_NN) {
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NO_TOLQ);
      return(CV_ILL_INPUT);
    }
    /* Load ewtQ */
    ier = cvQuadEwtSet(cv_mem, cv_mem->cv_znQ[0], cv_mem->cv_ewtQ);
    if (ier != 0) {
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_BAD_EWTQ);
      return(CV_ILL_INPUT);
    }
  }
  if (!cv_mem->cv_quadr) cv_mem->cv_errconQ = SUNFALSE;

  /* Forward sensitivity initial setup */
  if (cv_mem->cv_sensi) {
    /* Did the user specify tolerances? */
    if (cv_mem->cv_itolS == CV_NN) {
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NO_TOLS);
      return(CV_ILL_INPUT);
    }
    /* If using the internal DQ functions, we must have access to the
       problem parameters */
    if(cv_mem->cv_fSDQ && (cv_mem->cv_p == NULL)) {
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NULL_P);
      return(CV_ILL_INPUT);
    }
    /* Load ewtS */
    ier = cvSensEwtSet(cv_mem, cv_mem->cv_znS[0], cv_mem->cv_ewtS);
    if (ier != 0) {
      cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_BAD_EWTS);
      return(CV_ILL_INPUT);
    }
  }

  /* FSA of quadrature variables */
  if (cv_mem->cv_quadr_sensi) {
    /* If using the internal DQ functions, we must have access to fQ
     * (i.e. quadrature integration must be enabled) and to the problem
     * parameters */
    if (cv_mem->cv_fQSDQ) {
      /* Test if quadratures are defined, so we can use fQ */
      if (!cv_mem->cv_quadr) {
        cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NULL_FQ);
        return(CV_ILL_INPUT);
      }
      /* Test if we have the problem parameters */
      if(cv_mem->cv_p == NULL) {
        cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NULL_P);
        return(CV_ILL_INPUT);
      }
    }
    if (cv_mem->cv_errconQS) {
      /* Did the user specify tolerances? */
      if (cv_mem->cv_itolQS == CV_NN) {
        cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NO_TOLQS);
        return(CV_ILL_INPUT);
      }
      /* If needed, did the user provide quadrature tolerances? */
      if ( (cv_mem->cv_itolQS == CV_EE) && (cv_mem->cv_itolQ == CV_NN) ) {
        cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_NO_TOLQ);
        return(CV_ILL_INPUT);
      }
      /* Load ewtQS */
      ier = cvQuadSensEwtSet(cv_mem, cv_mem->cv_znQS[0], cv_mem->cv_ewtQS);
      if (ier != 0) {
        cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "cvInitialSetup", MSGCV_BAD_EWTQS);
        return(CV_ILL_INPUT);
      }
    }
  } else {
    cv_mem->cv_errconQS = SUNFALSE;
  }

  /* Call linit function (if it exists) */
  if (cv_mem->cv_linit != NULL) {
    ier = cv_mem->cv_linit(cv_mem);
    if (ier != 0) {
      cvProcessError(cv_mem, CV_LINIT_FAIL, "CVODES", "cvInitialSetup", MSGCV_LINIT_FAIL);
      return(CV_LINIT_FAIL);
    }
  }

  /* Initialize the nonlinear solver (must occur after linear solver is
     initialized) so that lsetup and lsolve pointer have been set */

  /* always initialize the ODE NLS in case the user disables
     sensitivities */
  ier = cvNlsInit(cv_mem);
  if (ier != 0) {
    cvProcessError(cv_mem, CV_NLS_INIT_FAIL, "CVODES", "cvInitialSetup", MSGCV_NLS_INIT_FAIL);
    return(CV_NLS_INIT_FAIL);
  }

  if (cv_mem->NLSsim != NULL) {
    ier = cvNlsInitSensSim(cv_mem);
    if (ier != 0) {
      cvProcessError(cv_mem, CV_NLS_INIT_FAIL, "CVODES", "cvInitialSetup", MSGCV_NLS_INIT_FAIL);
      return(CV_NLS_INIT_FAIL);
    }
  }

  if (cv_mem->NLSstg != NULL) {
    ier = cvNlsInitSensStg(cv_mem);
    if (ier != 0) {
      cvProcessError(cv_mem, CV_NLS_INIT_FAIL, "CVODES", "cvInitialSetup", MSGCV_NLS_INIT_FAIL);
      return(CV_NLS_INIT_FAIL);
    }
  }

  if (cv_mem->NLSstg1 != NULL) {
    ier = cvNlsInitSensStg1(cv_mem);
    if (ier != 0) {
      cvProcessError(cv_mem, CV_NLS_INIT_FAIL, "CVODES", "cvInitialSetup", MSGCV_NLS_INIT_FAIL);
      return(CV_NLS_INIT_FAIL);
    }
  }

  return(CV_SUCCESS);
}

/*
 * -----------------------------------------------------------------
 * Initial stepsize calculation
 * -----------------------------------------------------------------
 */

/*
 * cvHin
 *
 * This routine computes a tentative initial step size h0.
 * If tout is too close to tn (= t0), then cvHin returns CV_TOO_CLOSE
 * and h remains uninitialized.
Note that here tout is either the value * passed to CVode at the first call or the value of tstop (if tstop is * enabled and it is closer to t0=tn than tout). * If any RHS function fails unrecoverably, cvHin returns CV_*RHSFUNC_FAIL. * If any RHS function fails recoverably too many times and recovery is * not possible, cvHin returns CV_REPTD_*RHSFUNC_ERR. * Otherwise, cvHin sets h to the chosen value h0 and returns CV_SUCCESS. * * The algorithm used seeks to find h0 as a solution of * (WRMS norm of (h0^2 ydd / 2)) = 1, * where ydd = estimated second derivative of y. Here, y includes * all variables considered in the error test. * * We start with an initial estimate equal to the geometric mean of the * lower and upper bounds on the step size. * * Loop up to MAX_ITERS times to find h0. * Stop if new and previous values differ by a factor < 2. * Stop if hnew/hg > 2 after one iteration, as this probably means * that the ydd value is bad because of cancellation error. * * For each new proposed hg, we allow MAX_ITERS attempts to * resolve a possible recoverable failure from f() by reducing * the proposed stepsize by a factor of 0.2. If a legal stepsize * still cannot be found, fall back on a previous value if possible, * or else return CV_REPTD_RHSFUNC_ERR. * * Finally, we apply a bias (0.5) and verify that h0 is within bounds. */ static int cvHin(CVodeMem cv_mem, realtype tout) { int retval, sign, count1, count2; realtype tdiff, tdist, tround, hlb, hub; realtype hg, hgs, hs, hnew, hrat, h0, yddnrm; booleantype hgOK; /* If tout is too close to tn, give up */ if ((tdiff = tout-cv_mem->cv_tn) == ZERO) return(CV_TOO_CLOSE); sign = (tdiff > ZERO) ? 1 : -1; tdist = SUNRabs(tdiff); tround = cv_mem->cv_uround * SUNMAX(SUNRabs(cv_mem->cv_tn), SUNRabs(tout)); if (tdist < TWO*tround) return(CV_TOO_CLOSE); /* Set lower and upper bounds on h0, and take geometric mean as first trial value. Exit with this value if the bounds cross each other. 
*/ hlb = HLB_FACTOR * tround; hub = cvUpperBoundH0(cv_mem, tdist); hg = SUNRsqrt(hlb*hub); if (hub < hlb) { if (sign == -1) cv_mem->cv_h = -hg; else cv_mem->cv_h = hg; return(CV_SUCCESS); } /* Outer loop */ hs = hg; /* safeguard against 'uninitialized variable' warning */ for(count1 = 1; count1 <= MAX_ITERS; count1++) { /* Attempts to estimate ydd */ hgOK = SUNFALSE; for (count2 = 1; count2 <= MAX_ITERS; count2++) { hgs = hg*sign; retval = cvYddNorm(cv_mem, hgs, &yddnrm); /* If a RHS function failed unrecoverably, give up */ if (retval < 0) return(retval); /* If successful, we can use ydd */ if (retval == CV_SUCCESS) {hgOK = SUNTRUE; break;} /* A RHS function failed recoverably; cut step size and test again */ hg *= POINT2; } /* If a RHS function failed recoverably MAX_ITERS times */ if (!hgOK) { /* Exit if this is the first or second pass. No recovery possible */ if (count1 <= 2) { if (retval == RHSFUNC_RECVR) return(CV_REPTD_RHSFUNC_ERR); if (retval == QRHSFUNC_RECVR) return(CV_REPTD_QRHSFUNC_ERR); if (retval == SRHSFUNC_RECVR) return(CV_REPTD_SRHSFUNC_ERR); } /* We have a fall-back option. The value hs is a previous hnew which passed through f(). Use it and break */ hnew = hs; break; } /* The proposed step size is feasible. Save it. */ hs = hg; /* Propose new step size */ hnew = (yddnrm*hub*hub > TWO) ? SUNRsqrt(TWO/yddnrm) : SUNRsqrt(hg*hub); /* If last pass, stop now with hnew */ if (count1 == MAX_ITERS) break; hrat = hnew/hg; /* Accept hnew if it does not differ from hg by more than a factor of 2 */ if ((hrat > HALF) && (hrat < TWO)) break; /* After one pass, if ydd seems to be bad, use fall-back value. 
*/ if ((count1 > 1) && (hrat > TWO)) { hnew = hg; break; } /* Send this value back through f() */ hg = hnew; } /* Apply bounds, bias factor, and attach sign */ h0 = H_BIAS*hnew; if (h0 < hlb) h0 = hlb; if (h0 > hub) h0 = hub; if (sign == -1) h0 = -h0; cv_mem->cv_h = h0; return(CV_SUCCESS); } /* * cvUpperBoundH0 * * This routine sets an upper bound on abs(h0) based on * tdist = tn - t0 and the values of y[i]/y'[i]. */ static realtype cvUpperBoundH0(CVodeMem cv_mem, realtype tdist) { realtype hub_inv, hubQ_inv, hubS_inv, hubQS_inv, hub; N_Vector temp1, temp2; N_Vector tempQ1, tempQ2; N_Vector *tempS1; N_Vector *tempQS1; int is; /* * Bound based on |y|/|y'| -- allow at most an increase of * HUB_FACTOR in y0 (based on a forward Euler step). The weight * factor is used as a safeguard against zero components in y0. */ temp1 = cv_mem->cv_tempv; temp2 = cv_mem->cv_acor; N_VAbs(cv_mem->cv_zn[0], temp2); cv_mem->cv_efun(cv_mem->cv_zn[0], temp1, cv_mem->cv_e_data); N_VInv(temp1, temp1); N_VLinearSum(HUB_FACTOR, temp2, ONE, temp1, temp1); N_VAbs(cv_mem->cv_zn[1], temp2); N_VDiv(temp2, temp1, temp1); hub_inv = N_VMaxNorm(temp1); /* Bound based on |yQ|/|yQ'| */ if (cv_mem->cv_quadr && cv_mem->cv_errconQ) { tempQ1 = cv_mem->cv_tempvQ; tempQ2 = cv_mem->cv_acorQ; N_VAbs(cv_mem->cv_znQ[0], tempQ2); cvQuadEwtSet(cv_mem, cv_mem->cv_znQ[0], tempQ1); N_VInv(tempQ1, tempQ1); N_VLinearSum(HUB_FACTOR, tempQ2, ONE, tempQ1, tempQ1); N_VAbs(cv_mem->cv_znQ[1], tempQ2); N_VDiv(tempQ2, tempQ1, tempQ1); hubQ_inv = N_VMaxNorm(tempQ1); if (hubQ_inv > hub_inv) hub_inv = hubQ_inv; } /* Bound based on |yS|/|yS'| */ if (cv_mem->cv_sensi && cv_mem->cv_errconS) { tempS1 = cv_mem->cv_acorS; cvSensEwtSet(cv_mem, cv_mem->cv_znS[0], tempS1); for (is=0; iscv_Ns; is++) { N_VAbs(cv_mem->cv_znS[0][is], temp2); N_VInv(tempS1[is], temp1); N_VLinearSum(HUB_FACTOR, temp2, ONE, temp1, temp1); N_VAbs(cv_mem->cv_znS[1][is], temp2); N_VDiv(temp2, temp1, temp1); hubS_inv = N_VMaxNorm(temp1); if (hubS_inv > hub_inv) 
hub_inv = hubS_inv; } } /* Bound based on |yQS|/|yQS'| */ if (cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS) { tempQ1 = cv_mem->cv_tempvQ; tempQ2 = cv_mem->cv_acorQ; tempQS1 = cv_mem->cv_acorQS; cvQuadSensEwtSet(cv_mem, cv_mem->cv_znQS[0], tempQS1); for (is=0; iscv_Ns; is++) { N_VAbs(cv_mem->cv_znQS[0][is], tempQ2); N_VInv(tempQS1[is], tempQ1); N_VLinearSum(HUB_FACTOR, tempQ2, ONE, tempQ1, tempQ1); N_VAbs(cv_mem->cv_znQS[1][is], tempQ2); N_VDiv(tempQ2, tempQ1, tempQ1); hubQS_inv = N_VMaxNorm(tempQ1); if (hubQS_inv > hub_inv) hub_inv = hubQS_inv; } } /* * bound based on tdist -- allow at most a step of magnitude * HUB_FACTOR * tdist */ hub = HUB_FACTOR*tdist; /* Use the smaller of the two */ if (hub*hub_inv > ONE) hub = ONE/hub_inv; return(hub); } /* * cvYddNorm * * This routine computes an estimate of the second derivative of Y * using a difference quotient, and returns its WRMS norm. * * Y contains all variables included in the error test. */ static int cvYddNorm(CVodeMem cv_mem, realtype hg, realtype *yddnrm) { int retval; N_Vector wrk1, wrk2; /* y <- h*y'(t) + y(t) */ N_VLinearSum(hg, cv_mem->cv_zn[1], ONE, cv_mem->cv_zn[0], cv_mem->cv_y); if (cv_mem->cv_sensi && cv_mem->cv_errconS) { retval = N_VLinearSumVectorArray(cv_mem->cv_Ns, hg, cv_mem->cv_znS[1], ONE, cv_mem->cv_znS[0], cv_mem->cv_yS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } /* tempv <- f(t+h, h*y'(t)+y(t)) */ retval = cv_mem->cv_f(cv_mem->cv_tn+hg, cv_mem->cv_y, cv_mem->cv_tempv, cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) return(CV_RHSFUNC_FAIL); if (retval > 0) return(RHSFUNC_RECVR); if (cv_mem->cv_quadr && cv_mem->cv_errconQ) { retval = cv_mem->cv_fQ(cv_mem->cv_tn+hg, cv_mem->cv_y, cv_mem->cv_tempvQ, cv_mem->cv_user_data); cv_mem->cv_nfQe++; if (retval < 0) return(CV_QRHSFUNC_FAIL); if (retval > 0) return(QRHSFUNC_RECVR); } if (cv_mem->cv_sensi && cv_mem->cv_errconS) { wrk1 = cv_mem->cv_ftemp; wrk2 = cv_mem->cv_acor; retval = cvSensRhsWrapper(cv_mem, cv_mem->cv_tn+hg, 
cv_mem->cv_y, cv_mem->cv_tempv, cv_mem->cv_yS, cv_mem->cv_tempvS, wrk1, wrk2); if (retval < 0) return(CV_SRHSFUNC_FAIL); if (retval > 0) return(SRHSFUNC_RECVR); } if (cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS) { wrk1 = cv_mem->cv_ftemp; wrk2 = cv_mem->cv_acorQ; retval = cv_mem->cv_fQS(cv_mem->cv_Ns, cv_mem->cv_tn+hg, cv_mem->cv_y, cv_mem->cv_yS, cv_mem->cv_tempvQ, cv_mem->cv_tempvQS, cv_mem->cv_fQS_data, wrk1, wrk2); cv_mem->cv_nfQSe++; if (retval < 0) return(CV_QSRHSFUNC_FAIL); if (retval > 0) return(QSRHSFUNC_RECVR); } /* Load estimate of ||y''|| into tempv: * tempv <- (1/h) * f(t+h, h*y'(t)+y(t)) - y'(t) */ N_VLinearSum(ONE/hg, cv_mem->cv_tempv, -ONE/hg, cv_mem->cv_zn[1], cv_mem->cv_tempv); *yddnrm = N_VWrmsNorm(cv_mem->cv_tempv, cv_mem->cv_ewt); if (cv_mem->cv_quadr && cv_mem->cv_errconQ) { N_VLinearSum(ONE/hg, cv_mem->cv_tempvQ, -ONE/hg, cv_mem->cv_znQ[1], cv_mem->cv_tempvQ); *yddnrm = cvQuadUpdateNorm(cv_mem, *yddnrm, cv_mem->cv_tempvQ, cv_mem->cv_ewtQ); } if (cv_mem->cv_sensi && cv_mem->cv_errconS) { retval = N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE/hg, cv_mem->cv_tempvS, -ONE/hg, cv_mem->cv_znS[1], cv_mem->cv_tempvS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); *yddnrm = cvSensUpdateNorm(cv_mem, *yddnrm, cv_mem->cv_tempvS, cv_mem->cv_ewtS); } if (cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS) { retval = N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE/hg, cv_mem->cv_tempvQS, -ONE/hg, cv_mem->cv_znQS[1], cv_mem->cv_tempvQS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); *yddnrm = cvQuadSensUpdateNorm(cv_mem, *yddnrm, cv_mem->cv_tempvQS, cv_mem->cv_ewtQS); } return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Main cvStep function * ----------------------------------------------------------------- */ /* * cvStep * * This routine performs one internal cvode step, from tn to tn + h. * It calls other routines to do all the work. 
* * The main operations done here are as follows: * - preliminary adjustments if a new step size was chosen; * - prediction of the Nordsieck history array zn at tn + h; * - setting of multistep method coefficients and test quantities; * - solution of the nonlinear system; * - testing the local error; * - updating zn and other state data if successful; * - resetting stepsize and order for the next step. * - if SLDET is on, check for stability, reduce order if necessary. * On a failure in the nonlinear system solution or error test, the * step may be reattempted, depending on the nature of the failure. */ static int cvStep(CVodeMem cv_mem) { realtype saved_t, dsm, dsmQ, dsmS, dsmQS; booleantype do_sensi_stg, do_sensi_stg1; int ncf, ncfS; int nef, nefQ, nefS, nefQS; int nflag, kflag, eflag; int retval, is; /* Are we computing sensitivities with a staggered approach? */ do_sensi_stg = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_STAGGERED)); do_sensi_stg1 = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_STAGGERED1)); /* Initialize local counters for convergence and error test failures */ ncf = nef = 0; nefQ = nefQS = 0; ncfS = nefS = 0; if (do_sensi_stg1) { for (is=0; iscv_Ns; is++) cv_mem->cv_ncfS1[is] = 0; } /* If needed, adjust method parameters */ if ((cv_mem->cv_nst > 0) && (cv_mem->cv_hprime != cv_mem->cv_h)) cvAdjustParams(cv_mem); /* Looping point for attempts to take a step */ saved_t = cv_mem->cv_tn; nflag = FIRST_CALL; for(;;) { cvPredict(cv_mem); cvSet(cv_mem); /* ------ Correct state variables ------ */ nflag = cvNls(cv_mem, nflag); kflag = cvHandleNFlag(cv_mem, &nflag, saved_t, &ncf, &(cv_mem->cv_ncfn)); /* Go back in loop if we need to predict again (nflag=PREV_CONV_FAIL) */ if (kflag == PREDICT_AGAIN) continue; /* Return if nonlinear solve failed and recovery not possible. 
*/ if (kflag != DO_ERROR_TEST) return(kflag); /* Perform error test (nflag=CV_SUCCESS) */ eflag = cvDoErrorTest(cv_mem, &nflag, saved_t, cv_mem->cv_acnrm, &nef, &(cv_mem->cv_netf), &dsm); /* Go back in loop if we need to predict again (nflag=PREV_ERR_FAIL) */ if (eflag == TRY_AGAIN) continue; /* Return if error test failed and recovery not possible. */ if (eflag != CV_SUCCESS) return(eflag); /* Error test passed (eflag=CV_SUCCESS, nflag=CV_SUCCESS), go on */ /* ------ Correct the quadrature variables ------ */ if (cv_mem->cv_quadr) { ncf = nef = 0; /* reset counters for states */ nflag = cvQuadNls(cv_mem); kflag = cvHandleNFlag(cv_mem, &nflag, saved_t, &ncf, &(cv_mem->cv_ncfn)); if (kflag == PREDICT_AGAIN) continue; if (kflag != DO_ERROR_TEST) return(kflag); /* Error test on quadratures */ if (cv_mem->cv_errconQ) { cv_mem->cv_acnrmQ = N_VWrmsNorm(cv_mem->cv_acorQ, cv_mem->cv_ewtQ); eflag = cvDoErrorTest(cv_mem, &nflag, saved_t, cv_mem->cv_acnrmQ, &nefQ, &(cv_mem->cv_netfQ), &dsmQ); if (eflag == TRY_AGAIN) continue; if (eflag != CV_SUCCESS) return(eflag); /* Set dsm = max(dsm, dsmQ) to be used in cvPrepareNextStep */ if (dsmQ > dsm) dsm = dsmQ; } } /* ------ Correct the sensitivity variables (STAGGERED or STAGGERED1) ------- */ if (do_sensi_stg || do_sensi_stg1) { ncf = nef = 0; /* reset counters for states */ if (cv_mem->cv_quadr) nefQ = 0; /* reset counter for quadratures */ /* Evaluate f at converged y, needed for future evaluations of sens. 
RHS * If f() fails recoverably, treat it as a convergence failure and * attempt the step again */ retval = cv_mem->cv_f(cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_ftemp, cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) return(CV_RHSFUNC_FAIL); if (retval > 0) { nflag = PREV_CONV_FAIL; continue; } if (do_sensi_stg) { /* Nonlinear solve for sensitivities (all-at-once) */ nflag = cvStgrNls(cv_mem); kflag = cvHandleNFlag(cv_mem, &nflag, saved_t, &ncfS, &(cv_mem->cv_ncfnS)); } else { /* Nonlinear solve for sensitivities (one-by-one) */ for (is=0; iscv_Ns; is++) { cv_mem->sens_solve_idx = is; nflag = cvStgr1Nls(cv_mem, is); kflag = cvHandleNFlag(cv_mem, &nflag, saved_t, &(cv_mem->cv_ncfS1[is]), &(cv_mem->cv_ncfnS1[is])); if (kflag != DO_ERROR_TEST) break; } } if (kflag == PREDICT_AGAIN) continue; if (kflag != DO_ERROR_TEST) return(kflag); /* Error test on sensitivities */ if (cv_mem->cv_errconS) { if (!cv_mem->cv_acnrmScur) cv_mem->cv_acnrmS = cvSensNorm(cv_mem, cv_mem->cv_acorS, cv_mem->cv_ewtS); eflag = cvDoErrorTest(cv_mem, &nflag, saved_t, cv_mem->cv_acnrmS, &nefS, &(cv_mem->cv_netfS), &dsmS); if (eflag == TRY_AGAIN) continue; if (eflag != CV_SUCCESS) return(eflag); /* Set dsm = max(dsm, dsmS) to be used in cvPrepareNextStep */ if (dsmS > dsm) dsm = dsmS; } } /* ------ Correct the quadrature sensitivity variables ------ */ if (cv_mem->cv_quadr_sensi) { /* Reset local convergence and error test failure counters */ ncf = nef = 0; if (cv_mem->cv_quadr) nefQ = 0; if (do_sensi_stg) ncfS = nefS = 0; if (do_sensi_stg1) { for (is=0; iscv_Ns; is++) cv_mem->cv_ncfS1[is] = 0; nefS = 0; } /* Note that ftempQ contains yQdot evaluated at the converged y * (stored in cvQuadNls) and can be used in evaluating fQS */ nflag = cvQuadSensNls(cv_mem); kflag = cvHandleNFlag(cv_mem, &nflag, saved_t, &ncf, &(cv_mem->cv_ncfn)); if (kflag == PREDICT_AGAIN) continue; if (kflag != DO_ERROR_TEST) return(kflag); /* Error test on quadrature sensitivities */ if (cv_mem->cv_errconQS) { 
cv_mem->cv_acnrmQS = cvQuadSensNorm(cv_mem, cv_mem->cv_acorQS, cv_mem->cv_ewtQS); eflag = cvDoErrorTest(cv_mem, &nflag, saved_t, cv_mem->cv_acnrmQS, &nefQS, &(cv_mem->cv_netfQS), &dsmQS); if (eflag == TRY_AGAIN) continue; if (eflag != CV_SUCCESS) return(eflag); /* Set dsm = max(dsm, dsmQS) to be used in cvPrepareNextStep */ if (dsmQS > dsm) dsm = dsmQS; } } /* Error test passed (eflag=CV_SUCCESS), break from loop */ break; } /* Nonlinear system solve and error test were both successful. Update data, and consider change of step and/or order. */ cvCompleteStep(cv_mem); cvPrepareNextStep(cv_mem, dsm); /* If Stablilty Limit Detection is turned on, call stability limit detection routine for possible order reduction. */ if (cv_mem->cv_sldeton) cvBDFStab(cv_mem); cv_mem->cv_etamax = (cv_mem->cv_nst <= SMALL_NST) ? ETAMX2 : ETAMX3; /* Finally, we rescale the acor array to be the estimated local error vector. */ N_VScale(cv_mem->cv_tq[2], cv_mem->cv_acor, cv_mem->cv_acor); if (cv_mem->cv_quadr) N_VScale(cv_mem->cv_tq[2], cv_mem->cv_acorQ, cv_mem->cv_acorQ); if (cv_mem->cv_sensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = cv_mem->cv_tq[2]; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_acorS, cv_mem->cv_acorS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } if (cv_mem->cv_quadr_sensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = cv_mem->cv_tq[2]; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_acorQS, cv_mem->cv_acorQS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Function called at beginning of step * ----------------------------------------------------------------- */ /* * cvAdjustParams * * This routine is called when a change in step size was decided upon, * and it handles the required adjustments to the history array zn. 
* If there is to be a change in order, we call cvAdjustOrder and reset * q, L = q+1, and qwait. Then in any case, we call cvRescale, which * resets h and rescales the Nordsieck array. */ static void cvAdjustParams(CVodeMem cv_mem) { if (cv_mem->cv_qprime != cv_mem->cv_q) { cvAdjustOrder(cv_mem, cv_mem->cv_qprime-cv_mem->cv_q); cv_mem->cv_q = cv_mem->cv_qprime; cv_mem->cv_L = cv_mem->cv_q+1; cv_mem->cv_qwait = cv_mem->cv_L; } cvRescale(cv_mem); } /* * cvAdjustOrder * * This routine is a high level routine which handles an order * change by an amount deltaq (= +1 or -1). If a decrease in order * is requested and q==2, then the routine returns immediately. * Otherwise cvAdjustAdams or cvAdjustBDF is called to handle the * order change (depending on the value of lmm). */ static void cvAdjustOrder(CVodeMem cv_mem, int deltaq) { if ((cv_mem->cv_q==2) && (deltaq != 1)) return; switch(cv_mem->cv_lmm){ case CV_ADAMS: cvAdjustAdams(cv_mem, deltaq); break; case CV_BDF: cvAdjustBDF(cv_mem, deltaq); break; } } /* * cvAdjustAdams * * This routine adjusts the history array on a change of order q by * deltaq, in the case that lmm == CV_ADAMS. */ static void cvAdjustAdams(CVodeMem cv_mem, int deltaq) { int i, j; realtype xi, hsum; /* On an order increase, set new column of zn to zero and return */ if (deltaq==1) { N_VConst(ZERO, cv_mem->cv_zn[cv_mem->cv_L]); if (cv_mem->cv_quadr) N_VConst(ZERO, cv_mem->cv_znQ[cv_mem->cv_L]); if (cv_mem->cv_sensi) (void) N_VConstVectorArray(cv_mem->cv_Ns, ZERO, cv_mem->cv_znS[cv_mem->cv_L]); return; } /* * On an order decrease, each zn[j] is adjusted by a multiple of zn[q]. * The coeffs. in the adjustment are the coeffs. of the polynomial: * x * q * INT { u * ( u + xi_1 ) * ... 
* ( u + xi_{q-2} ) } du * 0 * where xi_j = [t_n - t_(n-j)]/h => xi_0 = 0 */ for (i=0; i <= cv_mem->cv_qmax; i++) cv_mem->cv_l[i] = ZERO; cv_mem->cv_l[1] = ONE; hsum = ZERO; for (j=1; j <= cv_mem->cv_q-2; j++) { hsum += cv_mem->cv_tau[j]; xi = hsum / cv_mem->cv_hscale; for (i=j+1; i >= 1; i--) cv_mem->cv_l[i] = cv_mem->cv_l[i]*xi + cv_mem->cv_l[i-1]; } for (j=1; j <= cv_mem->cv_q-2; j++) cv_mem->cv_l[j+1] = cv_mem->cv_q * (cv_mem->cv_l[j] / (j+1)); if (cv_mem->cv_q > 2) { for (j=2; j < cv_mem->cv_q; j++) cv_mem->cv_cvals[j-2] = -cv_mem->cv_l[j]; (void) N_VScaleAddMulti(cv_mem->cv_q-2, cv_mem->cv_cvals, cv_mem->cv_zn[cv_mem->cv_q], cv_mem->cv_zn+2, cv_mem->cv_zn+2); if (cv_mem->cv_quadr) (void) N_VScaleAddMulti(cv_mem->cv_q-2, cv_mem->cv_cvals, cv_mem->cv_znQ[cv_mem->cv_q], cv_mem->cv_znQ+2, cv_mem->cv_znQ+2); if (cv_mem->cv_sensi) (void) N_VScaleAddMultiVectorArray(cv_mem->cv_Ns, cv_mem->cv_q-2, cv_mem->cv_cvals, cv_mem->cv_znS[cv_mem->cv_q], cv_mem->cv_znS+2, cv_mem->cv_znS+2); } } /* * cvAdjustBDF * * This is a high level routine which handles adjustments to the * history array on a change of order by deltaq in the case that * lmm == CV_BDF. cvAdjustBDF calls cvIncreaseBDF if deltaq = +1 and * cvDecreaseBDF if deltaq = -1 to do the actual work. */ static void cvAdjustBDF(CVodeMem cv_mem, int deltaq) { switch(deltaq) { case 1: cvIncreaseBDF(cv_mem); return; case -1: cvDecreaseBDF(cv_mem); return; } } /* * cvIncreaseBDF * * This routine adjusts the history array on an increase in the * order q in the case that lmm == CV_BDF. * A new column zn[q+1] is set equal to a multiple of the saved * vector (= acor) in zn[indx_acor]. Then each zn[j] is adjusted by * a multiple of zn[q+1]. The coefficients in the adjustment are the * coefficients of the polynomial x*x*(x+xi_1)*...*(x+xi_j), * where xi_j = [t_n - t_(n-j)]/h. 
*/ static void cvIncreaseBDF(CVodeMem cv_mem) { realtype alpha0, alpha1, prod, xi, xiold, hsum, A1; int i, j; int is; for (i=0; i <= cv_mem->cv_qmax; i++) cv_mem->cv_l[i] = ZERO; cv_mem->cv_l[2] = alpha1 = prod = xiold = ONE; alpha0 = -ONE; hsum = cv_mem->cv_hscale; if (cv_mem->cv_q > 1) { for (j=1; j < cv_mem->cv_q; j++) { hsum += cv_mem->cv_tau[j+1]; xi = hsum / cv_mem->cv_hscale; prod *= xi; alpha0 -= ONE / (j+1); alpha1 += ONE / xi; for (i=j+2; i >= 2; i--) cv_mem->cv_l[i] = cv_mem->cv_l[i]*xiold + cv_mem->cv_l[i-1]; xiold = xi; } } A1 = (-alpha0 - alpha1) / prod; /* zn[indx_acor] contains the value Delta_n = y_n - y_n(0) This value was stored there at the previous successful step (in cvCompleteStep) A1 contains dbar = (1/xi* - 1/xi_q)/prod(xi_j) */ N_VScale(A1, cv_mem->cv_zn[cv_mem->cv_indx_acor], cv_mem->cv_zn[cv_mem->cv_L]); /* for (j=2; j <= cv_mem->cv_q; j++) */ if (cv_mem->cv_q > 1) (void) N_VScaleAddMulti(cv_mem->cv_q-1, cv_mem->cv_l+2, cv_mem->cv_zn[cv_mem->cv_L], cv_mem->cv_zn+2, cv_mem->cv_zn+2); if (cv_mem->cv_quadr) { N_VScale(A1, cv_mem->cv_znQ[cv_mem->cv_indx_acor], cv_mem->cv_znQ[cv_mem->cv_L]); /* for (j=2; j <= cv_mem->cv_q; j++) */ if (cv_mem->cv_q > 1) (void) N_VScaleAddMulti(cv_mem->cv_q-1, cv_mem->cv_l+2, cv_mem->cv_znQ[cv_mem->cv_L], cv_mem->cv_znQ+2, cv_mem->cv_znQ+2); } if (cv_mem->cv_sensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = A1; (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znS[cv_mem->cv_indx_acor], cv_mem->cv_znS[cv_mem->cv_L]); /* for (j=2; j <= cv_mem->cv_q; j++) */ if (cv_mem->cv_q > 1) (void) N_VScaleAddMultiVectorArray(cv_mem->cv_Ns, cv_mem->cv_q-1, cv_mem->cv_l+2, cv_mem->cv_znS[cv_mem->cv_L], cv_mem->cv_znS+2, cv_mem->cv_znS+2); } if (cv_mem->cv_quadr_sensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = A1; (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znQS[cv_mem->cv_indx_acor], cv_mem->cv_znQS[cv_mem->cv_L]); /* for (j=2; j <= cv_mem->cv_q; j++) */ if 
(cv_mem->cv_q > 1) (void) N_VScaleAddMultiVectorArray(cv_mem->cv_Ns, cv_mem->cv_q-1, cv_mem->cv_l+2, cv_mem->cv_znQS[cv_mem->cv_L], cv_mem->cv_znQS+2, cv_mem->cv_znQS+2); } } /* * cvDecreaseBDF * * This routine adjusts the history array on a decrease in the * order q in the case that lmm == CV_BDF. * Each zn[j] is adjusted by a multiple of zn[q]. The coefficients * in the adjustment are the coefficients of the polynomial * x*x*(x+xi_1)*...*(x+xi_j), where xi_j = [t_n - t_(n-j)]/h. */ static void cvDecreaseBDF(CVodeMem cv_mem) { realtype hsum, xi; int i, j; for (i=0; i <= cv_mem->cv_qmax; i++) cv_mem->cv_l[i] = ZERO; cv_mem->cv_l[2] = ONE; hsum = ZERO; for (j=1; j <= cv_mem->cv_q-2; j++) { hsum += cv_mem->cv_tau[j]; xi = hsum / cv_mem->cv_hscale; for (i=j+2; i >= 2; i--) cv_mem->cv_l[i] = cv_mem->cv_l[i]*xi + cv_mem->cv_l[i-1]; } if (cv_mem->cv_q > 2) { for (j=2; j < cv_mem->cv_q; j++) cv_mem->cv_cvals[j-2] = -cv_mem->cv_l[j]; (void) N_VScaleAddMulti(cv_mem->cv_q-2, cv_mem->cv_cvals, cv_mem->cv_zn[cv_mem->cv_q], cv_mem->cv_zn+2, cv_mem->cv_zn+2); if (cv_mem->cv_quadr) (void) N_VScaleAddMulti(cv_mem->cv_q-2, cv_mem->cv_cvals, cv_mem->cv_znQ[cv_mem->cv_q], cv_mem->cv_znQ+2, cv_mem->cv_znQ+2); if (cv_mem->cv_sensi) (void) N_VScaleAddMultiVectorArray(cv_mem->cv_Ns, cv_mem->cv_q-2, cv_mem->cv_cvals, cv_mem->cv_znS[cv_mem->cv_q], cv_mem->cv_znS+2, cv_mem->cv_znS+2); if (cv_mem->cv_quadr_sensi) (void) N_VScaleAddMultiVectorArray(cv_mem->cv_Ns, cv_mem->cv_q-2, cv_mem->cv_cvals, cv_mem->cv_znQS[cv_mem->cv_q], cv_mem->cv_znQS+2, cv_mem->cv_znQS+2); } } /* * cvRescale * * This routine rescales the Nordsieck array by multiplying the * jth column zn[j] by eta^j, j = 1, ..., q. Then the value of * h is rescaled by eta, and hscale is reset to h. 
*/ static void cvRescale(CVodeMem cv_mem) { int j; int is; /* compute scaling factors */ cv_mem->cv_cvals[0] = cv_mem->cv_eta; for (j=1; j <= cv_mem->cv_q; j++) cv_mem->cv_cvals[j] = cv_mem->cv_eta * cv_mem->cv_cvals[j-1]; (void) N_VScaleVectorArray(cv_mem->cv_q, cv_mem->cv_cvals, cv_mem->cv_zn+1, cv_mem->cv_zn+1); if (cv_mem->cv_quadr) (void) N_VScaleVectorArray(cv_mem->cv_q, cv_mem->cv_cvals, cv_mem->cv_znQ+1, cv_mem->cv_znQ+1); /* compute sensi scaling factors */ if (cv_mem->cv_sensi || cv_mem->cv_quadr_sensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = cv_mem->cv_eta; for (j=1; j <= cv_mem->cv_q; j++) for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[j*cv_mem->cv_Ns+is] = cv_mem->cv_eta * cv_mem->cv_cvals[(j-1)*cv_mem->cv_Ns+is]; } if (cv_mem->cv_sensi) { for (j=1; j <= cv_mem->cv_q; j++) for (is=0; iscv_Ns; is++) cv_mem->cv_Xvecs[(j-1)*cv_mem->cv_Ns+is] = cv_mem->cv_znS[j][is]; (void) N_VScaleVectorArray(cv_mem->cv_q*cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_Xvecs, cv_mem->cv_Xvecs); } if (cv_mem->cv_quadr_sensi) { for (j=1; j <= cv_mem->cv_q; j++) for (is=0; iscv_Ns; is++) cv_mem->cv_Xvecs[(j-1)*cv_mem->cv_Ns+is] = cv_mem->cv_znQS[j][is]; (void) N_VScaleVectorArray(cv_mem->cv_q*cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_Xvecs, cv_mem->cv_Xvecs); } cv_mem->cv_h = cv_mem->cv_hscale * cv_mem->cv_eta; cv_mem->cv_next_h = cv_mem->cv_h; cv_mem->cv_hscale = cv_mem->cv_h; cv_mem->cv_nscon = 0; } /* * cvPredict * * This routine advances tn by the tentative step size h, and computes * the predicted array z_n(0), which is overwritten on zn. The * prediction of zn is done by repeated additions. * If tstop is enabled, it is possible for tn + h to be past tstop by roundoff, * and in that case, we reset tn (after incrementing by h) to tstop. 
*/ static void cvPredict(CVodeMem cv_mem) { int j, k; cv_mem->cv_tn += cv_mem->cv_h; if (cv_mem->cv_tstopset) { if ((cv_mem->cv_tn - cv_mem->cv_tstop)*cv_mem->cv_h > ZERO) cv_mem->cv_tn = cv_mem->cv_tstop; } for (k = 1; k <= cv_mem->cv_q; k++) for (j = cv_mem->cv_q; j >= k; j--) N_VLinearSum(ONE, cv_mem->cv_zn[j-1], ONE, cv_mem->cv_zn[j], cv_mem->cv_zn[j-1]); if (cv_mem->cv_quadr) { for (k = 1; k <= cv_mem->cv_q; k++) for (j = cv_mem->cv_q; j >= k; j--) N_VLinearSum(ONE, cv_mem->cv_znQ[j-1], ONE, cv_mem->cv_znQ[j], cv_mem->cv_znQ[j-1]); } if (cv_mem->cv_sensi) { for (k = 1; k <= cv_mem->cv_q; k++) for (j = cv_mem->cv_q; j >= k; j--) (void) N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[j-1], ONE, cv_mem->cv_znS[j], cv_mem->cv_znS[j-1]); } if (cv_mem->cv_quadr_sensi) { for (k = 1; k <= cv_mem->cv_q; k++) for (j = cv_mem->cv_q; j >= k; j--) (void) N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znQS[j-1], ONE, cv_mem->cv_znQS[j], cv_mem->cv_znQS[j-1]); } } /* * cvSet * * This routine is a high level routine which calls cvSetAdams or * cvSetBDF to set the polynomial l, the test quantity array tq, * and the related variables rl1, gamma, and gamrat. * * The array tq is loaded with constants used in the control of estimated * local errors and in the nonlinear convergence test. Specifically, while * running at order q, the components of tq are as follows: * tq[1] = a coefficient used to get the est. local error at order q-1 * tq[2] = a coefficient used to get the est. local error at order q * tq[3] = a coefficient used to get the est. local error at order q+1 * tq[4] = constant used in nonlinear iteration convergence test * tq[5] = coefficient used to get the order q+2 derivative vector used in * the est. 
local error at order q+1 */ static void cvSet(CVodeMem cv_mem) { switch(cv_mem->cv_lmm) { case CV_ADAMS: cvSetAdams(cv_mem); break; case CV_BDF: cvSetBDF(cv_mem); break; } cv_mem->cv_rl1 = ONE / cv_mem->cv_l[1]; cv_mem->cv_gamma = cv_mem->cv_h * cv_mem->cv_rl1; if (cv_mem->cv_nst == 0) cv_mem->cv_gammap = cv_mem->cv_gamma; cv_mem->cv_gamrat = (cv_mem->cv_nst > 0) ? cv_mem->cv_gamma / cv_mem->cv_gammap : ONE; /* protect x / x != 1.0 */ } /* * cvSetAdams * * This routine handles the computation of l and tq for the * case lmm == CV_ADAMS. * * The components of the array l are the coefficients of a * polynomial Lambda(x) = l_0 + l_1 x + ... + l_q x^q, given by * q-1 * (d/dx) Lambda(x) = c * PRODUCT (1 + x / xi_i) , where * i=1 * Lambda(-1) = 0, Lambda(0) = 1, and c is a normalization factor. * Here xi_i = [t_n - t_(n-i)] / h. * * The array tq is set to test quantities used in the convergence * test, the error test, and the selection of h at a new order. */ static void cvSetAdams(CVodeMem cv_mem) { realtype m[L_MAX], M[3], hsum; if (cv_mem->cv_q == 1) { cv_mem->cv_l[0] = cv_mem->cv_l[1] = cv_mem->cv_tq[1] = cv_mem->cv_tq[5] = ONE; cv_mem->cv_tq[2] = HALF; cv_mem->cv_tq[3] = ONE/TWELVE; cv_mem->cv_tq[4] = cv_mem->cv_nlscoef / cv_mem->cv_tq[2]; /* = 0.1 / tq[2] */ return; } hsum = cvAdamsStart(cv_mem, m); M[0] = cvAltSum(cv_mem->cv_q-1, m, 1); M[1] = cvAltSum(cv_mem->cv_q-1, m, 2); cvAdamsFinish(cv_mem, m, M, hsum); } /* * cvAdamsStart * * This routine generates in m[] the coefficients of the product * polynomial needed for the Adams l and tq coefficients for q > 1. 
*/ static realtype cvAdamsStart(CVodeMem cv_mem, realtype m[]) { realtype hsum, xi_inv, sum; int i, j; hsum = cv_mem->cv_h; m[0] = ONE; for (i=1; i <= cv_mem->cv_q; i++) m[i] = ZERO; for (j=1; j < cv_mem->cv_q; j++) { if ((j==cv_mem->cv_q-1) && (cv_mem->cv_qwait == 1)) { sum = cvAltSum(cv_mem->cv_q-2, m, 2); cv_mem->cv_tq[1] = cv_mem->cv_q * sum / m[cv_mem->cv_q-2]; } xi_inv = cv_mem->cv_h / hsum; for (i=j; i >= 1; i--) m[i] += m[i-1] * xi_inv; hsum += cv_mem->cv_tau[j]; /* The m[i] are coefficients of product(1 to j) (1 + x/xi_i) */ } return(hsum); } /* * cvAdamsFinish * * This routine completes the calculation of the Adams l and tq. */ static void cvAdamsFinish(CVodeMem cv_mem, realtype m[], realtype M[], realtype hsum) { int i; realtype M0_inv, xi, xi_inv; M0_inv = ONE / M[0]; cv_mem->cv_l[0] = ONE; for (i=1; i <= cv_mem->cv_q; i++) cv_mem->cv_l[i] = M0_inv * (m[i-1] / i); xi = hsum / cv_mem->cv_h; xi_inv = ONE / xi; cv_mem->cv_tq[2] = M[1] * M0_inv / xi; cv_mem->cv_tq[5] = xi / cv_mem->cv_l[cv_mem->cv_q]; if (cv_mem->cv_qwait == 1) { for (i=cv_mem->cv_q; i >= 1; i--) m[i] += m[i-1] * xi_inv; M[2] = cvAltSum(cv_mem->cv_q, m, 2); cv_mem->cv_tq[3] = M[2] * M0_inv / cv_mem->cv_L; } cv_mem->cv_tq[4] = cv_mem->cv_nlscoef / cv_mem->cv_tq[2]; } /* * cvAltSum * * cvAltSum returns the value of the alternating sum * sum (i= 0 ... iend) [ (-1)^i * (a[i] / (i + k)) ]. * If iend < 0 then cvAltSum returns 0. * This operation is needed to compute the integral, from -1 to 0, * of a polynomial x^(k-1) M(x) given the coefficients of M(x). */ static realtype cvAltSum(int iend, realtype a[], int k) { int i, sign; realtype sum; if (iend < 0) return(ZERO); sum = ZERO; sign = 1; for (i=0; i <= iend; i++) { sum += sign * (a[i] / (i+k)); sign = -sign; } return(sum); } /* * cvSetBDF * * This routine computes the coefficients l and tq in the case * lmm == CV_BDF. cvSetBDF calls cvSetTqBDF to set the test * quantity array tq. 
* * The components of the array l are the coefficients of a * polynomial Lambda(x) = l_0 + l_1 x + ... + l_q x^q, given by * q-1 * Lambda(x) = (1 + x / xi*_q) * PRODUCT (1 + x / xi_i) , where * i=1 * xi_i = [t_n - t_(n-i)] / h. * * The array tq is set to test quantities used in the convergence * test, the error test, and the selection of h at a new order. */ static void cvSetBDF(CVodeMem cv_mem) { realtype alpha0, alpha0_hat, xi_inv, xistar_inv, hsum; int i,j; cv_mem->cv_l[0] = cv_mem->cv_l[1] = xi_inv = xistar_inv = ONE; for (i=2; i <= cv_mem->cv_q; i++) cv_mem->cv_l[i] = ZERO; alpha0 = alpha0_hat = -ONE; hsum = cv_mem->cv_h; if (cv_mem->cv_q > 1) { for (j=2; j < cv_mem->cv_q; j++) { hsum += cv_mem->cv_tau[j-1]; xi_inv = cv_mem->cv_h / hsum; alpha0 -= ONE / j; for (i=j; i >= 1; i--) cv_mem->cv_l[i] += cv_mem->cv_l[i-1]*xi_inv; /* The l[i] are coefficients of product(1 to j) (1 + x/xi_i) */ } /* j = q */ alpha0 -= ONE / cv_mem->cv_q; xistar_inv = -cv_mem->cv_l[1] - alpha0; hsum += cv_mem->cv_tau[cv_mem->cv_q-1]; xi_inv = cv_mem->cv_h / hsum; alpha0_hat = -cv_mem->cv_l[1] - xi_inv; for (i=cv_mem->cv_q; i >= 1; i--) cv_mem->cv_l[i] += cv_mem->cv_l[i-1]*xistar_inv; } cvSetTqBDF(cv_mem, hsum, alpha0, alpha0_hat, xi_inv, xistar_inv); } /* * cvSetTqBDF * * This routine sets the test quantity array tq in the case * lmm == CV_BDF. 
*/ static void cvSetTqBDF(CVodeMem cv_mem, realtype hsum, realtype alpha0, realtype alpha0_hat, realtype xi_inv, realtype xistar_inv) { realtype A1, A2, A3, A4, A5, A6; realtype C, Cpinv, Cppinv; A1 = ONE - alpha0_hat + alpha0; A2 = ONE + cv_mem->cv_q * A1; cv_mem->cv_tq[2] = SUNRabs(A1 / (alpha0 * A2)); cv_mem->cv_tq[5] = SUNRabs(A2 * xistar_inv / (cv_mem->cv_l[cv_mem->cv_q] * xi_inv)); if (cv_mem->cv_qwait == 1) { if (cv_mem->cv_q > 1) { C = xistar_inv / cv_mem->cv_l[cv_mem->cv_q]; A3 = alpha0 + ONE / cv_mem->cv_q; A4 = alpha0_hat + xi_inv; Cpinv = (ONE - A4 + A3) / A3; cv_mem->cv_tq[1] = SUNRabs(C * Cpinv); } else cv_mem->cv_tq[1] = ONE; hsum += cv_mem->cv_tau[cv_mem->cv_q]; xi_inv = cv_mem->cv_h / hsum; A5 = alpha0 - (ONE / (cv_mem->cv_q+1)); A6 = alpha0_hat - xi_inv; Cppinv = (ONE - A6 + A5) / A2; cv_mem->cv_tq[3] = SUNRabs(Cppinv / (xi_inv * (cv_mem->cv_q+2) * A5)); } cv_mem->cv_tq[4] = cv_mem->cv_nlscoef / cv_mem->cv_tq[2]; } /* * ----------------------------------------------------------------- * Nonlinear solver functions * ----------------------------------------------------------------- */ /* * cvNls * * This routine attempts to solve the nonlinear system associated * with a single implicit step of the linear multistep method. */ static int cvNls(CVodeMem cv_mem, int nflag) { int flag = CV_SUCCESS; booleantype callSetup; booleantype do_sensi_sim; long int nni_inc; /* Are we computing sensitivities with the CV_SIMULTANEOUS approach? */ do_sensi_sim = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_SIMULTANEOUS)); /* Decide whether or not to call setup routine (if one exists) and */ /* set flag convfail (input to lsetup for its evaluation decision) */ if (cv_mem->cv_lsetup) { cv_mem->convfail = ((nflag == FIRST_CALL) || (nflag == PREV_ERR_FAIL)) ? 
CV_NO_FAILURES : CV_FAIL_OTHER; callSetup = (nflag == PREV_CONV_FAIL) || (nflag == PREV_ERR_FAIL) || (cv_mem->cv_nst == 0) || (cv_mem->cv_nst >= cv_mem->cv_nstlp + cv_mem->cv_msbp) || (SUNRabs(cv_mem->cv_gamrat-ONE) > DGMAX); /* Decide whether to force a call to setup */ if (cv_mem->cv_forceSetup) { callSetup = SUNTRUE; cv_mem->convfail = CV_FAIL_OTHER; } } else { cv_mem->cv_crate = ONE; cv_mem->cv_crateS = ONE; /* if NO lsetup all conv. rates are set to ONE */ callSetup = SUNFALSE; } /* initial guess for the correction to the predictor */ if (do_sensi_sim) N_VConst(ZERO, cv_mem->ycorSim); else N_VConst(ZERO, cv_mem->cv_acor); /* call nonlinear solver setup if it exists */ if ((cv_mem->NLS)->ops->setup) { if (do_sensi_sim) flag = SUNNonlinSolSetup(cv_mem->NLS, cv_mem->ycorSim, cv_mem); else flag = SUNNonlinSolSetup(cv_mem->NLS, cv_mem->cv_acor, cv_mem); if (flag < 0) return(CV_NLS_SETUP_FAIL); if (flag > 0) return(SUN_NLS_CONV_RECVR); } /* solve the nonlinear system */ if (do_sensi_sim) { flag = SUNNonlinSolSolve(cv_mem->NLSsim, cv_mem->zn0Sim, cv_mem->ycorSim, cv_mem->ewtSim, cv_mem->cv_tq[4], callSetup, cv_mem); /* increment counter */ nni_inc = 0; (void) SUNNonlinSolGetNumIters(cv_mem->NLSsim, &(nni_inc)); cv_mem->cv_nni += nni_inc; } else { flag = SUNNonlinSolSolve(cv_mem->NLS, cv_mem->cv_zn[0], cv_mem->cv_acor, cv_mem->cv_ewt, cv_mem->cv_tq[4], callSetup, cv_mem); /* increment counter */ nni_inc = 0; (void) SUNNonlinSolGetNumIters(cv_mem->NLS, &(nni_inc)); cv_mem->cv_nni += nni_inc; } /* if the solve failed return */ if (flag != CV_SUCCESS) return(flag); /* solve successful */ /* update the state based on the final correction from the nonlinear solver */ N_VLinearSum(ONE, cv_mem->cv_zn[0], ONE, cv_mem->cv_acor, cv_mem->cv_y); /* update the sensitivities based on the final correction from the nonlinear solver */ if (do_sensi_sim) { N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[0], ONE, cv_mem->cv_acorS, cv_mem->cv_yS); } /* compute acnrm if is was 
not already done by the nonlinear solver */ if (!cv_mem->cv_acnrmcur) { if (do_sensi_sim && cv_mem->cv_errconS) cv_mem->cv_acnrm = N_VWrmsNorm(cv_mem->ycorSim, cv_mem->ewtSim); else cv_mem->cv_acnrm = N_VWrmsNorm(cv_mem->cv_acor, cv_mem->cv_ewt); } /* update Jacobian status */ cv_mem->cv_jcur = SUNFALSE; /* check inequality constraints */ if (cv_mem->cv_constraintsSet) flag = cvCheckConstraints(cv_mem); return(flag); } /* * cvCheckConstraints * * This routine determines if the constraints of the problem * are satisfied by the proposed step * * Possible return values are: * * CV_SUCCESS ---> allows stepping forward * * CONSTR_RECVR ---> values failed to satisfy constraints * * CV_CONSTR_FAIL ---> values failed to satisfy constraints with hmin */ static int cvCheckConstraints(CVodeMem cv_mem) { booleantype constraintsPassed; realtype vnorm; N_Vector mm = cv_mem->cv_ftemp; N_Vector tmp = cv_mem->cv_tempv; /* Get mask vector mm, set where constraints failed */ constraintsPassed = N_VConstrMask(cv_mem->cv_constraints, cv_mem->cv_y, mm); if (constraintsPassed) return(CV_SUCCESS); /* Constraints not met */ /* Compute correction to satisfy constraints */ N_VCompare(ONEPT5, cv_mem->cv_constraints, tmp); /* a[i]=1 when |c[i]|=2 */ N_VProd(tmp, cv_mem->cv_constraints, tmp); /* a * c */ N_VDiv(tmp, cv_mem->cv_ewt, tmp); /* a * c * wt */ N_VLinearSum(ONE, cv_mem->cv_y, -PT1, tmp, tmp); /* y - 0.1 * a * c * wt */ N_VProd(tmp, mm, tmp); /* v = mm*(y-0.1*a*c*wt) */ vnorm = N_VWrmsNorm(tmp, cv_mem->cv_ewt); /* ||v|| */ /* If vector v of constraint corrections is small in norm, correct and accept this step */ if (vnorm <= cv_mem->cv_tq[4]) { N_VLinearSum(ONE, cv_mem->cv_acor, -ONE, tmp, cv_mem->cv_acor); /* acor <- acor - v */ return(CV_SUCCESS); } /* Return with error if |h| == hmin */ if (SUNRabs(cv_mem->cv_h) <= cv_mem->cv_hmin*ONEPSM) return(CV_CONSTR_FAIL); /* Constraint correction is too large, reduce h by computing eta = h'/h */ N_VLinearSum(ONE, cv_mem->cv_zn[0], -ONE, 
cv_mem->cv_y, tmp); N_VProd(mm, tmp, tmp); cv_mem->cv_eta = PT9*N_VMinQuotient(cv_mem->cv_zn[0], tmp); cv_mem->cv_eta = SUNMAX(cv_mem->cv_eta, PT1); cv_mem->cv_eta = SUNMAX(cv_mem->cv_eta, cv_mem->cv_hmin / SUNRabs(cv_mem->cv_h)); /* Reattempt step with new step size */ return(CONSTR_RECVR); } /* * cvQuadNls * * This routine solves for the quadrature variables at the new step. * It does not solve a nonlinear system, but rather updates the * quadrature variables. The name for this function is just for * uniformity purposes. * * Possible return values (interpreted by cvHandleNFlag) * * CV_SUCCESS -> continue with error test * CV_QRHSFUNC_FAIL -> halt the integration * QRHSFUNC_RECVR -> predict again or stop if too many * */ static int cvQuadNls(CVodeMem cv_mem) { int retval; /* Save quadrature correction in acorQ */ retval = cv_mem->cv_fQ(cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_acorQ, cv_mem->cv_user_data); cv_mem->cv_nfQe++; if (retval < 0) return(CV_QRHSFUNC_FAIL); if (retval > 0) return(QRHSFUNC_RECVR); /* If needed, save the value of yQdot = fQ into ftempQ * for use in evaluating fQS */ if (cv_mem->cv_quadr_sensi) { N_VScale(ONE, cv_mem->cv_acorQ, cv_mem->cv_ftempQ); } N_VLinearSum(cv_mem->cv_h, cv_mem->cv_acorQ, -ONE, cv_mem->cv_znQ[1], cv_mem->cv_acorQ); N_VScale(cv_mem->cv_rl1, cv_mem->cv_acorQ, cv_mem->cv_acorQ); /* Apply correction to quadrature variables */ N_VLinearSum(ONE, cv_mem->cv_znQ[0], ONE, cv_mem->cv_acorQ, cv_mem->cv_yQ); return(CV_SUCCESS); } /* * cvQuadSensNls * * This routine solves for the quadrature sensitivity variables * at the new step. It does not solve a nonlinear system, but * rather updates the quadrature variables. The name for this * function is just for uniformity purposes. 
* * Possible return values (interpreted by cvHandleNFlag) * * CV_SUCCESS -> continue with error test * CV_QSRHSFUNC_FAIL -> halt the integration * QSRHSFUNC_RECVR -> predict again or stop if too many * */ static int cvQuadSensNls(CVodeMem cv_mem) { int is, retval; /* Save quadrature correction in acorQ */ retval = cv_mem->cv_fQS(cv_mem->cv_Ns, cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_yS, cv_mem->cv_ftempQ, cv_mem->cv_acorQS, cv_mem->cv_user_data, cv_mem->cv_tempv, cv_mem->cv_tempvQ); cv_mem->cv_nfQSe++; if (retval < 0) return(CV_QSRHSFUNC_FAIL); if (retval > 0) return(QSRHSFUNC_RECVR); for (is=0; iscv_Ns; is++) { N_VLinearSum(cv_mem->cv_h, cv_mem->cv_acorQS[is], -ONE, cv_mem->cv_znQS[1][is], cv_mem->cv_acorQS[is]); N_VScale(cv_mem->cv_rl1, cv_mem->cv_acorQS[is], cv_mem->cv_acorQS[is]); /* Apply correction to quadrature sensitivity variables */ N_VLinearSum(ONE, cv_mem->cv_znQS[0][is], ONE, cv_mem->cv_acorQS[is], cv_mem->cv_yQS[is]); } return(CV_SUCCESS); } /* * cvStgrNls * * This is a high-level routine that attempts to solve the * sensitivity linear systems using the attached nonlinear solver * once the states y_n were obtained and passed the error test. 
*/ static int cvStgrNls(CVodeMem cv_mem) { booleantype callSetup; int flag=CV_SUCCESS; long int nniS_inc; callSetup = SUNFALSE; if (cv_mem->cv_lsetup == NULL) cv_mem->cv_crateS = ONE; /* initial guess for the correction to the predictor */ N_VConst(ZERO, cv_mem->ycorStg); /* set sens solve flag */ cv_mem->sens_solve = SUNTRUE; /* solve the nonlinear system */ flag = SUNNonlinSolSolve(cv_mem->NLSstg, cv_mem->zn0Stg, cv_mem->ycorStg, cv_mem->ewtStg, cv_mem->cv_tq[4], callSetup, cv_mem); /* increment counter */ nniS_inc = 0; (void) SUNNonlinSolGetNumIters(cv_mem->NLSstg, &(nniS_inc)); cv_mem->cv_nniS += nniS_inc; /* reset sens solve flag */ cv_mem->sens_solve = SUNFALSE; /* if the solve failed return */ if (flag != CV_SUCCESS) return(flag); /* solve successful */ /* update the sensitivities based on the final correction from the nonlinear solver */ N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[0], ONE, cv_mem->cv_acorS, cv_mem->cv_yS); /* update Jacobian status */ cv_mem->cv_jcur = SUNFALSE; return(flag); } /* * cvStgr1Nls * * This is a high-level routine that attempts to solve the i-th * sensitivity linear system using the attached nonlinear solver * once the states y_n were obtained and passed the error test. 
*/
static int cvStgr1Nls(CVodeMem cv_mem, int is)
{
  booleantype callSetup;
  long int nniS1_inc;
  int flag=CV_SUCCESS;

  /* Linear solver setup is never forced here; if no lsetup routine is
     attached, reset the sensitivity convergence rate estimate */
  callSetup = SUNFALSE;
  if (cv_mem->cv_lsetup == NULL) cv_mem->cv_crateS = ONE;

  /* initial guess for the correction to the predictor */
  N_VConst(ZERO, cv_mem->cv_acorS[is]);

  /* set sens solve flag */
  cv_mem->sens_solve = SUNTRUE;

  /* solve the nonlinear system for sensitivity is only (STAGGERED1) */
  flag = SUNNonlinSolSolve(cv_mem->NLSstg1,
                           cv_mem->cv_znS[0][is], cv_mem->cv_acorS[is],
                           cv_mem->cv_ewtS[is], cv_mem->cv_tq[4],
                           callSetup, cv_mem);

  /* increment per-sensitivity nonlinear-iteration counter */
  nniS1_inc = 0;
  (void) SUNNonlinSolGetNumIters(cv_mem->NLSstg1, &(nniS1_inc));
  cv_mem->cv_nniS1[is] += nniS1_inc;

  /* reset sens solve flag */
  cv_mem->sens_solve = SUNFALSE;

  /* if the solve failed return */
  if (flag != CV_SUCCESS) return(flag);

  /* solve successful */

  /* update the sensitivity with the final correction from the
     nonlinear solver: yS[is] = znS[0][is] + acorS[is] */
  N_VLinearSum(ONE, cv_mem->cv_znS[0][is],
               ONE, cv_mem->cv_acorS[is], cv_mem->cv_yS[is]);

  /* update Jacobian status */
  cv_mem->cv_jcur = SUNFALSE;

  return(flag);
}

/*
 * cvHandleNFlag
 *
 * This routine takes action on the return value nflag = *nflagPtr
 * returned by cvNls, as follows:
 *
 * If cvNls succeeded in solving the nonlinear system, then
 * cvHandleNFlag returns the constant DO_ERROR_TEST, which tells cvStep
 * to perform the error test.
 *
 * If the nonlinear system was not solved successfully, then ncfn and
 * ncf = *ncfPtr are incremented and Nordsieck array zn is restored.
 *
 * If the solution of the nonlinear system failed due to an
 * unrecoverable failure by setup, we return the value CV_LSETUP_FAIL.
 *
 * If it failed due to an unrecoverable failure in solve, then we return
 * the value CV_LSOLVE_FAIL.
 *
 * If it failed due to an unrecoverable failure in rhs, then we return
 * the value CV_RHSFUNC_FAIL.
 *
 * If it failed due to an unrecoverable failure in quad rhs, then we return
 * the value CV_QRHSFUNC_FAIL.
* If it failed due to an unrecoverable failure in sensi rhs, then we return
 * the value CV_SRHSFUNC_FAIL.
 *
 * If it failed due to an unrecoverable failure in sensi quad rhs, then we
 * return the value CV_QSRHSFUNC_FAIL.
 *
 * Otherwise, a recoverable failure occurred when solving the nonlinear system
 * (cvNls returned SUN_NLS_CONV_RECVR, RHSFUNC_RECVR, or SRHSFUNC_RECVR).
 *
 * If ncf is now equal to maxncf or |h| = hmin, we return the value
 * CV_CONV_FAILURE (if SUN_NLS_CONV_RECVR),
 * CV_REPTD_RHSFUNC_ERR (if RHSFUNC_RECVR), or
 * CV_REPTD_SRHSFUNC_ERR (if SRHSFUNC_RECVR).
 * Otherwise, we set *nflagPtr = PREV_CONV_FAIL and return the value
 * PREDICT_AGAIN, telling cvStep to reattempt the step.
 */
static int cvHandleNFlag(CVodeMem cv_mem, int *nflagPtr, realtype saved_t,
                         int *ncfPtr, long int *ncfnPtr)
{
  int nflag;

  nflag = *nflagPtr;

  if (nflag == CV_SUCCESS) return(DO_ERROR_TEST);

  /* The nonlinear soln. failed; increment ncfn and restore zn */
  (*ncfnPtr)++;
  cvRestore(cv_mem, saved_t);

  /* Return if failed unrecoverably */
  if (nflag < 0) {
    if (nflag == CV_LSETUP_FAIL)         return(CV_LSETUP_FAIL);
    else if (nflag == CV_LSOLVE_FAIL)    return(CV_LSOLVE_FAIL);
    else if (nflag == CV_RHSFUNC_FAIL)   return(CV_RHSFUNC_FAIL);
    else if (nflag == CV_QRHSFUNC_FAIL)  return(CV_QRHSFUNC_FAIL);
    else if (nflag == CV_SRHSFUNC_FAIL)  return(CV_SRHSFUNC_FAIL);
    else if (nflag == CV_QSRHSFUNC_FAIL) return(CV_QSRHSFUNC_FAIL);
    else                                 return(CV_NLS_FAIL);
  }

  /* At this point, a recoverable error occurred. */
  (*ncfPtr)++;
  cv_mem->cv_etamax = ONE;

  /* If we had maxncf failures or |h| = hmin, return failure. */
  if ((SUNRabs(cv_mem->cv_h) <= cv_mem->cv_hmin*ONEPSM) ||
      (*ncfPtr == cv_mem->cv_maxncf)) {
    if (nflag == SUN_NLS_CONV_RECVR) return(CV_CONV_FAILURE);
    if (nflag == CONSTR_RECVR)       return(CV_CONSTR_FAIL);
    if (nflag == RHSFUNC_RECVR)      return(CV_REPTD_RHSFUNC_ERR);
    if (nflag == QRHSFUNC_RECVR)     return(CV_REPTD_QRHSFUNC_ERR);
    if (nflag == SRHSFUNC_RECVR)     return(CV_REPTD_SRHSFUNC_ERR);
    if (nflag == QSRHSFUNC_RECVR)    return(CV_REPTD_QSRHSFUNC_ERR);
  }

  /* Reduce step size; return to reattempt the step.
     Note that if nflag = CONSTR_RECVR, then eta was already set in
     cvCheckConstraints */
  if (nflag != CONSTR_RECVR)
    cv_mem->cv_eta = SUNMAX(ETACF,
                            cv_mem->cv_hmin / SUNRabs(cv_mem->cv_h));
  *nflagPtr = PREV_CONV_FAIL;
  cvRescale(cv_mem);

  return(PREDICT_AGAIN);
}

/*
 * cvRestore
 *
 * This routine restores the value of tn to saved_t and undoes the
 * prediction. After execution of cvRestore, the Nordsieck array zn has
 * the same values as before the call to cvPredict.
 */
static void cvRestore(CVodeMem cv_mem, realtype saved_t)
{
  int j, k;

  cv_mem->cv_tn = saved_t;

  /* Undo the repeated-addition prediction (reverse of cvPredict) for
     the state Nordsieck array */
  for (k = 1; k <= cv_mem->cv_q; k++)
    for (j = cv_mem->cv_q; j >= k; j--)
      N_VLinearSum(ONE, cv_mem->cv_zn[j-1], -ONE,
                   cv_mem->cv_zn[j], cv_mem->cv_zn[j-1]);

  /* ... and likewise for quadratures, if active */
  if (cv_mem->cv_quadr) {
    for (k = 1; k <= cv_mem->cv_q; k++)
      for (j = cv_mem->cv_q; j >= k; j--)
        N_VLinearSum(ONE, cv_mem->cv_znQ[j-1], -ONE,
                     cv_mem->cv_znQ[j], cv_mem->cv_znQ[j-1]);
  }

  /* ... sensitivities */
  if (cv_mem->cv_sensi) {
    for (k = 1; k <= cv_mem->cv_q; k++)
      for (j = cv_mem->cv_q; j >= k; j--)
        (void) N_VLinearSumVectorArray(cv_mem->cv_Ns,
                                       ONE, cv_mem->cv_znS[j-1],
                                       -ONE, cv_mem->cv_znS[j],
                                       cv_mem->cv_znS[j-1]);
  }

  /* ... and quadrature sensitivities */
  if (cv_mem->cv_quadr_sensi) {
    for (k = 1; k <= cv_mem->cv_q; k++)
      for (j = cv_mem->cv_q; j >= k; j--)
        (void) N_VLinearSumVectorArray(cv_mem->cv_Ns,
                                       ONE, cv_mem->cv_znQS[j-1],
                                       -ONE, cv_mem->cv_znQS[j],
                                       cv_mem->cv_znQS[j-1]);
  }
}

/*
 * -----------------------------------------------------------------
 * Error Test
 * -----------------------------------------------------------------
 */

/*
 *
cvDoErrorTest * * This routine performs the local error test, for the state, quadrature, * or sensitivity variables. Its last three arguments change depending * on which variables the error test is to be performed on. * * The weighted local error norm dsm is loaded into *dsmPtr, and * the test dsm ?<= 1 is made. * * If the test passes, cvDoErrorTest returns CV_SUCCESS. * * If the test fails, we undo the step just taken (call cvRestore) and * * - if maxnef error test failures have occurred or if SUNRabs(h) = hmin, * we return CV_ERR_FAILURE. * * - if more than MXNEF1 error test failures have occurred, an order * reduction is forced. If already at order 1, restart by reloading * zn from scratch (also znQ and znS if appropriate). * If f() fails, we return CV_RHSFUNC_FAIL or CV_UNREC_RHSFUNC_ERR; * if fQ() fails, we return CV_QRHSFUNC_FAIL or CV_UNREC_QRHSFUNC_ERR; * if cvSensRhsWrapper() fails, we return CV_SRHSFUNC_FAIL or CV_UNREC_SRHSFUNC_ERR; * (no recovery is possible at this stage). * * - otherwise, set *nflagPtr to PREV_ERR_FAIL, and return TRY_AGAIN. * */ static int cvDoErrorTest(CVodeMem cv_mem, int *nflagPtr, realtype saved_t, realtype acor_nrm, int *nefPtr, long int *netfPtr, realtype *dsmPtr) { realtype dsm; int retval, is; N_Vector wrk1, wrk2; dsm = acor_nrm * cv_mem->cv_tq[2]; /* If est. 
local error norm dsm passes test, return CV_SUCCESS */ *dsmPtr = dsm; if (dsm <= ONE) return(CV_SUCCESS); /* Test failed; increment counters, set nflag, and restore zn array */ (*nefPtr)++; (*netfPtr)++; *nflagPtr = PREV_ERR_FAIL; cvRestore(cv_mem, saved_t); /* At maxnef failures or |h| = hmin, return CV_ERR_FAILURE */ if ((SUNRabs(cv_mem->cv_h) <= cv_mem->cv_hmin*ONEPSM) || (*nefPtr == cv_mem->cv_maxnef)) return(CV_ERR_FAILURE); /* Set etamax = 1 to prevent step size increase at end of this step */ cv_mem->cv_etamax = ONE; /* Set h ratio eta from dsm, rescale, and return for retry of step */ if (*nefPtr <= MXNEF1) { cv_mem->cv_eta = ONE / (SUNRpowerR(BIAS2*dsm,ONE/cv_mem->cv_L) + ADDON); cv_mem->cv_eta = SUNMAX(ETAMIN, SUNMAX(cv_mem->cv_eta, cv_mem->cv_hmin / SUNRabs(cv_mem->cv_h))); if (*nefPtr >= SMALL_NEF) cv_mem->cv_eta = SUNMIN(cv_mem->cv_eta, ETAMXF); cvRescale(cv_mem); return(TRY_AGAIN); } /* After MXNEF1 failures, force an order reduction and retry step */ if (cv_mem->cv_q > 1) { cv_mem->cv_eta = SUNMAX(ETAMIN, cv_mem->cv_hmin / SUNRabs(cv_mem->cv_h)); cvAdjustOrder(cv_mem,-1); cv_mem->cv_L = cv_mem->cv_q; cv_mem->cv_q--; cv_mem->cv_qwait = cv_mem->cv_L; cvRescale(cv_mem); return(TRY_AGAIN); } /* If already at order 1, restart: reload zn, znQ, znS, znQS from scratch */ cv_mem->cv_eta = SUNMAX(ETAMIN, cv_mem->cv_hmin / SUNRabs(cv_mem->cv_h)); cv_mem->cv_h *= cv_mem->cv_eta; cv_mem->cv_next_h = cv_mem->cv_h; cv_mem->cv_hscale = cv_mem->cv_h; cv_mem->cv_qwait = LONG_WAIT; cv_mem->cv_nscon = 0; retval = cv_mem->cv_f(cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_tempv, cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) return(CV_RHSFUNC_FAIL); if (retval > 0) return(CV_UNREC_RHSFUNC_ERR); N_VScale(cv_mem->cv_h, cv_mem->cv_tempv, cv_mem->cv_zn[1]); if (cv_mem->cv_quadr) { retval = cv_mem->cv_fQ(cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_tempvQ, cv_mem->cv_user_data); cv_mem->cv_nfQe++; if (retval < 0) return(CV_QRHSFUNC_FAIL); if (retval > 0) 
return(CV_UNREC_QRHSFUNC_ERR); N_VScale(cv_mem->cv_h, cv_mem->cv_tempvQ, cv_mem->cv_znQ[1]); } if (cv_mem->cv_sensi) { wrk1 = cv_mem->cv_ftemp; wrk2 = cv_mem->cv_ftempS[0]; retval = cvSensRhsWrapper(cv_mem, cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_tempv, cv_mem->cv_znS[0], cv_mem->cv_tempvS, wrk1, wrk2); if (retval < 0) return(CV_SRHSFUNC_FAIL); if (retval > 0) return(CV_UNREC_SRHSFUNC_ERR); for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = cv_mem->cv_h; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_tempvS, cv_mem->cv_znS[1]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } if (cv_mem->cv_quadr_sensi) { wrk1 = cv_mem->cv_ftemp; wrk2 = cv_mem->cv_ftempQ; retval = cv_mem->cv_fQS(cv_mem->cv_Ns, cv_mem->cv_tn, cv_mem->cv_zn[0], cv_mem->cv_znS[0], cv_mem->cv_tempvQ, cv_mem->cv_tempvQS, cv_mem->cv_fQS_data, wrk1, wrk2); cv_mem->cv_nfQSe++; if (retval < 0) return(CV_QSRHSFUNC_FAIL); if (retval > 0) return(CV_UNREC_QSRHSFUNC_ERR); for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = cv_mem->cv_h; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_tempvQS, cv_mem->cv_znQS[1]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } return(TRY_AGAIN); } /* * ----------------------------------------------------------------- * Functions called after a successful step * ----------------------------------------------------------------- */ /* * cvCompleteStep * * This routine performs various update operations when the solution * to the nonlinear system has passed the local error test. * We increment the step counter nst, record the values hu and qu, * update the tau array, and apply the corrections to the zn array. * The tau[i] are the last q values of h, with tau[1] the most recent. * The counter qwait is decremented, and if qwait == 1 (and q < qmax) * we save acor and tq[5] for a possible order increase. 
*/
static void cvCompleteStep(CVodeMem cv_mem)
{
  int i;
  int is;

  cv_mem->cv_nst++;
  cv_mem->cv_nscon++;
  cv_mem->cv_hu = cv_mem->cv_h;
  cv_mem->cv_qu = cv_mem->cv_q;

  /* Shift the step-size history tau and record the step just taken.
     NOTE: this body restores the two loop bounds "is < cv_mem->cv_Ns",
     which had been corrupted to "iscv_Ns" (stripped "<cv_mem->" span). */
  for (i=cv_mem->cv_q; i >= 2; i--)
    cv_mem->cv_tau[i] = cv_mem->cv_tau[i-1];
  if ((cv_mem->cv_q==1) && (cv_mem->cv_nst > 1))
    cv_mem->cv_tau[2] = cv_mem->cv_tau[1];
  cv_mem->cv_tau[1] = cv_mem->cv_h;

  /* Apply correction to column j of zn: l_j * Delta_n */
  (void) N_VScaleAddMulti(cv_mem->cv_q+1, cv_mem->cv_l, cv_mem->cv_acor,
                          cv_mem->cv_zn, cv_mem->cv_zn);

  if (cv_mem->cv_quadr)
    (void) N_VScaleAddMulti(cv_mem->cv_q+1, cv_mem->cv_l, cv_mem->cv_acorQ,
                            cv_mem->cv_znQ, cv_mem->cv_znQ);

  if (cv_mem->cv_sensi)
    (void) N_VScaleAddMultiVectorArray(cv_mem->cv_Ns, cv_mem->cv_q+1,
                                       cv_mem->cv_l, cv_mem->cv_acorS,
                                       cv_mem->cv_znS, cv_mem->cv_znS);

  if (cv_mem->cv_quadr_sensi)
    (void) N_VScaleAddMultiVectorArray(cv_mem->cv_Ns, cv_mem->cv_q+1,
                                       cv_mem->cv_l, cv_mem->cv_acorQS,
                                       cv_mem->cv_znQS, cv_mem->cv_znQS);

  /* If necessary, store Delta_n in zn[qmax] to be used in order increase.
   * This actually will be Delta_{n-1} in the ELTE at q+1 since it happens at
   * the next to last step of order q before a possible one at order q+1 */
  cv_mem->cv_qwait--;
  if ((cv_mem->cv_qwait == 1) && (cv_mem->cv_q != cv_mem->cv_qmax)) {

    N_VScale(ONE, cv_mem->cv_acor, cv_mem->cv_zn[cv_mem->cv_qmax]);

    if (cv_mem->cv_quadr)
      N_VScale(ONE, cv_mem->cv_acorQ, cv_mem->cv_znQ[cv_mem->cv_qmax]);

    if (cv_mem->cv_sensi) {
      for (is=0; is < cv_mem->cv_Ns; is++)
        cv_mem->cv_cvals[is] = ONE;

      (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals,
                                 cv_mem->cv_acorS,
                                 cv_mem->cv_znS[cv_mem->cv_qmax]);
    }

    if (cv_mem->cv_quadr_sensi) {
      for (is=0; is < cv_mem->cv_Ns; is++)
        cv_mem->cv_cvals[is] = ONE;

      (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals,
                                 cv_mem->cv_acorQS,
                                 cv_mem->cv_znQS[cv_mem->cv_qmax]);
    }

    cv_mem->cv_saved_tq5 = cv_mem->cv_tq[5];
    cv_mem->cv_indx_acor = cv_mem->cv_qmax;
  }

#ifdef SUNDIALS_BUILD_WITH_MONITORING
  /* If user access function was provided, call it now */
  if (cv_mem->cv_monitorfun != NULL &&
      !(cv_mem->cv_nst % cv_mem->cv_monitor_interval)) {
    cv_mem->cv_monitorfun((void*) cv_mem, cv_mem->cv_user_data);
  }
#endif
}

/*
 * cvPrepareNextStep
 *
 * This routine handles the setting of stepsize and order for the
 * next step -- hprime and qprime. Along with hprime, it sets the
 * ratio eta = hprime/h. It also updates other state variables
 * related to a change of step size or order.
*/
static void cvPrepareNextStep(CVodeMem cv_mem, realtype dsm)
{
  /* If etamax = 1, defer step size or order changes */
  if (cv_mem->cv_etamax == ONE) {
    cv_mem->cv_qwait = SUNMAX(cv_mem->cv_qwait, 2);
    cv_mem->cv_qprime = cv_mem->cv_q;
    cv_mem->cv_hprime = cv_mem->cv_h;
    cv_mem->cv_eta = ONE;
    return;
  }

  /* etaq is the ratio of new to old h at the current order */
  cv_mem->cv_etaq = ONE /(SUNRpowerR(BIAS2*dsm,ONE/cv_mem->cv_L) + ADDON);

  /* If no order change, adjust eta and acor in cvSetEta and return */
  if (cv_mem->cv_qwait != 0) {
    cv_mem->cv_eta = cv_mem->cv_etaq;
    cv_mem->cv_qprime = cv_mem->cv_q;
    cvSetEta(cv_mem);
    return;
  }

  /* If qwait = 0, consider an order change.  etaqm1 and etaqp1 are
     the ratios of new to old h at orders q-1 and q+1, respectively.
     cvChooseEta selects the largest; cvSetEta adjusts eta and acor */
  cv_mem->cv_qwait = 2;
  cv_mem->cv_etaqm1 = cvComputeEtaqm1(cv_mem);
  cv_mem->cv_etaqp1 = cvComputeEtaqp1(cv_mem);
  cvChooseEta(cv_mem);
  cvSetEta(cv_mem);
}

/*
 * cvSetEta
 *
 * This routine adjusts the value of eta according to the various
 * heuristic limits and the optional input hmax.
 */
static void cvSetEta(CVodeMem cv_mem)
{
  /* If eta below the threshold THRESH, reject a change of step size */
  if (cv_mem->cv_eta < THRESH) {
    cv_mem->cv_eta = ONE;
    cv_mem->cv_hprime = cv_mem->cv_h;
  } else {
    /* Limit eta by etamax and hmax, then set hprime */
    cv_mem->cv_eta = SUNMIN(cv_mem->cv_eta, cv_mem->cv_etamax);
    cv_mem->cv_eta /= SUNMAX(ONE, SUNRabs(cv_mem->cv_h) *
                             cv_mem->cv_hmax_inv*cv_mem->cv_eta);
    cv_mem->cv_hprime = cv_mem->cv_h * cv_mem->cv_eta;
    /* reset the BDF stability-limit step counter on an order decrease */
    if (cv_mem->cv_qprime < cv_mem->cv_q) cv_mem->cv_nscon = 0;
  }
}

/*
 * cvComputeEtaqm1
 *
 * This routine computes and returns the value of etaqm1 for a
 * possible decrease in order by 1.
*/
static realtype cvComputeEtaqm1(CVodeMem cv_mem)
{
  realtype ddn;

  cv_mem->cv_etaqm1 = ZERO;
  if (cv_mem->cv_q > 1) {
    /* ddn = weighted norm of zn[q], augmented by whichever of the
       quadrature / sensitivity components participate in error control */
    ddn = N_VWrmsNorm(cv_mem->cv_zn[cv_mem->cv_q], cv_mem->cv_ewt);

    if ( cv_mem->cv_quadr && cv_mem->cv_errconQ )
      ddn = cvQuadUpdateNorm(cv_mem, ddn, cv_mem->cv_znQ[cv_mem->cv_q],
                             cv_mem->cv_ewtQ);

    if ( cv_mem->cv_sensi && cv_mem->cv_errconS )
      ddn = cvSensUpdateNorm(cv_mem, ddn, cv_mem->cv_znS[cv_mem->cv_q],
                             cv_mem->cv_ewtS);

    if ( cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS )
      ddn = cvQuadSensUpdateNorm(cv_mem, ddn, cv_mem->cv_znQS[cv_mem->cv_q],
                                 cv_mem->cv_ewtQS);

    ddn = ddn * cv_mem->cv_tq[1];
    cv_mem->cv_etaqm1 = ONE/(SUNRpowerR(BIAS1*ddn, ONE/cv_mem->cv_q) + ADDON);
  }
  return(cv_mem->cv_etaqm1);
}

/*
 * cvComputeEtaqp1
 *
 * This routine computes and returns the value of etaqp1 for a
 * possible increase in order by 1.
 */
static realtype cvComputeEtaqp1(CVodeMem cv_mem)
{
  realtype dup, cquot;

  cv_mem->cv_etaqp1 = ZERO;
  if (cv_mem->cv_q != cv_mem->cv_qmax) {
    /* saved_tq5 == 0 means no saved correction is available */
    if (cv_mem->cv_saved_tq5 == ZERO) return(cv_mem->cv_etaqp1);

    cquot = (cv_mem->cv_tq[5] / cv_mem->cv_saved_tq5) *
            SUNRpowerI(cv_mem->cv_h/cv_mem->cv_tau[2], cv_mem->cv_L);

    /* dup = weighted norm of acor - cquot*zn[qmax] (estimate of the
       local error at order q+1), over all error-controlled components */
    N_VLinearSum(-cquot, cv_mem->cv_zn[cv_mem->cv_qmax], ONE,
                 cv_mem->cv_acor, cv_mem->cv_tempv);
    dup = N_VWrmsNorm(cv_mem->cv_tempv, cv_mem->cv_ewt);

    if ( cv_mem->cv_quadr && cv_mem->cv_errconQ ) {
      N_VLinearSum(-cquot, cv_mem->cv_znQ[cv_mem->cv_qmax], ONE,
                   cv_mem->cv_acorQ, cv_mem->cv_tempvQ);
      dup = cvQuadUpdateNorm(cv_mem, dup, cv_mem->cv_tempvQ, cv_mem->cv_ewtQ);
    }

    if ( cv_mem->cv_sensi && cv_mem->cv_errconS ) {
      (void) N_VLinearSumVectorArray(cv_mem->cv_Ns,
                                     -cquot, cv_mem->cv_znS[cv_mem->cv_qmax],
                                     ONE, cv_mem->cv_acorS,
                                     cv_mem->cv_tempvS);
      dup = cvSensUpdateNorm(cv_mem, dup, cv_mem->cv_tempvS, cv_mem->cv_ewtS);
    }

    if ( cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS ) {
      (void) N_VLinearSumVectorArray(cv_mem->cv_Ns,
                                     -cquot, cv_mem->cv_znQS[cv_mem->cv_qmax],
                                     ONE, cv_mem->cv_acorQS,
                                     cv_mem->cv_tempvQS);
      dup = cvSensUpdateNorm(cv_mem, dup,
                             cv_mem->cv_tempvQS, cv_mem->cv_ewtQS);
    }

    dup = dup * cv_mem->cv_tq[3];
    cv_mem->cv_etaqp1 = ONE /
      (SUNRpowerR(BIAS3*dup, ONE/(cv_mem->cv_L+1)) + ADDON);
  }
  return(cv_mem->cv_etaqp1);
}

/*
 * cvChooseEta
 * Given etaqm1, etaq, etaqp1 (the values of eta for qprime =
 * q - 1, q, or q + 1, respectively), this routine chooses the
 * maximum eta value, sets eta to that value, and sets qprime to the
 * corresponding value of q. If there is a tie, the preference
 * order is to (1) keep the same order, then (2) decrease the order,
 * and finally (3) increase the order. If the maximum eta value
 * is below the threshold THRESH, the order is kept unchanged and
 * eta is set to 1.
 *
 * NOTE: this body restores the two loop bounds "is < cv_mem->cv_Ns",
 * which had been corrupted to "iscv_Ns" (stripped "<cv_mem->" span).
 */
static void cvChooseEta(CVodeMem cv_mem)
{
  realtype etam;
  int is;

  etam = SUNMAX(cv_mem->cv_etaqm1,
                SUNMAX(cv_mem->cv_etaq, cv_mem->cv_etaqp1));

  if (etam < THRESH) {
    cv_mem->cv_eta = ONE;
    cv_mem->cv_qprime = cv_mem->cv_q;
    return;
  }

  if (etam == cv_mem->cv_etaq) {

    cv_mem->cv_eta = cv_mem->cv_etaq;
    cv_mem->cv_qprime = cv_mem->cv_q;

  } else if (etam == cv_mem->cv_etaqm1) {

    cv_mem->cv_eta = cv_mem->cv_etaqm1;
    cv_mem->cv_qprime = cv_mem->cv_q - 1;

  } else {

    cv_mem->cv_eta = cv_mem->cv_etaqp1;
    cv_mem->cv_qprime = cv_mem->cv_q + 1;

    if (cv_mem->cv_lmm == CV_BDF) {
      /*
       * Store Delta_n in zn[qmax] to be used in order increase
       *
       * This happens at the last step of order q before an increase
       * to order q+1, so it represents Delta_n in the ELTE at q+1
       */
      N_VScale(ONE, cv_mem->cv_acor, cv_mem->cv_zn[cv_mem->cv_qmax]);

      if (cv_mem->cv_quadr && cv_mem->cv_errconQ)
        N_VScale(ONE, cv_mem->cv_acorQ, cv_mem->cv_znQ[cv_mem->cv_qmax]);

      if (cv_mem->cv_sensi && cv_mem->cv_errconS) {
        for (is=0; is < cv_mem->cv_Ns; is++)
          cv_mem->cv_cvals[is] = ONE;

        (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals,
                                   cv_mem->cv_acorS,
                                   cv_mem->cv_znS[cv_mem->cv_qmax]);
      }

      if (cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS) {
        for (is=0; is < cv_mem->cv_Ns; is++)
          cv_mem->cv_cvals[is] = ONE;

        (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals,
                                   cv_mem->cv_acorQS,
cv_mem->cv_znQS[cv_mem->cv_qmax]);
      }
    }
  }
}

/*
 * -----------------------------------------------------------------
 * Function to handle failures
 * -----------------------------------------------------------------
 */

/*
 * cvHandleFailure
 *
 * This routine prints error messages for all cases of failure by
 * cvHin and cvStep.
 * It returns to CVode the value that CVode is to return to the user.
 */
static int cvHandleFailure(CVodeMem cv_mem, int flag)
{
  /* Set vector of absolute weighted local errors */
  /*
  N_VProd(acor, ewt, tempv);
  N_VAbs(tempv, tempv);
  */

  /* Depending on flag, print error message and return error flag */
  switch (flag) {
  case CV_ERR_FAILURE:
    cvProcessError(cv_mem, CV_ERR_FAILURE, "CVODES", "CVode",
                   MSGCV_ERR_FAILS, cv_mem->cv_tn, cv_mem->cv_h);
    break;
  case CV_CONV_FAILURE:
    cvProcessError(cv_mem, CV_CONV_FAILURE, "CVODES", "CVode",
                   MSGCV_CONV_FAILS, cv_mem->cv_tn, cv_mem->cv_h);
    break;
  case CV_LSETUP_FAIL:
    cvProcessError(cv_mem, CV_LSETUP_FAIL, "CVODES", "CVode",
                   MSGCV_SETUP_FAILED, cv_mem->cv_tn);
    break;
  case CV_LSOLVE_FAIL:
    cvProcessError(cv_mem, CV_LSOLVE_FAIL, "CVODES", "CVode",
                   MSGCV_SOLVE_FAILED, cv_mem->cv_tn);
    break;
  case CV_RHSFUNC_FAIL:
    cvProcessError(cv_mem, CV_RHSFUNC_FAIL, "CVODES", "CVode",
                   MSGCV_RHSFUNC_FAILED, cv_mem->cv_tn);
    break;
  case CV_UNREC_RHSFUNC_ERR:
    cvProcessError(cv_mem, CV_UNREC_RHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_RHSFUNC_UNREC, cv_mem->cv_tn);
    break;
  case CV_REPTD_RHSFUNC_ERR:
    cvProcessError(cv_mem, CV_REPTD_RHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_RHSFUNC_REPTD, cv_mem->cv_tn);
    break;
  case CV_RTFUNC_FAIL:
    cvProcessError(cv_mem, CV_RTFUNC_FAIL, "CVODES", "CVode",
                   MSGCV_RTFUNC_FAILED, cv_mem->cv_tn);
    break;
  case CV_QRHSFUNC_FAIL:
    cvProcessError(cv_mem, CV_QRHSFUNC_FAIL, "CVODES", "CVode",
                   MSGCV_QRHSFUNC_FAILED, cv_mem->cv_tn);
    break;
  case CV_UNREC_QRHSFUNC_ERR:
    cvProcessError(cv_mem, CV_UNREC_QRHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_QRHSFUNC_UNREC, cv_mem->cv_tn);
    break;
  case CV_REPTD_QRHSFUNC_ERR:
    cvProcessError(cv_mem, CV_REPTD_QRHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_QRHSFUNC_REPTD, cv_mem->cv_tn);
    break;
  case CV_SRHSFUNC_FAIL:
    cvProcessError(cv_mem, CV_SRHSFUNC_FAIL, "CVODES", "CVode",
                   MSGCV_SRHSFUNC_FAILED, cv_mem->cv_tn);
    break;
  case CV_UNREC_SRHSFUNC_ERR:
    cvProcessError(cv_mem, CV_UNREC_SRHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_SRHSFUNC_UNREC, cv_mem->cv_tn);
    break;
  case CV_REPTD_SRHSFUNC_ERR:
    cvProcessError(cv_mem, CV_REPTD_SRHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_SRHSFUNC_REPTD, cv_mem->cv_tn);
    break;
  case CV_QSRHSFUNC_FAIL:
    cvProcessError(cv_mem, CV_QSRHSFUNC_FAIL, "CVODES", "CVode",
                   MSGCV_QSRHSFUNC_FAILED, cv_mem->cv_tn);
    break;
  case CV_UNREC_QSRHSFUNC_ERR:
    cvProcessError(cv_mem, CV_UNREC_QSRHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_QSRHSFUNC_UNREC, cv_mem->cv_tn);
    break;
  case CV_REPTD_QSRHSFUNC_ERR:
    cvProcessError(cv_mem, CV_REPTD_QSRHSFUNC_ERR, "CVODES", "CVode",
                   MSGCV_QSRHSFUNC_REPTD, cv_mem->cv_tn);
    break;
  case CV_TOO_CLOSE:
    cvProcessError(cv_mem, CV_TOO_CLOSE, "CVODES", "CVode",
                   MSGCV_TOO_CLOSE);
    break;
  case CV_MEM_NULL:
    cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVode",
                   MSGCV_NO_MEM);
    break;
  case SUN_NLS_MEM_NULL:
    cvProcessError(cv_mem, CV_MEM_NULL, "CVODES", "CVode",
                   MSGCV_NLS_INPUT_NULL, cv_mem->cv_tn);
    break;
  case CV_NLS_SETUP_FAIL:
    cvProcessError(cv_mem, CV_NLS_SETUP_FAIL, "CVODES", "CVode",
                   MSGCV_NLS_SETUP_FAILED, cv_mem->cv_tn);
    break;
  case CV_CONSTR_FAIL:
    cvProcessError(cv_mem, CV_CONSTR_FAIL, "CVODES", "CVode",
                   MSGCV_FAILED_CONSTR, cv_mem->cv_tn);
    break;
  case CV_NLS_FAIL:
    cvProcessError(cv_mem, CV_NLS_FAIL, "CVODES", "CVode",
                   MSGCV_NLS_FAIL, cv_mem->cv_tn);
    break;
  default:
    /* This return should never happen */
    cvProcessError(cv_mem, CV_UNRECOGNIZED_ERR, "CVODES", "CVode",
                   "CVODES encountered an unrecognized error. Please report this to the Sundials developers at sundials-users@llnl.gov");
    return (CV_UNRECOGNIZED_ERR);
  }

  return(flag);
}

/*
 * -----------------------------------------------------------------
 * Functions for BDF Stability Limit Detection
 * -----------------------------------------------------------------
 */

/*
 * cvBDFStab
 *
 * This routine handles the BDF Stability Limit Detection Algorithm
 * STALD. It is called if lmm = CV_BDF and the SLDET option is on.
 * If the order is 3 or more, the required norm data is saved.
 * If a decision to reduce order has not already been made, and
 * enough data has been saved, cvSLdet is called. If it signals
 * a stability limit violation, the order is reduced, and the step
 * size is reset accordingly.
 */
static void cvBDFStab(CVodeMem cv_mem)
{
  int i,k, ldflag, factorial;
  realtype sq, sqm1, sqm2;

  /* If order is 3 or greater, then save scaled derivative data,
     push old data down in i, then add current values to top. */
  if (cv_mem->cv_q >= 3) {
    for (k = 1; k <= 3; k++)
      for (i = 5; i >= 2; i--)
        cv_mem->cv_ssdat[i][k] = cv_mem->cv_ssdat[i-1][k];
    factorial = 1;
    for (i = 1; i <= cv_mem->cv_q-1; i++) factorial *= i;
    /* sq, sqm1, sqm2 are scaled derivative norms at orders q+1, q, q-1 */
    sq = factorial * cv_mem->cv_q * (cv_mem->cv_q+1) *
         cv_mem->cv_acnrm / SUNMAX(cv_mem->cv_tq[5],TINY);
    sqm1 = factorial * cv_mem->cv_q *
           N_VWrmsNorm(cv_mem->cv_zn[cv_mem->cv_q], cv_mem->cv_ewt);
    sqm2 = factorial *
           N_VWrmsNorm(cv_mem->cv_zn[cv_mem->cv_q-1], cv_mem->cv_ewt);
    cv_mem->cv_ssdat[1][1] = sqm2*sqm2;
    cv_mem->cv_ssdat[1][2] = sqm1*sqm1;
    cv_mem->cv_ssdat[1][3] = sq*sq;
  }

  if (cv_mem->cv_qprime >= cv_mem->cv_q) {

    /* If order is 3 or greater, and enough ssdat has been saved,
       nscon >= q+5, then call stability limit detection routine. */
    if ( (cv_mem->cv_q >= 3) && (cv_mem->cv_nscon >= cv_mem->cv_q+5) ) {
      ldflag = cvSLdet(cv_mem);
      if (ldflag > 3) {
        /* A stability limit violation is indicated by
           a return flag of 4, 5, or 6.
           Reduce new order.
*/
        cv_mem->cv_qprime = cv_mem->cv_q-1;
        cv_mem->cv_eta = cv_mem->cv_etaqm1;
        /* limit eta by etamax and hmax, as in cvSetEta */
        cv_mem->cv_eta = SUNMIN(cv_mem->cv_eta,cv_mem->cv_etamax);
        cv_mem->cv_eta = cv_mem->cv_eta /
          SUNMAX(ONE,SUNRabs(cv_mem->cv_h)*cv_mem->cv_hmax_inv*cv_mem->cv_eta);
        cv_mem->cv_hprime = cv_mem->cv_h * cv_mem->cv_eta;
        /* count the stability-limit-induced order reductions */
        cv_mem->cv_nor = cv_mem->cv_nor + 1;
      }
    }
  }
  else {
    /* Otherwise, let order increase happen, and
       reset stability limit counter, nscon. */
    cv_mem->cv_nscon = 0;
  }
}

/*
 * cvSLdet
 *
 * This routine detects stability limitation using stored scaled
 * derivatives data. cvSLdet returns the magnitude of the
 * dominate characteristic root, rr. The presence of a stability
 * limit is indicated by rr > "something a little less then 1.0",
 * and a positive kflag. This routine should only be called if
 * order is greater than or equal to 3, and data has been collected
 * for 5 time steps.
 *
 * Returned values:
 *    kflag = 1 -> Found stable characteristic root, normal matrix case
 *    kflag = 2 -> Found stable characteristic root, quartic solution
 *    kflag = 3 -> Found stable characteristic root, quartic solution,
 *                 with Newton correction
 *    kflag = 4 -> Found stability violation, normal matrix case
 *    kflag = 5 -> Found stability violation, quartic solution
 *    kflag = 6 -> Found stability violation, quartic solution,
 *                 with Newton correction
 *
 *    kflag < 0 -> No stability limitation,
 *                 or could not compute limitation.
 *
 *    kflag = -1 -> Min/max ratio of ssdat too small.
 *    kflag = -2 -> For normal matrix case, vmax > vrrt2*vrrt2
 *    kflag = -3 -> For normal matrix case, The three ratios
 *                  are inconsistent.
 *    kflag = -4 -> Small coefficient prevents elimination of quartics.
 *    kflag = -5 -> R value from quartics not consistent.
 *    kflag = -6 -> No corrected root passes test on qk values
 *    kflag = -7 -> Trouble solving for sigsq.
 *    kflag = -8 -> Trouble solving for B, or R via B.
 *    kflag = -9 -> R via sigsq[k] disagrees with R from data.
*/
static int cvSLdet(CVodeMem cv_mem)
{
  int i, k, j, it, kmin = 0, kflag = 0;
  realtype rat[5][4], rav[4], qkr[4], sigsq[4], smax[4], ssmax[4];
  realtype drr[4], rrc[4],sqmx[4], qjk[4][4], vrat[5], qc[6][4], qco[6][4];
  realtype rr, rrcut, vrrtol, vrrt2, sqtol, rrtol;
  realtype smink, smaxk, sumrat, sumrsq, vmin, vmax, drrmax, adrr;
  realtype tem, sqmax, saqk, qp, s, sqmaxk, saqj, sqmin;
  realtype rsa, rsb, rsc, rsd, rd1a, rd1b, rd1c;
  realtype rd2a, rd2b, rd3a, cest1, corr1;
  realtype ratp, ratm, qfac1, qfac2, bb, rrb;

  /* The following are cutoffs and tolerances used by this routine */
  rrcut = RCONST(0.98);
  vrrtol = RCONST(1.0e-4);
  vrrt2 = RCONST(5.0e-4);
  sqtol = RCONST(1.0e-3);
  rrtol = RCONST(1.0e-2);

  rr = ZERO;

  /* Index k corresponds to the degree of the interpolating polynomial. */
  /*       k = 1 -> q-1          */
  /*       k = 2 -> q            */
  /*       k = 3 -> q+1          */

  /* Index i is a backward-in-time index, i = 1 -> current time, */
  /*       i = 2 -> previous step, etc                           */

  /* get maxima, minima, and variances, and form quartic coefficients */
  for (k=1; k<=3; k++) {
    smink = cv_mem->cv_ssdat[1][k];
    smaxk = ZERO;

    for (i=1; i<=5; i++) {
      smink = SUNMIN(smink,cv_mem->cv_ssdat[i][k]);
      smaxk = SUNMAX(smaxk,cv_mem->cv_ssdat[i][k]);
    }

    /* data spread too wide: cannot analyze reliably */
    if (smink < TINY*smaxk) {
      kflag = -1;
      return(kflag);
    }
    smax[k] = smaxk;
    ssmax[k] = smaxk*smaxk;

    sumrat = ZERO;
    sumrsq = ZERO;
    for (i=1; i<=4; i++) {
      rat[i][k] = cv_mem->cv_ssdat[i][k] / cv_mem->cv_ssdat[i+1][k];
      sumrat = sumrat + rat[i][k];
      sumrsq = sumrsq + rat[i][k]*rat[i][k];
    }
    /* mean and variance of successive-data ratios */
    rav[k] = FOURTH*sumrat;
    vrat[k] = SUNRabs(FOURTH*sumrsq - rav[k]*rav[k]);

    qc[5][k] = cv_mem->cv_ssdat[1][k] * cv_mem->cv_ssdat[3][k] -
               cv_mem->cv_ssdat[2][k] * cv_mem->cv_ssdat[2][k];
    qc[4][k] = cv_mem->cv_ssdat[2][k] * cv_mem->cv_ssdat[3][k] -
               cv_mem->cv_ssdat[1][k] * cv_mem->cv_ssdat[4][k];
    qc[3][k] = ZERO;
    qc[2][k] = cv_mem->cv_ssdat[2][k] * cv_mem->cv_ssdat[5][k] -
               cv_mem->cv_ssdat[3][k] * cv_mem->cv_ssdat[4][k];
    qc[1][k] = cv_mem->cv_ssdat[4][k] * cv_mem->cv_ssdat[4][k] -
               cv_mem->cv_ssdat[3][k] * cv_mem->cv_ssdat[5][k];

    for (i=1; i<=5; i++) qco[i][k] = qc[i][k];

  }  /* End of k loop */

  /* Isolate normal or nearly-normal matrix case. The three quartics will
     have a common or nearly-common root in this case.
     Return a kflag = 1 if this procedure works. If the three roots
     differ more than vrrt2, return error kflag = -3. */
  vmin = SUNMIN(vrat[1],SUNMIN(vrat[2],vrat[3]));
  vmax = SUNMAX(vrat[1],SUNMAX(vrat[2],vrat[3]));

  if (vmin < vrrtol*vrrtol) {

    if (vmax > vrrt2*vrrt2) {
      kflag = -2;
      return(kflag);
    } else {
      rr = (rav[1] + rav[2] + rav[3])/THREE;
      drrmax = ZERO;
      for (k = 1;k<=3;k++) {
        adrr = SUNRabs(rav[k] - rr);
        drrmax = SUNMAX(drrmax, adrr);
      }
      if (drrmax > vrrt2) { kflag = -3; return(kflag); }

      kflag = 1;

      /* can compute characteristic root, drop to next section */
    }

  } else {

    /* use the quartics to get rr. */
    if (SUNRabs(qco[1][1]) < TINY*ssmax[1]) {
      kflag = -4;
      return(kflag);
    }

    /* Gaussian-style elimination of the leading quartic coefficients */
    tem = qco[1][2]/qco[1][1];
    for (i=2; i<=5; i++) {
      qco[i][2] = qco[i][2] - tem*qco[i][1];
    }

    qco[1][2] = ZERO;
    tem = qco[1][3]/qco[1][1];
    for (i=2; i<=5; i++) {
      qco[i][3] = qco[i][3] - tem*qco[i][1];
    }
    qco[1][3] = ZERO;

    if (SUNRabs(qco[2][2]) < TINY*ssmax[2]) {
      kflag = -4;
      return(kflag);
    }

    tem = qco[2][3]/qco[2][2];
    for (i=3; i<=5; i++) {
      qco[i][3] = qco[i][3] - tem*qco[i][2];
    }

    if (SUNRabs(qco[4][3]) < TINY*ssmax[3]) {
      kflag = -4;
      return(kflag);
    }

    rr = -qco[5][3]/qco[4][3];

    if (rr < TINY || rr > HUNDRED) {
      kflag = -5;
      return(kflag);
    }

    /* residuals of the three quartics at the candidate root rr */
    for (k=1; k<=3; k++)
      qkr[k] = qc[5][k] + rr*(qc[4][k] + rr*rr*(qc[2][k] + rr*qc[1][k]));

    sqmax = ZERO;
    for (k=1; k<=3; k++) {
      saqk = SUNRabs(qkr[k])/ssmax[k];
      if (saqk > sqmax) sqmax = saqk;
    }

    if (sqmax < sqtol) {
      kflag = 2;

      /* can compute characteristic root, drop to "given rr,etc" */

    } else {

      /* do Newton corrections to improve rr. */
      for (it=1; it<=3; it++) {
        for (k=1; k<=3; k++) {
          qp = qc[4][k] + rr*rr*(THREE*qc[2][k] + rr*FOUR*qc[1][k]);
          drr[k] = ZERO;
          if (SUNRabs(qp) > TINY*ssmax[k]) drr[k] = -qkr[k]/qp;
          rrc[k] = rr + drr[k];
        }

        for (k=1; k<=3; k++) {
          s = rrc[k];
          sqmaxk = ZERO;
          for (j=1; j<=3; j++) {
            qjk[j][k] = qc[5][j] + s*(qc[4][j] + s*s*(qc[2][j] + s*qc[1][j]));
            saqj = SUNRabs(qjk[j][k])/ssmax[j];
            if (saqj > sqmaxk) sqmaxk = saqj;
          }
          sqmx[k] = sqmaxk;
        }

        /* pick the corrected root with the smallest worst-case residual */
        sqmin = sqmx[1] + ONE;
        for (k=1; k<=3; k++) {
          if (sqmx[k] < sqmin) {
            kmin = k;
            sqmin = sqmx[k];
          }
        }
        rr = rrc[kmin];

        if (sqmin < sqtol) {
          kflag = 3;
          /* can compute characteristic root */
          /* break out of Newton correction loop and drop to "given rr,etc" */
          break;
        } else {
          for (j=1; j<=3; j++) {
            qkr[j] = qjk[j][kmin];
          }
        }
      } /* end of Newton correction loop */

      if (sqmin > sqtol) {
        kflag = -6;
        return(kflag);
      }
    } /* end of if (sqmax < sqtol) else */
  } /* end of if (vmin < vrrtol*vrrtol) else, quartics to get rr. */

  /* given rr, find sigsq[k] and verify rr. */
  /* All positive kflag drop to this section */
  for (k=1; k<=3; k++) {
    rsa = cv_mem->cv_ssdat[1][k];
    rsb = cv_mem->cv_ssdat[2][k]*rr;
    rsc = cv_mem->cv_ssdat[3][k]*rr*rr;
    rsd = cv_mem->cv_ssdat[4][k]*rr*rr*rr;
    rd1a = rsa - rsb;
    rd1b = rsb - rsc;
    rd1c = rsc - rsd;
    rd2a = rd1a - rd1b;
    rd2b = rd1b - rd1c;
    rd3a = rd2a - rd2b;

    if (SUNRabs(rd1b) < TINY*smax[k]) {
      kflag = -7;
      return(kflag);
    }

    cest1 = -rd3a/rd1b;

    if (cest1 < TINY || cest1 > FOUR) {
      kflag = -7;
      return(kflag);
    }
    corr1 = (rd2b/cest1)/(rr*rr);
    sigsq[k] = cv_mem->cv_ssdat[3][k] + corr1;
  }

  if (sigsq[2] < TINY) {
    kflag = -8;
    return(kflag);
  }

  ratp = sigsq[3]/sigsq[2];
  ratm = sigsq[1]/sigsq[2];
  qfac1 = FOURTH*(cv_mem->cv_q*cv_mem->cv_q - ONE);
  qfac2 = TWO/(cv_mem->cv_q - ONE);
  bb = ratp*ratm - ONE - qfac1*ratp;
  tem = ONE - qfac2*bb;

  if (SUNRabs(tem) < TINY) {
    kflag = -8;
    return(kflag);
  }

  rrb = ONE/tem;

  /* consistency check: rr from B must agree with rr from the data */
  if (SUNRabs(rrb - rr) > rrtol) {
    kflag = -9;
    return(kflag);
  }

  /* Check to see if rr is above cutoff rrcut */
  if (rr > rrcut) {
    if (kflag == 1) kflag = 4;
    if (kflag == 2) kflag = 5;
    if (kflag == 3) kflag = 6;
  }

  /* All positive kflag returned at this point */
  return(kflag);
}

/*
 * -----------------------------------------------------------------
 * Functions for rootfinding
 * -----------------------------------------------------------------
 */

/*
 * cvRcheck1
 *
 * This routine completes the initialization of rootfinding memory
 * information, and checks whether g has a zero both at and very near
 * the initial point of the IVP.
 *
 * This routine returns an int equal to:
 *  CV_RTFUNC_FAIL < 0 if the g function failed, or
 *  CV_SUCCESS     = 0 otherwise.
 */
static int cvRcheck1(CVodeMem cv_mem)
{
  int i, retval;
  realtype smallh, hratio, tplus;
  booleantype zroot;

  for (i = 0; i < cv_mem->cv_nrtfn; i++)
    cv_mem->cv_iroots[i] = 0;
  cv_mem->cv_tlo = cv_mem->cv_tn;
  /* root-location tolerance, proportional to unit roundoff */
  cv_mem->cv_ttol = (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_h)) *
                    cv_mem->cv_uround*HUNDRED;

  /* Evaluate g at initial t and check for zero values. */
  retval = cv_mem->cv_gfun(cv_mem->cv_tlo, cv_mem->cv_zn[0],
                           cv_mem->cv_glo, cv_mem->cv_user_data);
  cv_mem->cv_nge = 1;
  if (retval != 0) return(CV_RTFUNC_FAIL);

  zroot = SUNFALSE;
  for (i = 0; i < cv_mem->cv_nrtfn; i++) {
    if (SUNRabs(cv_mem->cv_glo[i]) == ZERO) {
      zroot = SUNTRUE;
      cv_mem->cv_gactive[i] = SUNFALSE;
    }
  }
  if (!zroot) return(CV_SUCCESS);

  /* Some g_i is zero at t0; look at g at t0+(small increment). */
  hratio = SUNMAX(cv_mem->cv_ttol/SUNRabs(cv_mem->cv_h), PT1);
  smallh = hratio*cv_mem->cv_h;
  tplus = cv_mem->cv_tlo + smallh;
  N_VLinearSum(ONE, cv_mem->cv_zn[0], hratio,
               cv_mem->cv_zn[1], cv_mem->cv_y);
  retval = cv_mem->cv_gfun(tplus, cv_mem->cv_y,
                           cv_mem->cv_ghi, cv_mem->cv_user_data);
  cv_mem->cv_nge++;
  if (retval != 0) return(CV_RTFUNC_FAIL);

  /* We check now only the components of g which were exactly 0.0 at t0
   * to see if we can 'activate' them.
*/ for (i = 0; i < cv_mem->cv_nrtfn; i++) { if (!cv_mem->cv_gactive[i] && SUNRabs(cv_mem->cv_ghi[i]) != ZERO) { cv_mem->cv_gactive[i] = SUNTRUE; cv_mem->cv_glo[i] = cv_mem->cv_ghi[i]; } } return(CV_SUCCESS); } /* * cvRcheck2 * * This routine checks for exact zeros of g at the last root found, * if the last return was a root. It then checks for a close pair of * zeros (an error condition), and for a new root at a nearby point. * The array glo = g(tlo) at the left endpoint of the search interval * is adjusted if necessary to assure that all g_i are nonzero * there, before returning to do a root search in the interval. * * On entry, tlo = tretlast is the last value of tret returned by * CVode. This may be the previous tn, the previous tout value, * or the last root location. * * This routine returns an int equal to: * CV_RTFUNC_FAIL < 0 if the g function failed, or * CLOSERT = 3 if a close pair of zeros was found, or * RTFOUND = 1 if a new zero of g was found near tlo, or * CV_SUCCESS = 0 otherwise. */ static int cvRcheck2(CVodeMem cv_mem) { int i, retval; realtype smallh, hratio, tplus; booleantype zroot; if (cv_mem->cv_irfnd == 0) return(CV_SUCCESS); (void) CVodeGetDky(cv_mem, cv_mem->cv_tlo, 0, cv_mem->cv_y); retval = cv_mem->cv_gfun(cv_mem->cv_tlo, cv_mem->cv_y, cv_mem->cv_glo, cv_mem->cv_user_data); cv_mem->cv_nge++; if (retval != 0) return(CV_RTFUNC_FAIL); zroot = SUNFALSE; for (i = 0; i < cv_mem->cv_nrtfn; i++) cv_mem->cv_iroots[i] = 0; for (i = 0; i < cv_mem->cv_nrtfn; i++) { if (!cv_mem->cv_gactive[i]) continue; if (SUNRabs(cv_mem->cv_glo[i]) == ZERO) { zroot = SUNTRUE; cv_mem->cv_iroots[i] = 1; } } if (!zroot) return(CV_SUCCESS); /* One or more g_i has a zero at tlo. Check g at tlo+smallh. */ cv_mem->cv_ttol = (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_h)) * cv_mem->cv_uround * HUNDRED; smallh = (cv_mem->cv_h > ZERO) ? 
cv_mem->cv_ttol : -cv_mem->cv_ttol; tplus = cv_mem->cv_tlo + smallh; if ( (tplus - cv_mem->cv_tn)*cv_mem->cv_h >= ZERO) { hratio = smallh/cv_mem->cv_h; N_VLinearSum(ONE, cv_mem->cv_y, hratio, cv_mem->cv_zn[1], cv_mem->cv_y); } else { (void) CVodeGetDky(cv_mem, tplus, 0, cv_mem->cv_y); } retval = cv_mem->cv_gfun(tplus, cv_mem->cv_y, cv_mem->cv_ghi, cv_mem->cv_user_data); cv_mem->cv_nge++; if (retval != 0) return(CV_RTFUNC_FAIL); /* Check for close roots (error return), for a new zero at tlo+smallh, and for a g_i that changed from zero to nonzero. */ zroot = SUNFALSE; for (i = 0; i < cv_mem->cv_nrtfn; i++) { if (!cv_mem->cv_gactive[i]) continue; if (SUNRabs(cv_mem->cv_ghi[i]) == ZERO) { if (cv_mem->cv_iroots[i] == 1) return(CLOSERT); zroot = SUNTRUE; cv_mem->cv_iroots[i] = 1; } else { if (cv_mem->cv_iroots[i] == 1) cv_mem->cv_glo[i] = cv_mem->cv_ghi[i]; } } if (zroot) return(RTFOUND); return(CV_SUCCESS); } /* * cvRcheck3 * * This routine interfaces to cvRootfind to look for a root of g * between tlo and either tn or tout, whichever comes first. * Only roots beyond tlo in the direction of integration are sought. * * This routine returns an int equal to: * CV_RTFUNC_FAIL < 0 if the g function failed, or * RTFOUND = 1 if a root of g was found, or * CV_SUCCESS = 0 otherwise. */ static int cvRcheck3(CVodeMem cv_mem) { int i, ier, retval; /* Set thi = tn or tout, whichever comes first; set y = y(thi). */ if (cv_mem->cv_taskc == CV_ONE_STEP) { cv_mem->cv_thi = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], cv_mem->cv_y); } if (cv_mem->cv_taskc == CV_NORMAL) { if ( (cv_mem->cv_toutc - cv_mem->cv_tn)*cv_mem->cv_h >= ZERO) { cv_mem->cv_thi = cv_mem->cv_tn; N_VScale(ONE, cv_mem->cv_zn[0], cv_mem->cv_y); } else { cv_mem->cv_thi = cv_mem->cv_toutc; (void) CVodeGetDky(cv_mem, cv_mem->cv_thi, 0, cv_mem->cv_y); } } /* Set ghi = g(thi) and call cvRootfind to search (tlo,thi) for roots. 
*/ retval = cv_mem->cv_gfun(cv_mem->cv_thi, cv_mem->cv_y, cv_mem->cv_ghi, cv_mem->cv_user_data); cv_mem->cv_nge++; if (retval != 0) return(CV_RTFUNC_FAIL); cv_mem->cv_ttol = (SUNRabs(cv_mem->cv_tn) + SUNRabs(cv_mem->cv_h)) * cv_mem->cv_uround * HUNDRED; ier = cvRootfind(cv_mem); if (ier == CV_RTFUNC_FAIL) return(CV_RTFUNC_FAIL); for(i=0; icv_nrtfn; i++) { if(!cv_mem->cv_gactive[i] && cv_mem->cv_grout[i] != ZERO) cv_mem->cv_gactive[i] = SUNTRUE; } cv_mem->cv_tlo = cv_mem->cv_trout; for (i = 0; i < cv_mem->cv_nrtfn; i++) cv_mem->cv_glo[i] = cv_mem->cv_grout[i]; /* If no root found, return CV_SUCCESS. */ if (ier == CV_SUCCESS) return(CV_SUCCESS); /* If a root was found, interpolate to get y(trout) and return. */ (void) CVodeGetDky(cv_mem, cv_mem->cv_trout, 0, cv_mem->cv_y); return(RTFOUND); } /* * cvRootfind * * This routine solves for a root of g(t) between tlo and thi, if * one exists. Only roots of odd multiplicity (i.e. with a change * of sign in one of the g_i), or exact zeros, are found. * Here the sign of tlo - thi is arbitrary, but if multiple roots * are found, the one closest to tlo is returned. * * The method used is the Illinois algorithm, a modified secant method. * Reference: Kathie L. Hiebert and Lawrence F. Shampine, Implicitly * Defined Output Points for Solutions of ODEs, Sandia National * Laboratory Report SAND80-0180, February 1980. * * This routine uses the following parameters for communication: * * nrtfn = number of functions g_i, or number of components of * the vector-valued function g(t). Input only. * * gfun = user-defined function for g(t). Its form is * (void) gfun(t, y, gt, user_data) * * rootdir = in array specifying the direction of zero-crossings. * If rootdir[i] > 0, search for roots of g_i only if * g_i is increasing; if rootdir[i] < 0, search for * roots of g_i only if g_i is decreasing; otherwise * always search for roots of g_i. * * gactive = array specifying whether a component of g should * or should not be monitored. 
gactive[i] is initially * set to SUNTRUE for all i=0,...,nrtfn-1, but it may be * reset to SUNFALSE if at the first step g[i] is 0.0 * both at the I.C. and at a small perturbation of them. * gactive[i] is then set back on SUNTRUE only after the * corresponding g function moves away from 0.0. * * nge = cumulative counter for gfun calls. * * ttol = a convergence tolerance for trout. Input only. * When a root at trout is found, it is located only to * within a tolerance of ttol. Typically, ttol should * be set to a value on the order of * 100 * UROUND * max (SUNRabs(tlo), SUNRabs(thi)) * where UROUND is the unit roundoff of the machine. * * tlo, thi = endpoints of the interval in which roots are sought. * On input, these must be distinct, but tlo - thi may * be of either sign. The direction of integration is * assumed to be from tlo to thi. On return, tlo and thi * are the endpoints of the final relevant interval. * * glo, ghi = arrays of length nrtfn containing the vectors g(tlo) * and g(thi) respectively. Input and output. On input, * none of the glo[i] should be zero. * * trout = root location, if a root was found, or thi if not. * Output only. If a root was found other than an exact * zero of g, trout is the endpoint thi of the final * interval bracketing the root, with size at most ttol. * * grout = array of length nrtfn containing g(trout) on return. * * iroots = int array of length nrtfn with root information. * Output only. If a root was found, iroots indicates * which components g_i have a root at trout. For * i = 0, ..., nrtfn-1, iroots[i] = 1 if g_i has a root * and g_i is increasing, iroots[i] = -1 if g_i has a * root and g_i is decreasing, and iroots[i] = 0 if g_i * has no roots or g_i varies in the direction opposite * to that indicated by rootdir[i]. * * This routine returns an int equal to: * CV_RTFUNC_FAIL < 0 if the g function failed, or * RTFOUND = 1 if a root of g was found, or * CV_SUCCESS = 0 otherwise. 
*/ static int cvRootfind(CVodeMem cv_mem) { realtype alph, tmid, gfrac, maxfrac, fracint, fracsub; int i, retval, imax, side, sideprev; booleantype zroot, sgnchg; imax = 0; /* First check for change in sign in ghi or for a zero in ghi. */ maxfrac = ZERO; zroot = SUNFALSE; sgnchg = SUNFALSE; for (i = 0; i < cv_mem->cv_nrtfn; i++) { if(!cv_mem->cv_gactive[i]) continue; if (SUNRabs(cv_mem->cv_ghi[i]) == ZERO) { if(cv_mem->cv_rootdir[i]*cv_mem->cv_glo[i] <= ZERO) { zroot = SUNTRUE; } } else { if ( (cv_mem->cv_glo[i]*cv_mem->cv_ghi[i] < ZERO) && (cv_mem->cv_rootdir[i]*cv_mem->cv_glo[i] <= ZERO) ) { gfrac = SUNRabs(cv_mem->cv_ghi[i]/(cv_mem->cv_ghi[i] - cv_mem->cv_glo[i])); if (gfrac > maxfrac) { sgnchg = SUNTRUE; maxfrac = gfrac; imax = i; } } } } /* If no sign change was found, reset trout and grout. Then return CV_SUCCESS if no zero was found, or set iroots and return RTFOUND. */ if (!sgnchg) { cv_mem->cv_trout = cv_mem->cv_thi; for (i = 0; i < cv_mem->cv_nrtfn; i++) cv_mem->cv_grout[i] = cv_mem->cv_ghi[i]; if (!zroot) return(CV_SUCCESS); for (i = 0; i < cv_mem->cv_nrtfn; i++) { cv_mem->cv_iroots[i] = 0; if(!cv_mem->cv_gactive[i]) continue; if ( (SUNRabs(cv_mem->cv_ghi[i]) == ZERO) && (cv_mem->cv_rootdir[i]*cv_mem->cv_glo[i] <= ZERO) ) cv_mem->cv_iroots[i] = cv_mem->cv_glo[i] > 0 ? -1 : 1; } return(RTFOUND); } /* Initialize alph to avoid compiler warning */ alph = ONE; /* A sign change was found. Loop to locate nearest root. */ side = 0; sideprev = -1; for(;;) { /* Looping point */ /* If interval size is already less than tolerance ttol, break. */ if (SUNRabs(cv_mem->cv_thi - cv_mem->cv_tlo) <= cv_mem->cv_ttol) break; /* Set weight alph. On the first two passes, set alph = 1. Thereafter, reset alph according to the side (low vs high) of the subinterval in which the sign change was found in the previous two passes. If the sides were opposite, set alph = 1. If the sides were the same, then double alph (if high side), or halve alph (if low side). 
The next guess tmid is the secant method value if alph = 1, but is closer to tlo if alph < 1, and closer to thi if alph > 1. */ if (sideprev == side) { alph = (side == 2) ? alph*TWO : alph*HALF; } else { alph = ONE; } /* Set next root approximation tmid and get g(tmid). If tmid is too close to tlo or thi, adjust it inward, by a fractional distance that is between 0.1 and 0.5. */ tmid = cv_mem->cv_thi - (cv_mem->cv_thi - cv_mem->cv_tlo) * cv_mem->cv_ghi[imax] / (cv_mem->cv_ghi[imax] - alph*cv_mem->cv_glo[imax]); if (SUNRabs(tmid - cv_mem->cv_tlo) < HALF*cv_mem->cv_ttol) { fracint = SUNRabs(cv_mem->cv_thi - cv_mem->cv_tlo)/cv_mem->cv_ttol; fracsub = (fracint > FIVE) ? PT1 : HALF/fracint; tmid = cv_mem->cv_tlo + fracsub*(cv_mem->cv_thi - cv_mem->cv_tlo); } if (SUNRabs(cv_mem->cv_thi - tmid) < HALF*cv_mem->cv_ttol) { fracint = SUNRabs(cv_mem->cv_thi - cv_mem->cv_tlo)/cv_mem->cv_ttol; fracsub = (fracint > FIVE) ? PT1 : HALF/fracint; tmid = cv_mem->cv_thi - fracsub*(cv_mem->cv_thi - cv_mem->cv_tlo); } (void) CVodeGetDky(cv_mem, tmid, 0, cv_mem->cv_y); retval = cv_mem->cv_gfun(tmid, cv_mem->cv_y, cv_mem->cv_grout, cv_mem->cv_user_data); cv_mem->cv_nge++; if (retval != 0) return(CV_RTFUNC_FAIL); /* Check to see in which subinterval g changes sign, and reset imax. Set side = 1 if sign change is on low side, or 2 if on high side. */ maxfrac = ZERO; zroot = SUNFALSE; sgnchg = SUNFALSE; sideprev = side; for (i = 0; i < cv_mem->cv_nrtfn; i++) { if(!cv_mem->cv_gactive[i]) continue; if (SUNRabs(cv_mem->cv_grout[i]) == ZERO) { if(cv_mem->cv_rootdir[i]*cv_mem->cv_glo[i] <= ZERO) zroot = SUNTRUE; } else { if ( (cv_mem->cv_glo[i]*cv_mem->cv_grout[i] < ZERO) && (cv_mem->cv_rootdir[i]*cv_mem->cv_glo[i] <= ZERO) ) { gfrac = SUNRabs(cv_mem->cv_grout[i] / (cv_mem->cv_grout[i] - cv_mem->cv_glo[i])); if (gfrac > maxfrac) { sgnchg = SUNTRUE; maxfrac = gfrac; imax = i; } } } } if (sgnchg) { /* Sign change found in (tlo,tmid); replace thi with tmid. 
*/ cv_mem->cv_thi = tmid; for (i = 0; i < cv_mem->cv_nrtfn; i++) cv_mem->cv_ghi[i] = cv_mem->cv_grout[i]; side = 1; /* Stop at root thi if converged; otherwise loop. */ if (SUNRabs(cv_mem->cv_thi - cv_mem->cv_tlo) <= cv_mem->cv_ttol) break; continue; /* Return to looping point. */ } if (zroot) { /* No sign change in (tlo,tmid), but g = 0 at tmid; return root tmid. */ cv_mem->cv_thi = tmid; for (i = 0; i < cv_mem->cv_nrtfn; i++) cv_mem->cv_ghi[i] = cv_mem->cv_grout[i]; break; } /* No sign change in (tlo,tmid), and no zero at tmid. Sign change must be in (tmid,thi). Replace tlo with tmid. */ cv_mem->cv_tlo = tmid; for (i = 0; i < cv_mem->cv_nrtfn; i++) cv_mem->cv_glo[i] = cv_mem->cv_grout[i]; side = 2; /* Stop at root thi if converged; otherwise loop back. */ if (SUNRabs(cv_mem->cv_thi - cv_mem->cv_tlo) <= cv_mem->cv_ttol) break; } /* End of root-search loop */ /* Reset trout and grout, set iroots, and return RTFOUND. */ cv_mem->cv_trout = cv_mem->cv_thi; for (i = 0; i < cv_mem->cv_nrtfn; i++) { cv_mem->cv_grout[i] = cv_mem->cv_ghi[i]; cv_mem->cv_iroots[i] = 0; if(!cv_mem->cv_gactive[i]) continue; if ( (SUNRabs(cv_mem->cv_ghi[i]) == ZERO) && (cv_mem->cv_rootdir[i]*cv_mem->cv_glo[i] <= ZERO) ) cv_mem->cv_iroots[i] = cv_mem->cv_glo[i] > 0 ? -1 : 1; if ( (cv_mem->cv_glo[i]*cv_mem->cv_ghi[i] < ZERO) && (cv_mem->cv_rootdir[i]*cv_mem->cv_glo[i] <= ZERO) ) cv_mem->cv_iroots[i] = cv_mem->cv_glo[i] > 0 ? 
-1 : 1; } return(RTFOUND); } /* * ================================================================= * Internal EWT function * ================================================================= */ /* * cvEwtSet * * This routine is responsible for setting the error weight vector ewt, * according to tol_type, as follows: * * (1) ewt[i] = 1 / (reltol * SUNRabs(ycur[i]) + abstol), i=0,...,neq-1 * if tol_type = CV_SS * (2) ewt[i] = 1 / (reltol * SUNRabs(ycur[i]) + abstol[i]), i=0,...,neq-1 * if tol_type = CV_SV * * cvEwtSet returns 0 if ewt is successfully set as above to a * positive vector and -1 otherwise. In the latter case, ewt is * considered undefined. * * All the real work is done in the routines cvEwtSetSS, cvEwtSetSV. */ int cvEwtSet(N_Vector ycur, N_Vector weight, void *data) { CVodeMem cv_mem; int flag = 0; /* data points to cv_mem here */ cv_mem = (CVodeMem) data; switch(cv_mem->cv_itol) { case CV_SS: flag = cvEwtSetSS(cv_mem, ycur, weight); break; case CV_SV: flag = cvEwtSetSV(cv_mem, ycur, weight); break; } return(flag); } /* * cvEwtSetSS * * This routine sets ewt as decribed above in the case tol_type = CV_SS. * If the absolute tolerance is zero, it tests for non-positive components * before inverting. cvEwtSetSS returns 0 if ewt is successfully set to a * positive vector and -1 otherwise. In the latter case, ewt is considered * undefined. */ static int cvEwtSetSS(CVodeMem cv_mem, N_Vector ycur, N_Vector weight) { N_VAbs(ycur, cv_mem->cv_tempv); N_VScale(cv_mem->cv_reltol, cv_mem->cv_tempv, cv_mem->cv_tempv); N_VAddConst(cv_mem->cv_tempv, cv_mem->cv_Sabstol, cv_mem->cv_tempv); if (cv_mem->cv_atolmin0) { if (N_VMin(cv_mem->cv_tempv) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempv, weight); return(0); } /* * cvEwtSetSV * * This routine sets ewt as decribed above in the case tol_type = CV_SV. * If any absolute tolerance is zero, it tests for non-positive components * before inverting. 
cvEwtSetSV returns 0 if ewt is successfully set to a * positive vector and -1 otherwise. In the latter case, ewt is considered * undefined. */ static int cvEwtSetSV(CVodeMem cv_mem, N_Vector ycur, N_Vector weight) { N_VAbs(ycur, cv_mem->cv_tempv); N_VLinearSum(cv_mem->cv_reltol, cv_mem->cv_tempv, ONE, cv_mem->cv_Vabstol, cv_mem->cv_tempv); if (cv_mem->cv_atolmin0) { if (N_VMin(cv_mem->cv_tempv) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempv, weight); return(0); } /* * cvQuadEwtSet * */ static int cvQuadEwtSet(CVodeMem cv_mem, N_Vector qcur, N_Vector weightQ) { int flag=0; switch (cv_mem->cv_itolQ) { case CV_SS: flag = cvQuadEwtSetSS(cv_mem, qcur, weightQ); break; case CV_SV: flag = cvQuadEwtSetSV(cv_mem, qcur, weightQ); break; } return(flag); } /* * cvQuadEwtSetSS * */ static int cvQuadEwtSetSS(CVodeMem cv_mem, N_Vector qcur, N_Vector weightQ) { N_VAbs(qcur, cv_mem->cv_tempvQ); N_VScale(cv_mem->cv_reltolQ, cv_mem->cv_tempvQ, cv_mem->cv_tempvQ); N_VAddConst(cv_mem->cv_tempvQ, cv_mem->cv_SabstolQ, cv_mem->cv_tempvQ); if (cv_mem->cv_atolQmin0) { if (N_VMin(cv_mem->cv_tempvQ) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempvQ, weightQ); return(0); } /* * cvQuadEwtSetSV * */ static int cvQuadEwtSetSV(CVodeMem cv_mem, N_Vector qcur, N_Vector weightQ) { N_VAbs(qcur, cv_mem->cv_tempvQ); N_VLinearSum(cv_mem->cv_reltolQ, cv_mem->cv_tempvQ, ONE, cv_mem->cv_VabstolQ, cv_mem->cv_tempvQ); if (cv_mem->cv_atolQmin0) { if (N_VMin(cv_mem->cv_tempvQ) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempvQ, weightQ); return(0); } /* * cvSensEwtSet * */ static int cvSensEwtSet(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS) { int flag=0; switch (cv_mem->cv_itolS) { case CV_EE: flag = cvSensEwtSetEE(cv_mem, yScur, weightS); break; case CV_SS: flag = cvSensEwtSetSS(cv_mem, yScur, weightS); break; case CV_SV: flag = cvSensEwtSetSV(cv_mem, yScur, weightS); break; } return(flag); } /* * cvSensEwtSetEE * * In this case, the error weight vector for the i-th sensitivity is set to * * ewtS_i = pbar_i 
* efun(pbar_i*yS_i) * * In other words, the scaled sensitivity pbar_i * yS_i has the same error * weight vector calculation as the solution vector. * */ static int cvSensEwtSetEE(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS) { int is; N_Vector pyS; int flag; /* Use tempvS[0] as temporary storage for the scaled sensitivity */ pyS = cv_mem->cv_tempvS[0]; for (is=0; iscv_Ns; is++) { N_VScale(cv_mem->cv_pbar[is], yScur[is], pyS); flag = cv_mem->cv_efun(pyS, weightS[is], cv_mem->cv_e_data); if (flag != 0) return(-1); N_VScale(cv_mem->cv_pbar[is], weightS[is], weightS[is]); } return(0); } /* * cvSensEwtSetSS * */ static int cvSensEwtSetSS(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS) { int is; for (is=0; iscv_Ns; is++) { N_VAbs(yScur[is], cv_mem->cv_tempv); N_VScale(cv_mem->cv_reltolS, cv_mem->cv_tempv, cv_mem->cv_tempv); N_VAddConst(cv_mem->cv_tempv, cv_mem->cv_SabstolS[is], cv_mem->cv_tempv); if (cv_mem->cv_atolSmin0[is]) { if (N_VMin(cv_mem->cv_tempv) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempv, weightS[is]); } return(0); } /* * cvSensEwtSetSV * */ static int cvSensEwtSetSV(CVodeMem cv_mem, N_Vector *yScur, N_Vector *weightS) { int is; for (is=0; iscv_Ns; is++) { N_VAbs(yScur[is], cv_mem->cv_tempv); N_VLinearSum(cv_mem->cv_reltolS, cv_mem->cv_tempv, ONE, cv_mem->cv_VabstolS[is], cv_mem->cv_tempv); if (cv_mem->cv_atolSmin0[is]) { if (N_VMin(cv_mem->cv_tempv) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempv, weightS[is]); } return(0); } /* * cvQuadSensEwtSet * */ static int cvQuadSensEwtSet(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS) { int flag=0; switch (cv_mem->cv_itolQS) { case CV_EE: flag = cvQuadSensEwtSetEE(cv_mem, yQScur, weightQS); break; case CV_SS: flag = cvQuadSensEwtSetSS(cv_mem, yQScur, weightQS); break; case CV_SV: flag = cvQuadSensEwtSetSV(cv_mem, yQScur, weightQS); break; } return(flag); } /* * cvQuadSensEwtSetEE * * In this case, the error weight vector for the i-th quadrature sensitivity * is set to * * ewtQS_i = pbar_i * 
cvQuadEwtSet(pbar_i*yQS_i) * * In other words, the scaled sensitivity pbar_i * yQS_i has the same error * weight vector calculation as the quadrature vector. * */ static int cvQuadSensEwtSetEE(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS) { int is; N_Vector pyS; int flag; /* Use tempvQS[0] as temporary storage for the scaled sensitivity */ pyS = cv_mem->cv_tempvQS[0]; for (is=0; iscv_Ns; is++) { N_VScale(cv_mem->cv_pbar[is], yQScur[is], pyS); flag = cvQuadEwtSet(cv_mem, pyS, weightQS[is]); if (flag != 0) return(-1); N_VScale(cv_mem->cv_pbar[is], weightQS[is], weightQS[is]); } return(0); } static int cvQuadSensEwtSetSS(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS) { int is; for (is=0; iscv_Ns; is++) { N_VAbs(yQScur[is], cv_mem->cv_tempvQ); N_VScale(cv_mem->cv_reltolQS, cv_mem->cv_tempvQ, cv_mem->cv_tempvQ); N_VAddConst(cv_mem->cv_tempvQ, cv_mem->cv_SabstolQS[is], cv_mem->cv_tempvQ); if (cv_mem->cv_atolQSmin0[is]) { if (N_VMin(cv_mem->cv_tempvQ) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempvQ, weightQS[is]); } return(0); } static int cvQuadSensEwtSetSV(CVodeMem cv_mem, N_Vector *yQScur, N_Vector *weightQS) { int is; for (is=0; iscv_Ns; is++) { N_VAbs(yQScur[is], cv_mem->cv_tempvQ); N_VLinearSum(cv_mem->cv_reltolQS, cv_mem->cv_tempvQ, ONE, cv_mem->cv_VabstolQS[is], cv_mem->cv_tempvQ); if (cv_mem->cv_atolQSmin0[is]) { if (N_VMin(cv_mem->cv_tempvQ) <= ZERO) return(-1); } N_VInv(cv_mem->cv_tempvQ, weightQS[is]); } return(0); } /* * ----------------------------------------------------------------- * Functions for combined norms * ----------------------------------------------------------------- */ /* * cvQuadUpdateNorm * * Updates the norm old_nrm to account for all quadratures. 
*/ static realtype cvQuadUpdateNorm(CVodeMem cv_mem, realtype old_nrm, N_Vector xQ, N_Vector wQ) { realtype qnrm; qnrm = N_VWrmsNorm(xQ, wQ); if (old_nrm > qnrm) return(old_nrm); else return(qnrm); } /* * cvSensNorm * * This routine returns the maximum over the weighted root mean * square norm of xS with weight vectors wS: * * max { wrms(xS[0],wS[0]) ... wrms(xS[Ns-1],wS[Ns-1]) } * * Called by cvSensUpdateNorm or directly in the CV_STAGGERED approach * during the NLS solution and before the error test. */ realtype cvSensNorm(CVodeMem cv_mem, N_Vector *xS, N_Vector *wS) { int is; realtype nrm; (void) N_VWrmsNormVectorArray(cv_mem->cv_Ns, xS, wS, cv_mem->cv_cvals); nrm = cv_mem->cv_cvals[0]; for (is=1; iscv_Ns; is++) if ( cv_mem->cv_cvals[is] > nrm ) nrm = cv_mem->cv_cvals[is]; return(nrm); } /* * cvSensUpdateNorm * * Updates the norm old_nrm to account for all sensitivities. */ realtype cvSensUpdateNorm(CVodeMem cv_mem, realtype old_nrm, N_Vector *xS, N_Vector *wS) { realtype snrm; snrm = cvSensNorm(cv_mem, xS, wS); if (old_nrm > snrm) return(old_nrm); else return(snrm); } /* * cvQuadSensNorm * * This routine returns the maximum over the weighted root mean * square norm of xQS with weight vectors wQS: * * max { wrms(xQS[0],wS[0]) ... wrms(xQS[Ns-1],wS[Ns-1]) } * * Called by cvQuadSensUpdateNorm. */ static realtype cvQuadSensNorm(CVodeMem cv_mem, N_Vector *xQS, N_Vector *wQS) { int is; realtype nrm; (void) N_VWrmsNormVectorArray(cv_mem->cv_Ns, xQS, wQS, cv_mem->cv_cvals); nrm = cv_mem->cv_cvals[0]; for (is=1; iscv_Ns; is++) if ( cv_mem->cv_cvals[is] > nrm ) nrm = cv_mem->cv_cvals[is]; return(nrm); } /* * cvSensUpdateNorm * * Updates the norm old_nrm to account for all quadrature sensitivities. 
*/ static realtype cvQuadSensUpdateNorm(CVodeMem cv_mem, realtype old_nrm, N_Vector *xQS, N_Vector *wQS) { realtype snrm; snrm = cvQuadSensNorm(cv_mem, xQS, wQS); if (old_nrm > snrm) return(old_nrm); else return(snrm); } /* * ----------------------------------------------------------------- * Wrappers for sensitivity RHS * ----------------------------------------------------------------- */ /* * cvSensRhsWrapper * * CVSensRhs is a high level routine that returns right hand side * of sensitivity equations. Depending on the 'ifS' flag, it either * calls directly the fS routine (ifS=CV_ALLSENS) or (if ifS=CV_ONESENS) * calls the fS1 routine in a loop over all sensitivities. * * CVSensRhs is called: * (*) by CVode at the first step * (*) by cvYddNorm if errcon=SUNTRUE * (*) by the nonlinear solver if ism=CV_SIMULTANEOUS * (*) by cvDoErrorTest when restarting from scratch * (*) in the corrector loop if ism=CV_STAGGERED * (*) by cvStgrDoErrorTest when restarting from scratch * * The return value is that of the sensitivity RHS function fS, * */ int cvSensRhsWrapper(CVodeMem cv_mem, realtype time, N_Vector ycur, N_Vector fcur, N_Vector *yScur, N_Vector *fScur, N_Vector temp1, N_Vector temp2) { int retval=0, is; if (cv_mem->cv_ifS==CV_ALLSENS) { retval = cv_mem->cv_fS(cv_mem->cv_Ns, time, ycur, fcur, yScur, fScur, cv_mem->cv_fS_data, temp1, temp2); cv_mem->cv_nfSe++; } else { for (is=0; iscv_Ns; is++) { retval = cv_mem->cv_fS1(cv_mem->cv_Ns, time, ycur, fcur, is, yScur[is], fScur[is], cv_mem->cv_fS_data, temp1, temp2); cv_mem->cv_nfSe++; if (retval != 0) break; } } return(retval); } /* * cvSensRhs1Wrapper * * cvSensRhs1Wrapper is a high level routine that returns right-hand * side of the is-th sensitivity equation. * * cvSensRhs1Wrapper is called only during the CV_STAGGERED1 corrector loop * (ifS must be CV_ONESENS, otherwise CVodeSensInit would have * issued an error message). 
* * The return value is that of the sensitivity RHS function fS1, */ int cvSensRhs1Wrapper(CVodeMem cv_mem, realtype time, N_Vector ycur, N_Vector fcur, int is, N_Vector yScur, N_Vector fScur, N_Vector temp1, N_Vector temp2) { int retval; retval = cv_mem->cv_fS1(cv_mem->cv_Ns, time, ycur, fcur, is, yScur, fScur, cv_mem->cv_fS_data, temp1, temp2); cv_mem->cv_nfSe++; return(retval); } /* * ----------------------------------------------------------------- * Internal DQ approximations for sensitivity RHS * ----------------------------------------------------------------- */ /* Undefine Readibility Constants */ #undef y /* * cvSensRhsInternalDQ - internal CVSensRhsFn * * cvSensRhsInternalDQ computes right hand side of all sensitivity equations * by finite differences */ int cvSensRhsInternalDQ(int Ns, realtype t, N_Vector y, N_Vector ydot, N_Vector *yS, N_Vector *ySdot, void *cvode_mem, N_Vector ytemp, N_Vector ftemp) { int is, retval; for (is=0; iscv_reltol, cv_mem->cv_uround)); rdelta = ONE/delta; pbari = cv_mem->cv_pbar[is]; which = cv_mem->cv_plist[is]; psave = cv_mem->cv_p[which]; Deltap = pbari * delta; rDeltap = ONE/Deltap; norms = N_VWrmsNorm(yS, cv_mem->cv_ewt) * pbari; rDeltay = SUNMAX(norms, rdelta) / pbari; Deltay = ONE/rDeltay; if (cv_mem->cv_DQrhomax == ZERO) { /* No switching */ method = (cv_mem->cv_DQtype==CV_CENTERED) ? CENTERED1 : FORWARD1; } else { /* switch between simultaneous/separate DQ */ ratio = Deltay * rDeltap; if ( SUNMAX(ONE/ratio, ratio) <= cv_mem->cv_DQrhomax ) method = (cv_mem->cv_DQtype==CV_CENTERED) ? CENTERED1 : FORWARD1; else method = (cv_mem->cv_DQtype==CV_CENTERED) ? 
CENTERED2 : FORWARD2; } switch(method) { case CENTERED1: Delta = SUNMIN(Deltay, Deltap); r2Delta = HALF/Delta; N_VLinearSum(ONE,y,Delta,yS,ytemp); cv_mem->cv_p[which] = psave + Delta; retval = cv_mem->cv_f(t, ytemp, ySdot, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(ONE,y,-Delta,yS,ytemp); cv_mem->cv_p[which] = psave - Delta; retval = cv_mem->cv_f(t, ytemp, ftemp, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(r2Delta,ySdot,-r2Delta,ftemp,ySdot); break; case CENTERED2: r2Deltap = HALF/Deltap; r2Deltay = HALF/Deltay; N_VLinearSum(ONE,y,Deltay,yS,ytemp); retval = cv_mem->cv_f(t, ytemp, ySdot, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(ONE,y,-Deltay,yS,ytemp); retval = cv_mem->cv_f(t, ytemp, ftemp, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(r2Deltay, ySdot, -r2Deltay, ftemp, ySdot); cv_mem->cv_p[which] = psave + Deltap; retval = cv_mem->cv_f(t, y, ytemp, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); cv_mem->cv_p[which] = psave - Deltap; retval = cv_mem->cv_f(t, y, ftemp, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); /* ySdot = ySdot + r2Deltap * ytemp - r2Deltap * ftemp */ cvals[0] = ONE; Xvecs[0] = ySdot; cvals[1] = r2Deltap; Xvecs[1] = ytemp; cvals[2] = -r2Deltap; Xvecs[2] = ftemp; retval = N_VLinearCombination(3, cvals, Xvecs, ySdot); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); break; case FORWARD1: Delta = SUNMIN(Deltay, Deltap); rDelta = ONE/Delta; N_VLinearSum(ONE,y,Delta,yS,ytemp); cv_mem->cv_p[which] = psave + Delta; retval = cv_mem->cv_f(t, ytemp, ySdot, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(rDelta,ySdot,-rDelta,ydot,ySdot); break; case FORWARD2: N_VLinearSum(ONE,y,Deltay,yS,ytemp); retval = cv_mem->cv_f(t, ytemp, ySdot, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(rDeltay, ySdot, -rDeltay, ydot, ySdot); 
cv_mem->cv_p[which] = psave + Deltap; retval = cv_mem->cv_f(t, y, ytemp, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); /* ySdot = ySdot + rDeltap * ytemp - rDeltap * ydot */ cvals[0] = ONE; Xvecs[0] = ySdot; cvals[1] = rDeltap; Xvecs[1] = ytemp; cvals[2] = -rDeltap; Xvecs[2] = ydot; retval = N_VLinearCombination(3, cvals, Xvecs, ySdot); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); break; } cv_mem->cv_p[which] = psave; /* Increment counter nfeS */ cv_mem->cv_nfeS += nfel; return(0); } /* * cvQuadSensRhsInternalDQ - internal CVQuadSensRhsFn * * cvQuadSensRhsInternalDQ computes right hand side of all quadrature * sensitivity equations by finite differences. All work is actually * done in cvQuadSensRhs1InternalDQ. */ static int cvQuadSensRhsInternalDQ(int Ns, realtype t, N_Vector y, N_Vector *yS, N_Vector yQdot, N_Vector *yQSdot, void *cvode_mem, N_Vector tmp, N_Vector tmpQ) { CVodeMem cv_mem; int is, retval; /* cvode_mem is passed here as user data */ cv_mem = (CVodeMem) cvode_mem; for (is=0; iscv_reltol, cv_mem->cv_uround)); rdelta = ONE/delta; pbari = cv_mem->cv_pbar[is]; which = cv_mem->cv_plist[is]; psave = cv_mem->cv_p[which]; Deltap = pbari * delta; norms = N_VWrmsNorm(yS, cv_mem->cv_ewt) * pbari; rDeltay = SUNMAX(norms, rdelta) / pbari; Deltay = ONE/rDeltay; method = (cv_mem->cv_DQtype==CV_CENTERED) ? 
CENTERED1 : FORWARD1; switch(method) { case CENTERED1: Delta = SUNMIN(Deltay, Deltap); r2Delta = HALF/Delta; N_VLinearSum(ONE, y, Delta, yS, tmp); cv_mem->cv_p[which] = psave + Delta; retval = cv_mem->cv_fQ(t, tmp, yQSdot, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(ONE, y, -Delta, yS, tmp); cv_mem->cv_p[which] = psave - Delta; retval = cv_mem->cv_fQ(t, tmp, tmpQ, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(r2Delta, yQSdot, -r2Delta, tmpQ, yQSdot); break; case FORWARD1: Delta = SUNMIN(Deltay, Deltap); rDelta = ONE/Delta; N_VLinearSum(ONE, y, Delta, yS, tmp); cv_mem->cv_p[which] = psave + Delta; retval = cv_mem->cv_fQ(t, tmp, yQSdot, cv_mem->cv_user_data); nfel++; if (retval != 0) return(retval); N_VLinearSum(rDelta, yQSdot, -rDelta, yQdot, yQSdot); break; } cv_mem->cv_p[which] = psave; /* Increment counter nfQeS */ cv_mem->cv_nfQeS += nfel; return(0); } /* * ----------------------------------------------------------------- * Error message handling functions * ----------------------------------------------------------------- */ /* * cvProcessError is a high level error handling function. * - If cv_mem==NULL it prints the error message to stderr. * - Otherwise, it sets up and calls the error handling function * pointed to by cv_ehfun. */ void cvProcessError(CVodeMem cv_mem, int error_code, const char *module, const char *fname, const char *msgfmt, ...) 
{ va_list ap; char msg[256]; /* Initialize the argument pointer variable (msgfmt is the last required argument to cvProcessError) */ va_start(ap, msgfmt); /* Compose the message */ vsprintf(msg, msgfmt, ap); if (cv_mem == NULL) { /* We write to stderr */ #ifndef NO_FPRINTF_OUTPUT STAN_SUNDIALS_FPRINTF(stderr, "\n[%s ERROR] %s\n ", module, fname); STAN_SUNDIALS_FPRINTF(stderr, "%s\n\n", msg); #endif } else { /* We can call ehfun */ cv_mem->cv_ehfun(error_code, module, fname, msg, cv_mem->cv_eh_data); } /* Finalize argument processing */ va_end(ap); return; } /* * cvErrHandler is the default error handling function. * It sends the error message to the stream pointed to by cv_errfp. */ void cvErrHandler(int error_code, const char *module, const char *function, char *msg, void *data) { CVodeMem cv_mem; char err_type[10]; /* data points to cv_mem here */ cv_mem = (CVodeMem) data; if (error_code == CV_WARNING) sprintf(err_type,"WARNING"); else sprintf(err_type,"ERROR"); #ifndef NO_FPRINTF_OUTPUT if (cv_mem->cv_errfp!=NULL) { STAN_SUNDIALS_FPRINTF(cv_mem->cv_errfp,"\n[%s %s] %s\n",module,err_type,function); STAN_SUNDIALS_FPRINTF(cv_mem->cv_errfp," %s\n\n",msg); } #endif return; } StanHeaders/src/cvodes/cvodes_ls.c0000644000176200001440000026774614645137106016707 0ustar liggesusers/* ---------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Radu Serban @ LLNL * ---------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------- * Implementation file for CVODES' linear solver interface. * * Part I contains routines for using CVSLS on forward problems. 
* * Part II contains wrappers for using CVSLS on adjoint * (backward) problems. * ---------------------------------------------------------------- */ #include #include #include #include "cvodes_impl.h" #include "cvodes_ls_impl.h" #include #include #include #include /* Private constants */ #define MIN_INC_MULT RCONST(1000.0) #define MAX_DQITERS 3 /* max. number of attempts to recover in DQ J*v */ #define ZERO RCONST(0.0) #define PT25 RCONST(0.25) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /*================================================================= PRIVATE FUNCTION PROTOTYPES - forward problems =================================================================*/ static int cvLsLinSys(realtype t, N_Vector y, N_Vector fy, SUNMatrix A, booleantype jok, booleantype *jcur, realtype gamma, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); /*================================================================= PRIVATE FUNCTION PROTOTYPES - backward problems =================================================================*/ /* cvLsJacBWrapper and cvLsJacBSWrapper have type CVLsJacFn, and wrap around user-provided functions of type CVLsJacFnB and CVLsJacFnBS, respectively */ static int cvLsJacBWrapper(realtype t, N_Vector yB, N_Vector fyB, SUNMatrix JB, void *cvode_mem, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B); static int cvLsJacBSWrapper(realtype t, N_Vector yB, N_Vector fyB, SUNMatrix JB, void *cvode_mem, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B); /* cvLsPrecSetupBWrapper and cvLsPrecSetupBSWrapper have type CVLsPrecSetupFn, and wrap around user-provided functions of type CVLsPrecSetupFnB and CVLsPrecSetupFnBS, respectively */ static int cvLsPrecSetupBWrapper(realtype t, N_Vector yB, N_Vector fyB, booleantype jokB, booleantype *jcurPtrB, realtype gammaB, void *cvode_mem); static int cvLsPrecSetupBSWrapper(realtype t, N_Vector yB, N_Vector fyB, booleantype jokB, booleantype *jcurPtrB, realtype gammaB, void *cvode_mem); /* 
cvLsPrecSolveBWrapper and cvLsPrecSolveBSWrapper have type CVLsPrecSolveFn, and wrap around user-provided functions of type CVLsPrecSolveFnB and CVLsPrecSolveFnBS, respectively */ static int cvLsPrecSolveBWrapper(realtype t, N_Vector yB, N_Vector fyB, N_Vector rB, N_Vector zB, realtype gammaB, realtype deltaB, int lrB, void *cvode_mem); static int cvLsPrecSolveBSWrapper(realtype t, N_Vector yB, N_Vector fyB, N_Vector rB, N_Vector zB, realtype gammaB, realtype deltaB, int lrB, void *cvode_mem); /* cvLsJacTimesSetupBWrapper and cvLsJacTimesSetupBSWrapper have type CVLsJacTimesSetupFn, and wrap around user-provided functions of type CVLsJacTimesSetupFnB and CVLsJacTimesSetupFnBS, respectively */ static int cvLsJacTimesSetupBWrapper(realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem); static int cvLsJacTimesSetupBSWrapper(realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem); /* cvLsJacTimesVecBWrapper and cvLsJacTimesVecBSWrapper have type CVLsJacTimesVecFn, and wrap around user-provided functions of type CVLsJacTimesVecFnB and CVLsJacTimesVecFnBS, respectively */ static int cvLsJacTimesVecBWrapper(N_Vector vB, N_Vector JvB, realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem, N_Vector tmpB); static int cvLsJacTimesVecBSWrapper(N_Vector vB, N_Vector JvB, realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem, N_Vector tmpB); /* cvLsLinSysFnBWrapper and cvLsLinSysFnBSWrapper have type CVLsLinSysFn, and wrap around user-provided functions of type CVLsLinSysFnB and CVLsLinSysFnBS, respectively */ static int cvLsLinSysBWrapper(realtype t, N_Vector yB, N_Vector fyB, SUNMatrix AB, booleantype jokB, booleantype *jcurB, realtype gammaB, void *user_dataB, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B); static int cvLsLinSysBSWrapper(realtype t, N_Vector yB, N_Vector fyB, SUNMatrix AB, booleantype jokB, booleantype *jcurB, realtype gammaB, void *user_dataB, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3); 
/*================================================================ PART I - forward problems ================================================================*/ /*=============================================================== CVSLS Exported functions -- Required ===============================================================*/ /*--------------------------------------------------------------- CVodeSetLinearSolver specifies the linear solver ---------------------------------------------------------------*/ int CVodeSetLinearSolver(void *cvode_mem, SUNLinearSolver LS, SUNMatrix A) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval, LSType; booleantype iterative; /* is the solver iterative? */ booleantype matrixbased; /* is a matrix structure used? */ /* Return immediately if either cvode_mem or LS inputs are NULL */ if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSLS", "CVodeSetLinearSolver", MSG_LS_CVMEM_NULL); return(CVLS_MEM_NULL); } if (LS == NULL) { cvProcessError(NULL, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolver", "LS must be non-NULL"); return(CVLS_ILL_INPUT); } cv_mem = (CVodeMem) cvode_mem; /* Test if solver is compatible with LS interface */ if ( (LS->ops->gettype == NULL) || (LS->ops->solve == NULL) ) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolver", "LS object is missing a required operation"); return(CVLS_ILL_INPUT); } /* Retrieve the LS type */ LSType = SUNLinSolGetType(LS); /* Set flags based on LS type */ iterative = (LSType != SUNLINEARSOLVER_DIRECT); matrixbased = ((LSType != SUNLINEARSOLVER_ITERATIVE) && (LSType != SUNLINEARSOLVER_MATRIX_EMBEDDED)); /* Test if vector is compatible with LS interface */ if ( (cv_mem->cv_tempv->ops->nvconst == NULL) || (cv_mem->cv_tempv->ops->nvwrmsnorm == NULL) ) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolver", MSG_LS_BAD_NVECTOR); return(CVLS_ILL_INPUT); } /* Ensure that A is NULL when LS is matrix-embedded */ if ((LSType == 
SUNLINEARSOLVER_MATRIX_EMBEDDED) && (A != NULL)) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVLS", "CVodeSetLinearSolver", "Incompatible inputs: matrix-embedded LS requires NULL matrix"); return(CVLS_ILL_INPUT); } /* Check for compatible LS type, matrix and "atimes" support */ if (iterative) { if (cv_mem->cv_tempv->ops->nvgetlength == NULL) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolver", MSG_LS_BAD_NVECTOR); return(CVLS_ILL_INPUT); } if (!matrixbased && (LSType != SUNLINEARSOLVER_MATRIX_EMBEDDED) && (LS->ops->setatimes == NULL)) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolver", "Incompatible inputs: iterative LS must support ATimes routine"); return(CVLS_ILL_INPUT); } if (matrixbased && (A == NULL)) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolver", "Incompatible inputs: matrix-iterative LS requires non-NULL matrix"); return(CVLS_ILL_INPUT); } } else if (A == NULL) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolver", "Incompatible inputs: direct LS requires non-NULL matrix"); return(CVLS_ILL_INPUT); } /* free any existing system solver attached to CVode */ if (cv_mem->cv_lfree) cv_mem->cv_lfree(cv_mem); /* Set four main system linear solver function fields in cv_mem */ cv_mem->cv_linit = cvLsInitialize; cv_mem->cv_lsetup = cvLsSetup; cv_mem->cv_lsolve = cvLsSolve; cv_mem->cv_lfree = cvLsFree; /* Allocate memory for CVLsMemRec */ cvls_mem = NULL; cvls_mem = (CVLsMem) malloc(sizeof(struct CVLsMemRec)); if (cvls_mem == NULL) { cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSLS", "CVodeSetLinearSolver", MSG_LS_MEM_FAIL); return(CVLS_MEM_FAIL); } memset(cvls_mem, 0, sizeof(struct CVLsMemRec)); /* set SUNLinearSolver pointer */ cvls_mem->LS = LS; /* Linear solver type information */ cvls_mem->iterative = iterative; cvls_mem->matrixbased = matrixbased; /* Set defaults for Jacobian-related fields */ if (A != NULL) { cvls_mem->jacDQ = SUNTRUE; cvls_mem->jac = cvLsDQJac; cvls_mem->J_data = 
cv_mem; } else { cvls_mem->jacDQ = SUNFALSE; cvls_mem->jac = NULL; cvls_mem->J_data = NULL; } cvls_mem->jtimesDQ = SUNTRUE; cvls_mem->jtsetup = NULL; cvls_mem->jtimes = cvLsDQJtimes; cvls_mem->jt_f = cv_mem->cv_f; cvls_mem->jt_data = cv_mem; cvls_mem->user_linsys = SUNFALSE; cvls_mem->linsys = cvLsLinSys; cvls_mem->A_data = cv_mem; /* Set defaults for preconditioner-related fields */ cvls_mem->pset = NULL; cvls_mem->psolve = NULL; cvls_mem->pfree = NULL; cvls_mem->P_data = cv_mem->cv_user_data; /* Initialize counters */ cvLsInitializeCounters(cvls_mem); /* Set default values for the rest of the LS parameters */ cvls_mem->msbj = CVLS_MSBJ; cvls_mem->jbad = SUNTRUE; cvls_mem->eplifac = CVLS_EPLIN; cvls_mem->last_flag = CVLS_SUCCESS; /* If LS supports ATimes, attach CVLs routine */ if (LS->ops->setatimes) { retval = SUNLinSolSetATimes(LS, cv_mem, cvLsATimes); if (retval != SUNLS_SUCCESS) { cvProcessError(cv_mem, CVLS_SUNLS_FAIL, "CVSLS", "CVodeSetLinearSolver", "Error in calling SUNLinSolSetATimes"); free(cvls_mem); cvls_mem = NULL; return(CVLS_SUNLS_FAIL); } } /* If LS supports preconditioning, initialize pset/psol to NULL */ if (LS->ops->setpreconditioner) { retval = SUNLinSolSetPreconditioner(LS, cv_mem, NULL, NULL); if (retval != SUNLS_SUCCESS) { cvProcessError(cv_mem, CVLS_SUNLS_FAIL, "CVSLS", "CVodeSetLinearSolver", "Error in calling SUNLinSolSetPreconditioner"); free(cvls_mem); cvls_mem = NULL; return(CVLS_SUNLS_FAIL); } } /* When using a SUNMatrix object, store pointer to A and initialize savedJ */ if (A != NULL) { cvls_mem->A = A; cvls_mem->savedJ = NULL; /* allocated in cvLsInitialize */ } /* Allocate memory for ytemp and x */ cvls_mem->ytemp = N_VClone(cv_mem->cv_tempv); if (cvls_mem->ytemp == NULL) { cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSLS", "CVodeSetLinearSolver", MSG_LS_MEM_FAIL); free(cvls_mem); cvls_mem = NULL; return(CVLS_MEM_FAIL); } cvls_mem->x = N_VClone(cv_mem->cv_tempv); if (cvls_mem->x == NULL) { cvProcessError(cv_mem, CVLS_MEM_FAIL, 
"CVSLS", "CVodeSetLinearSolver", MSG_LS_MEM_FAIL); N_VDestroy(cvls_mem->ytemp); free(cvls_mem); cvls_mem = NULL; return(CVLS_MEM_FAIL); } /* For iterative LS, compute default norm conversion factor */ if (iterative) cvls_mem->nrmfac = SUNRsqrt( N_VGetLength(cvls_mem->ytemp) ); /* Check if solution scaling should be enabled */ if (matrixbased && cv_mem->cv_lmm == CV_BDF) cvls_mem->scalesol = SUNTRUE; else cvls_mem->scalesol = SUNFALSE; /* Attach linear solver memory to integrator memory */ cv_mem->cv_lmem = cvls_mem; return(CVLS_SUCCESS); } /*=============================================================== Optional input/output routines ===============================================================*/ /* CVodeSetJacFn specifies the Jacobian function. */ int CVodeSetJacFn(void *cvode_mem, CVLsJacFn jac) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetJacFn", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* return with failure if jac cannot be used */ if ((jac != NULL) && (cvls_mem->A == NULL)) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetJacFn", "Jacobian routine cannot be supplied for NULL SUNMatrix"); return(CVLS_ILL_INPUT); } /* set the Jacobian routine pointer, and update relevant flags */ if (jac != NULL) { cvls_mem->jacDQ = SUNFALSE; cvls_mem->jac = jac; cvls_mem->J_data = cv_mem->cv_user_data; } else { cvls_mem->jacDQ = SUNTRUE; cvls_mem->jac = cvLsDQJac; cvls_mem->J_data = cv_mem; } /* ensure the internal linear system function is used */ cvls_mem->user_linsys = SUNFALSE; cvls_mem->linsys = cvLsLinSys; cvls_mem->A_data = cv_mem; return(CVLS_SUCCESS); } /* CVodeSetEpsLin specifies the nonlinear -> linear tolerance scale factor */ int CVodeSetEpsLin(void *cvode_mem, realtype eplifac) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetEpsLin", &cv_mem, &cvls_mem); if (retval != 
CVLS_SUCCESS) return(retval); /* Check for legal eplifac */ if(eplifac < ZERO) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetEpsLin", MSG_LS_BAD_EPLIN); return(CVLS_ILL_INPUT); } cvls_mem->eplifac = (eplifac == ZERO) ? CVLS_EPLIN : eplifac; return(CVLS_SUCCESS); } /* CVodeSetLSNormFactor sets or computes the factor to use when converting from the integrator tolerance to the linear solver tolerance (WRMS to L2 norm). */ int CVodeSetLSNormFactor(void *cvode_mem, realtype nrmfac) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetLSNormFactor", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); if (nrmfac > ZERO) { /* user-provided factor */ cvls_mem->nrmfac = nrmfac; } else if (nrmfac < ZERO) { /* compute factor for WRMS norm with dot product */ N_VConst(ONE, cvls_mem->ytemp); cvls_mem->nrmfac = SUNRsqrt(N_VDotProd(cvls_mem->ytemp, cvls_mem->ytemp)); } else { /* compute default factor for WRMS norm from vector legnth */ cvls_mem->nrmfac = SUNRsqrt(N_VGetLength(cvls_mem->ytemp)); } return(CVLS_SUCCESS); } /* CVodeSetJacEvalFrequency specifies the frequency for recomputing the Jacobian matrix and/or preconditioner */ int CVodeSetJacEvalFrequency(void *cvode_mem, long int msbj) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; store input and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetJacEvalFrequency", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* Check for legal msbj */ if(msbj < 0) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVLS", "CVodeSetJacEvalFrequency", "A negative evaluation frequency was provided."); return(CVLS_ILL_INPUT); } cvls_mem->msbj = (msbj == 0) ? CVLS_MSBJ : msbj; return(CVLS_SUCCESS); } /* CVodeSetLinearSolutionScaling enables or disables scaling the linear solver solution to account for changes in gamma. 
*/ int CVodeSetLinearSolutionScaling(void *cvode_mem, booleantype onoff) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; store input and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetLinearSolutionScaling", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* check for valid solver and method type */ if (!(cvls_mem->matrixbased) || cv_mem->cv_lmm != CV_BDF) return(CVLS_ILL_INPUT); /* set solution scaling flag */ cvls_mem->scalesol = onoff; return(CVLS_SUCCESS); } /* CVodeSetPreconditioner specifies the user-supplied preconditioner setup and solve routines */ int CVodeSetPreconditioner(void *cvode_mem, CVLsPrecSetupFn psetup, CVLsPrecSolveFn psolve) { CVodeMem cv_mem; CVLsMem cvls_mem; SUNPSetupFn cvls_psetup; SUNPSolveFn cvls_psolve; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetPreconditioner", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* store function pointers for user-supplied routines in CVLs interface */ cvls_mem->pset = psetup; cvls_mem->psolve = psolve; /* issue error if LS object does not allow user-supplied preconditioning */ if (cvls_mem->LS->ops->setpreconditioner == NULL) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetPreconditioner", "SUNLinearSolver object does not support user-supplied preconditioning"); return(CVLS_ILL_INPUT); } /* notify iterative linear solver to call CVLs interface routines */ cvls_psetup = (psetup == NULL) ? NULL : cvLsPSetup; cvls_psolve = (psolve == NULL) ? 
NULL : cvLsPSolve; retval = SUNLinSolSetPreconditioner(cvls_mem->LS, cv_mem, cvls_psetup, cvls_psolve); if (retval != SUNLS_SUCCESS) { cvProcessError(cv_mem, CVLS_SUNLS_FAIL, "CVSLS", "CVLsSetPreconditioner", "Error in calling SUNLinSolSetPreconditioner"); return(CVLS_SUNLS_FAIL); } return(CVLS_SUCCESS); } /* CVodeSetJacTimes specifies the user-supplied Jacobian-vector product setup and multiply routines */ int CVodeSetJacTimes(void *cvode_mem, CVLsJacTimesSetupFn jtsetup, CVLsJacTimesVecFn jtimes) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetJacTimes", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* issue error if LS object does not allow user-supplied ATimes */ if (cvls_mem->LS->ops->setatimes == NULL) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetJacTimes", "SUNLinearSolver object does not support user-supplied ATimes routine"); return(CVLS_ILL_INPUT); } /* store function pointers for user-supplied routines in CVLs interface (NULL jtimes implies use of DQ default) */ if (jtimes != NULL) { cvls_mem->jtimesDQ = SUNFALSE; cvls_mem->jtsetup = jtsetup; cvls_mem->jtimes = jtimes; cvls_mem->jt_data = cv_mem->cv_user_data; } else { cvls_mem->jtimesDQ = SUNTRUE; cvls_mem->jtsetup = NULL; cvls_mem->jtimes = cvLsDQJtimes; cvls_mem->jt_f = cv_mem->cv_f; cvls_mem->jt_data = cv_mem; } return(CVLS_SUCCESS); } /* CVodeSetJacTimesRhsFn specifies an alternative user-supplied ODE right-hand side function to use in the internal finite difference Jacobian-vector product */ int CVodeSetJacTimesRhsFn(void *cvode_mem, CVRhsFn jtimesRhsFn) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetJacTimesRhsFn", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* check if using internal finite difference approximation */ if (!(cvls_mem->jtimesDQ)) { cvProcessError(cv_mem, CVLS_ILL_INPUT, 
"CVSLS", "CVodeSetJacTimesRhsFn", "Internal finite-difference Jacobian-vector product is disabled."); return(CVLS_ILL_INPUT); } /* store function pointers for RHS function (NULL implies use ODE RHS) */ if (jtimesRhsFn != NULL) cvls_mem->jt_f = jtimesRhsFn; else cvls_mem->jt_f = cv_mem->cv_f; return(CVLS_SUCCESS); } /* CVodeSetLinSysFn specifies the linear system setup function. */ int CVodeSetLinSysFn(void *cvode_mem, CVLsLinSysFn linsys) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeSetLinSysFn", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* return with failure if linsys cannot be used */ if ((linsys != NULL) && (cvls_mem->A == NULL)) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinSysFn", "Linear system setup routine cannot be supplied for NULL SUNMatrix"); return(CVLS_ILL_INPUT); } /* set the linear system routine pointer, and update relevant flags */ if (linsys != NULL) { cvls_mem->user_linsys = SUNTRUE; cvls_mem->linsys = linsys; cvls_mem->A_data = cv_mem->cv_user_data; } else { cvls_mem->user_linsys = SUNFALSE; cvls_mem->linsys = cvLsLinSys; cvls_mem->A_data = cv_mem; } return(CVLS_SUCCESS); } /* CVodeGetLinWorkSpace returns the length of workspace allocated for the CVLS linear solver interface */ int CVodeGetLinWorkSpace(void *cvode_mem, long int *lenrwLS, long int *leniwLS) { CVodeMem cv_mem; CVLsMem cvls_mem; sunindextype lrw1, liw1; long int lrw, liw; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetLinWorkSpace", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* start with fixed sizes plus vector/matrix pointers */ *lenrwLS = 2; *leniwLS = 30; /* add NVector sizes */ if (cv_mem->cv_tempv->ops->nvspace) { N_VSpace(cv_mem->cv_tempv, &lrw1, &liw1); *lenrwLS += 2*lrw1; *leniwLS += 2*liw1; } /* add SUNMatrix size (only account for the one owned by Ls interface) */ if (cvls_mem->savedJ) if 
(cvls_mem->savedJ->ops->space) { retval = SUNMatSpace(cvls_mem->savedJ, &lrw, &liw); if (retval == 0) { *lenrwLS += lrw; *leniwLS += liw; } } /* add LS sizes */ if (cvls_mem->LS->ops->space) { retval = SUNLinSolSpace(cvls_mem->LS, &lrw, &liw); if (retval == 0) { *lenrwLS += lrw; *leniwLS += liw; } } return(CVLS_SUCCESS); } /* CVodeGetNumJacEvals returns the number of Jacobian evaluations */ int CVodeGetNumJacEvals(void *cvode_mem, long int *njevals) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetNumJacEvals", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *njevals = cvls_mem->nje; return(CVLS_SUCCESS); } /* CVodeGetNumLinRhsEvals returns the number of calls to the ODE function needed for the DQ Jacobian approximation or J*v product approximation */ int CVodeGetNumLinRhsEvals(void *cvode_mem, long int *nfevalsLS) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetNumLinRhsEvals", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *nfevalsLS = cvls_mem->nfeDQ; return(CVLS_SUCCESS); } /* CVodeGetNumPrecEvals returns the number of calls to the user- or CVode-supplied preconditioner setup routine */ int CVodeGetNumPrecEvals(void *cvode_mem, long int *npevals) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetNumPrecEvals", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *npevals = cvls_mem->npe; return(CVLS_SUCCESS); } /* CVodeGetNumPrecSolves returns the number of calls to the user- or CVode-supplied preconditioner solve routine */ int CVodeGetNumPrecSolves(void *cvode_mem, long int *npsolves) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = 
cvLs_AccessLMem(cvode_mem, "CVodeGetNumPrecSolves", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *npsolves = cvls_mem->nps; return(CVLS_SUCCESS); } /* CVodeGetNumLinIters returns the number of linear iterations (if accessible from the LS object) */ int CVodeGetNumLinIters(void *cvode_mem, long int *nliters) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetNumLinIters", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *nliters = cvls_mem->nli; return(CVLS_SUCCESS); } /* CVodeGetNumLinConvFails returns the number of linear solver convergence failures (as reported by the LS object) */ int CVodeGetNumLinConvFails(void *cvode_mem, long int *nlcfails) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetNumLinConvFails", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *nlcfails = cvls_mem->ncfl; return(CVLS_SUCCESS); } /* CVodeGetNumJTSetupEvals returns the number of calls to the user-supplied Jacobian-vector product setup routine */ int CVodeGetNumJTSetupEvals(void *cvode_mem, long int *njtsetups) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetNumJTSetupEvals", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *njtsetups = cvls_mem->njtsetup; return(CVLS_SUCCESS); } /* CVodeGetNumJtimesEvals returns the number of calls to the Jacobian-vector product multiply routine */ int CVodeGetNumJtimesEvals(void *cvode_mem, long int *njvevals) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetNumJtimesEvals", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *njvevals = cvls_mem->njtimes; 
return(CVLS_SUCCESS); } /* CVodeGetLinSolveStats returns statistics related to the linear solve. */ int CVodeGetLinSolveStats(void* cvode_mem, long int* njevals, long int* nfevalsLS, long int* nliters, long int* nlcfails, long int* npevals, long int* npsolves, long int* njtsetups, long int* njtimes) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetLinSolveStats", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *njevals = cvls_mem->nje; *nfevalsLS = cvls_mem->nfeDQ; *nliters = cvls_mem->nli; *nlcfails = cvls_mem->ncfl; *npevals = cvls_mem->npe; *npsolves = cvls_mem->nps; *njtsetups = cvls_mem->njtsetup; *njtimes = cvls_mem->njtimes; return(CVLS_SUCCESS); } /* CVodeGetLastLinFlag returns the last flag set in a CVLS function */ int CVodeGetLastLinFlag(void *cvode_mem, long int *flag) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure; set output value and return */ retval = cvLs_AccessLMem(cvode_mem, "CVodeGetLastLinFlag", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); *flag = cvls_mem->last_flag; return(CVLS_SUCCESS); } /* CVodeGetLinReturnFlagName translates from the integer error code returned by an CVLs routine to the corresponding string equivalent for that flag */ char *CVodeGetLinReturnFlagName(long int flag) { char *name = (char *)malloc(30*sizeof(char)); switch(flag) { case CVLS_SUCCESS: sprintf(name,"CVLS_SUCCESS"); break; case CVLS_MEM_NULL: sprintf(name,"CVLS_MEM_NULL"); break; case CVLS_LMEM_NULL: sprintf(name,"CVLS_LMEM_NULL"); break; case CVLS_ILL_INPUT: sprintf(name,"CVLS_ILL_INPUT"); break; case CVLS_MEM_FAIL: sprintf(name,"CVLS_MEM_FAIL"); break; case CVLS_PMEM_NULL: sprintf(name,"CVLS_PMEM_NULL"); break; case CVLS_JACFUNC_UNRECVR: sprintf(name,"CVLS_JACFUNC_UNRECVR"); break; case CVLS_JACFUNC_RECVR: sprintf(name,"CVLS_JACFUNC_RECVR"); break; case CVLS_SUNMAT_FAIL: 
sprintf(name,"CVLS_SUNMAT_FAIL"); break; case CVLS_SUNLS_FAIL: sprintf(name,"CVLS_SUNLS_FAIL"); break; case CVLS_NO_ADJ: sprintf(name,"CVLS_NO_ADJ"); break; case CVLS_LMEMB_NULL: sprintf(name,"CVLS_LMEMB_NULL"); break; default: sprintf(name,"NONE"); } return(name); } /*================================================================= CVSLS private functions =================================================================*/ /*----------------------------------------------------------------- cvLsATimes This routine generates the matrix-vector product z = Mv, where M = I - gamma*J. The product J*v is obtained by calling the jtimes routine. It is then scaled by -gamma and added to v to obtain M*v. The return value is the same as the value returned by jtimes -- 0 if successful, nonzero otherwise. -----------------------------------------------------------------*/ int cvLsATimes(void *cvode_mem, N_Vector v, N_Vector z) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "cvLsATimes", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* call Jacobian-times-vector product routine (either user-supplied or internal DQ) */ retval = cvls_mem->jtimes(v, z, cv_mem->cv_tn, cvls_mem->ycur, cvls_mem->fcur, cvls_mem->jt_data, cvls_mem->ytemp); cvls_mem->njtimes++; if (retval != 0) return(retval); /* add contribution from identity matrix */ N_VLinearSum(ONE, v, -cv_mem->cv_gamma, z, z); return(0); } /*--------------------------------------------------------------- cvLsPSetup: This routine interfaces between the generic iterative linear solvers and the user's psetup routine. It passes to psetup all required state information from cvode_mem. Its return value is the same as that returned by psetup. Note that the generic iterative linear solvers guarantee that cvLsPSetup will only be called in the case that the user's psetup routine is non-NULL. 
---------------------------------------------------------------*/ int cvLsPSetup(void *cvode_mem) { int retval; CVodeMem cv_mem; CVLsMem cvls_mem; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "cvLsPSetup", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* Call user pset routine to update preconditioner and possibly reset jcur (pass !jbad as update suggestion) */ retval = cvls_mem->pset(cv_mem->cv_tn, cvls_mem->ycur, cvls_mem->fcur, !(cvls_mem->jbad), &cv_mem->cv_jcur, cv_mem->cv_gamma, cvls_mem->P_data); return(retval); } /*----------------------------------------------------------------- cvLsPSolve This routine interfaces between the generic SUNLinSolSolve routine and the user's psolve routine. It passes to psolve all required state information from cvode_mem. Its return value is the same as that returned by psolve. Note that the generic SUNLinSol solver guarantees that cvLsPSolve will not be called in the case in which preconditioning is not done. This is the only case in which the user's psolve routine is allowed to be NULL. -----------------------------------------------------------------*/ int cvLsPSolve(void *cvode_mem, N_Vector r, N_Vector z, realtype tol, int lr) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "cvLsPSolve", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* call the user-supplied psolve routine, and accumulate count */ retval = cvls_mem->psolve(cv_mem->cv_tn, cvls_mem->ycur, cvls_mem->fcur, r, z, cv_mem->cv_gamma, tol, lr, cvls_mem->P_data); cvls_mem->nps++; return(retval); } /*----------------------------------------------------------------- cvLsDQJac This routine is a wrapper for the Dense and Band implementations of the difference quotient Jacobian approximation routines. 
---------------------------------------------------------------*/ int cvLsDQJac(realtype t, N_Vector y, N_Vector fy, SUNMatrix Jac, void *cvode_mem, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { CVodeMem cv_mem; int retval; /* access CVodeMem structure */ if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSLS", "cvLsDQJac", MSG_LS_CVMEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* verify that Jac is non-NULL */ if (Jac == NULL) { cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSLS", "cvLsDQJac", MSG_LS_LMEM_NULL); return(CVLS_LMEM_NULL); } /* Verify that N_Vector supports required operations */ if (cv_mem->cv_tempv->ops->nvcloneempty == NULL || cv_mem->cv_tempv->ops->nvwrmsnorm == NULL || cv_mem->cv_tempv->ops->nvlinearsum == NULL || cv_mem->cv_tempv->ops->nvdestroy == NULL || cv_mem->cv_tempv->ops->nvscale == NULL || cv_mem->cv_tempv->ops->nvgetarraypointer == NULL || cv_mem->cv_tempv->ops->nvsetarraypointer == NULL) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "cvLsDQJac", MSG_LS_BAD_NVECTOR); return(CVLS_ILL_INPUT); } /* Call the matrix-structure-specific DQ approximation routine */ if (SUNMatGetID(Jac) == SUNMATRIX_DENSE) { retval = cvLsDenseDQJac(t, y, fy, Jac, cv_mem, tmp1); } else if (SUNMatGetID(Jac) == SUNMATRIX_BAND) { retval = cvLsBandDQJac(t, y, fy, Jac, cv_mem, tmp1, tmp2); } else { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "cvLsDQJac", "unrecognized matrix type for cvLsDQJac"); retval = CVLS_ILL_INPUT; } return(retval); } /*----------------------------------------------------------------- cvLsDenseDQJac This routine generates a dense difference quotient approximation to the Jacobian of f(t,y). It assumes that a dense SUNMatrix is stored column-wise, and that elements within each column are contiguous. The address of the jth column of J is obtained via the accessor function SUNDenseMatrix_Column, and this pointer is associated with an N_Vector using the N_VSetArrayPointer function. 
Finally, the actual computation of the jth column of the Jacobian is done with a call to N_VLinearSum. -----------------------------------------------------------------*/ int cvLsDenseDQJac(realtype t, N_Vector y, N_Vector fy, SUNMatrix Jac, CVodeMem cv_mem, N_Vector tmp1) { realtype fnorm, minInc, inc, inc_inv, yjsaved, srur, conj; realtype *y_data, *ewt_data, *cns_data; N_Vector ftemp, jthCol; sunindextype j, N; CVLsMem cvls_mem; int retval = 0; /* initialize cns_data to avoid compiler warning */ cns_data = NULL; /* access LsMem interface structure */ cvls_mem = (CVLsMem) cv_mem->cv_lmem; /* access matrix dimension */ N = SUNDenseMatrix_Columns(Jac); /* Rename work vector for readibility */ ftemp = tmp1; /* Create an empty vector for matrix column calculations */ jthCol = N_VCloneEmpty(tmp1); /* Obtain pointers to the data for ewt, y */ ewt_data = N_VGetArrayPointer(cv_mem->cv_ewt); y_data = N_VGetArrayPointer(y); if (cv_mem->cv_constraintsSet) cns_data = N_VGetArrayPointer(cv_mem->cv_constraints); /* Set minimum increment based on uround and norm of f */ srur = SUNRsqrt(cv_mem->cv_uround); fnorm = N_VWrmsNorm(fy, cv_mem->cv_ewt); minInc = (fnorm != ZERO) ? (MIN_INC_MULT * SUNRabs(cv_mem->cv_h) * cv_mem->cv_uround * N * fnorm) : ONE; for (j = 0; j < N; j++) { /* Generate the jth col of J(tn,y) */ N_VSetArrayPointer(SUNDenseMatrix_Column(Jac,j), jthCol); yjsaved = y_data[j]; inc = SUNMAX(srur*SUNRabs(yjsaved), minInc/ewt_data[j]); /* Adjust sign(inc) if y_j has an inequality constraint. 
*/ if (cv_mem->cv_constraintsSet) { conj = cns_data[j]; if (SUNRabs(conj) == ONE) {if ((yjsaved+inc)*conj < ZERO) inc = -inc;} else if (SUNRabs(conj) == TWO) {if ((yjsaved+inc)*conj <= ZERO) inc = -inc;} } y_data[j] += inc; retval = cv_mem->cv_f(t, y, ftemp, cv_mem->cv_user_data); cvls_mem->nfeDQ++; if (retval != 0) break; y_data[j] = yjsaved; inc_inv = ONE/inc; N_VLinearSum(inc_inv, ftemp, -inc_inv, fy, jthCol); } /* Destroy jthCol vector */ N_VSetArrayPointer(NULL, jthCol); /* SHOULDN'T BE NEEDED */ N_VDestroy(jthCol); return(retval); } /*----------------------------------------------------------------- cvLsBandDQJac This routine generates a banded difference quotient approximation to the Jacobian of f(t,y). It assumes that a band SUNMatrix is stored column-wise, and that elements within each column are contiguous. This makes it possible to get the address of a column of J via the accessor function SUNBandMatrix_Column, and to write a simple for loop to set each of the elements of a column in succession. 
-----------------------------------------------------------------*/
int cvLsBandDQJac(realtype t, N_Vector y, N_Vector fy, SUNMatrix Jac,
                  CVodeMem cv_mem, N_Vector tmp1, N_Vector tmp2)
{
  N_Vector ftemp, ytemp;
  realtype fnorm, minInc, inc, inc_inv, srur, conj;
  realtype *col_j, *ewt_data, *fy_data, *ftemp_data;
  realtype *y_data, *ytemp_data, *cns_data;
  sunindextype group, i, j, width, ngroups, i1, i2;
  sunindextype N, mupper, mlower;
  CVLsMem cvls_mem;
  int retval = 0;

  /* cns_data is only assigned when constraints are active; initialize
     here to silence compiler warnings */
  cns_data = NULL;

  /* interface structure and matrix dimensions */
  cvls_mem = (CVLsMem) cv_mem->cv_lmem;
  N      = SUNBandMatrix_Columns(Jac);
  mupper = SUNBandMatrix_UpperBandwidth(Jac);
  mlower = SUNBandMatrix_LowerBandwidth(Jac);

  /* temporaries for the perturbed state and its rhs evaluation */
  ftemp = tmp1;
  ytemp = tmp2;

  /* raw data pointers for ewt, fy, ftemp, y, ytemp */
  ewt_data   = N_VGetArrayPointer(cv_mem->cv_ewt);
  fy_data    = N_VGetArrayPointer(fy);
  ftemp_data = N_VGetArrayPointer(ftemp);
  y_data     = N_VGetArrayPointer(y);
  ytemp_data = N_VGetArrayPointer(ytemp);
  if (cv_mem->cv_constraintsSet)
    cns_data = N_VGetArrayPointer(cv_mem->cv_constraints);

  /* start from the predicted y */
  N_VScale(ONE, y, ytemp);

  /* minimum increment from unit roundoff and the norm of f */
  srur   = SUNRsqrt(cv_mem->cv_uround);
  fnorm  = N_VWrmsNorm(fy, cv_mem->cv_ewt);
  minInc = (fnorm != ZERO) ?
    (MIN_INC_MULT * SUNRabs(cv_mem->cv_h) * cv_mem->cv_uround * N * fnorm) :
    ONE;

  /* columns j, j+width, j+2*width, ... can be differenced together */
  width   = mlower + mupper + 1;
  ngroups = SUNMIN(width, N);

  for (group = 1; group <= ngroups; group++) {

    /* perturb every y_j belonging to this group */
    for (j = group-1; j < N; j += width) {
      inc = SUNMAX(srur*SUNRabs(y_data[j]), minInc/ewt_data[j]);

      /* flip the sign of inc when y_j + inc would violate an active
         inequality constraint on y_j */
      if (cv_mem->cv_constraintsSet) {
        conj = cns_data[j];
        if (SUNRabs(conj) == ONE) {
          if ((ytemp_data[j]+inc)*conj < ZERO) inc = -inc;
        } else if (SUNRabs(conj) == TWO) {
          if ((ytemp_data[j]+inc)*conj <= ZERO) inc = -inc;
        }
      }

      ytemp_data[j] += inc;
    }

    /* one rhs evaluation serves the whole group */
    retval = cv_mem->cv_f(cv_mem->cv_tn, ytemp, ftemp,
                          cv_mem->cv_user_data);
    cvls_mem->nfeDQ++;
    if (retval != 0) break;

    /* restore ytemp, then form and store the difference quotients */
    for (j = group-1; j < N; j += width) {
      ytemp_data[j] = y_data[j];
      col_j = SUNBandMatrix_Column(Jac, j);
      inc = SUNMAX(srur*SUNRabs(y_data[j]), minInc/ewt_data[j]);

      /* recompute the same sign adjustment applied above */
      if (cv_mem->cv_constraintsSet) {
        conj = cns_data[j];
        if (SUNRabs(conj) == ONE) {
          if ((ytemp_data[j]+inc)*conj < ZERO) inc = -inc;
        } else if (SUNRabs(conj) == TWO) {
          if ((ytemp_data[j]+inc)*conj <= ZERO) inc = -inc;
        }
      }

      inc_inv = ONE/inc;
      i1 = SUNMAX(0, j-mupper);
      i2 = SUNMIN(j+mlower, N-1);
      for (i = i1; i <= i2; i++)
        SM_COLUMN_ELEMENT_B(col_j,i,j) =
          inc_inv * (ftemp_data[i] - fy_data[i]);
    }
  }

  return(retval);
}

/*-----------------------------------------------------------------
  cvLsDQJtimes

  This routine generates a difference quotient approximation to
  the Jacobian times vector f_y(t,y) * v. The approximation is
  Jv = [f(y + v*sig) - f(y)]/sig, where sig = 1 / ||v||_WRMS,
  i.e. the WRMS norm of v*sig is 1.
-----------------------------------------------------------------*/ int cvLsDQJtimes(N_Vector v, N_Vector Jv, realtype t, N_Vector y, N_Vector fy, void *cvode_mem, N_Vector work) { CVodeMem cv_mem; CVLsMem cvls_mem; realtype sig, siginv; int iter, retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "cvLsDQJtimes", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* Initialize perturbation to 1/||v|| */ sig = ONE/N_VWrmsNorm(v, cv_mem->cv_ewt); for (iter=0; iterjt_f(t, work, Jv, cv_mem->cv_user_data); cvls_mem->nfeDQ++; if (retval == 0) break; if (retval < 0) return(-1); /* If f failed recoverably, shrink sig and retry */ sig *= PT25; } /* If retval still isn't 0, return with a recoverable failure */ if (retval > 0) return(+1); /* Replace Jv by (Jv - fy)/sig */ siginv = ONE/sig; N_VLinearSum(siginv, Jv, -siginv, fy, Jv); return(0); } /*----------------------------------------------------------------- cvLsLinSys Setup the linear system A = I - gamma J -----------------------------------------------------------------*/ static int cvLsLinSys(realtype t, N_Vector y, N_Vector fy, SUNMatrix A, booleantype jok, booleantype *jcur, realtype gamma, void *cvode_mem, N_Vector vtemp1, N_Vector vtemp2, N_Vector vtemp3) { CVodeMem cv_mem; CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ retval = cvLs_AccessLMem(cvode_mem, "cvLsLinSys", &cv_mem, &cvls_mem); if (retval != CVLS_SUCCESS) return(retval); /* Check if Jacobian needs to be updated */ if (jok) { /* Use saved copy of J */ *jcur = SUNFALSE; /* Overwrite linear system matrix with saved J */ retval = SUNMatCopy(cvls_mem->savedJ, A); if (retval) { cvProcessError(cv_mem, CVLS_SUNMAT_FAIL, "CVSLS", "cvLsSetup", MSG_LS_SUNMAT_FAILED); cvls_mem->last_flag = CVLS_SUNMAT_FAIL; return(cvls_mem->last_flag); } } else { /* Call jac() routine to update J */ *jcur = SUNTRUE; /* Clear the linear system matrix if necessary */ if (SUNLinSolGetType(cvls_mem->LS) == SUNLINEARSOLVER_DIRECT) { 
retval = SUNMatZero(A); if (retval) { cvProcessError(cv_mem, CVLS_SUNMAT_FAIL, "CVSLS", "cvLsSetup", MSG_LS_SUNMAT_FAILED); cvls_mem->last_flag = CVLS_SUNMAT_FAIL; return(cvls_mem->last_flag); } } /* Compute new Jacobian matrix */ retval = cvls_mem->jac(t, y, fy, A, cvls_mem->J_data, vtemp1, vtemp2, vtemp3); if (retval < 0) { cvProcessError(cv_mem, CVLS_JACFUNC_UNRECVR, "CVSLS", "cvLsSetup", MSG_LS_JACFUNC_FAILED); cvls_mem->last_flag = CVLS_JACFUNC_UNRECVR; return(-1); } if (retval > 0) { cvls_mem->last_flag = CVLS_JACFUNC_RECVR; return(1); } /* Update saved copy of the Jacobian matrix */ retval = SUNMatCopy(A, cvls_mem->savedJ); if (retval) { cvProcessError(cv_mem, CVLS_SUNMAT_FAIL, "CVSLS", "cvLsSetup", MSG_LS_SUNMAT_FAILED); cvls_mem->last_flag = CVLS_SUNMAT_FAIL; return(cvls_mem->last_flag); } } /* Perform linear combination A = I - gamma*J */ retval = SUNMatScaleAddI(-gamma, A); if (retval) { cvProcessError(cv_mem, CVLS_SUNMAT_FAIL, "CVSLS", "cvLsSetup", MSG_LS_SUNMAT_FAILED); cvls_mem->last_flag = CVLS_SUNMAT_FAIL; return(cvls_mem->last_flag); } return(CVLS_SUCCESS); } /*----------------------------------------------------------------- cvLsInitialize This routine performs remaining initializations specific to the iterative linear solver interface (and solver itself) -----------------------------------------------------------------*/ int cvLsInitialize(CVodeMem cv_mem) { CVLsMem cvls_mem; int retval; /* access CVLsMem structure */ if (cv_mem->cv_lmem==NULL) { cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSLS", "cvLsInitialize", MSG_LS_LMEM_NULL); return(CVLS_LMEM_NULL); } cvls_mem = (CVLsMem) cv_mem->cv_lmem; /* Test for valid combinations of matrix & Jacobian routines: */ if (cvls_mem->A != NULL) { /* Matrix-based case */ if (cvls_mem->user_linsys) { /* User-supplied linear system function, reset A_data (just in case) */ cvls_mem->A_data = cv_mem->cv_user_data; } else { /* Internal linear system function, reset pointers (just in case) */ cvls_mem->linsys = 
cvLsLinSys; cvls_mem->A_data = cv_mem; /* Check if an internal or user-supplied Jacobian function is used */ if (cvls_mem->jacDQ) { /* Internal difference quotient Jacobian. Check that A is dense or band, otherwise return an error */ retval = 0; if (cvls_mem->A->ops->getid) { if ( (SUNMatGetID(cvls_mem->A) == SUNMATRIX_DENSE) || (SUNMatGetID(cvls_mem->A) == SUNMATRIX_BAND) ) { cvls_mem->jac = cvLsDQJac; cvls_mem->J_data = cv_mem; } else { retval++; } } else { retval++; } if (retval) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "cvLsInitialize", "No Jacobian constructor available for SUNMatrix type"); cvls_mem->last_flag = CVLS_ILL_INPUT; return(CVLS_ILL_INPUT); } } else { /* User-supplied Jacobian, reset J_data pointer (just in case) */ cvls_mem->J_data = cv_mem->cv_user_data; } /* Allocate internally saved Jacobian if not already done */ if (cvls_mem->savedJ == NULL) { cvls_mem->savedJ = SUNMatClone(cvls_mem->A); if (cvls_mem->savedJ == NULL) { cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSLS", "cvLsInitialize", MSG_LS_MEM_FAIL); cvls_mem->last_flag = CVLS_MEM_FAIL; return(CVLS_MEM_FAIL); } } } /* end matrix-based case */ } else { /* Matrix-free case: ensure 'jac' and `linsys` function pointers are NULL */ cvls_mem->jacDQ = SUNFALSE; cvls_mem->jac = NULL; cvls_mem->J_data = NULL; cvls_mem->user_linsys = SUNFALSE; cvls_mem->linsys = NULL; cvls_mem->A_data = NULL; } /* reset counters */ cvLsInitializeCounters(cvls_mem); /* Set Jacobian-vector product related fields, based on jtimesDQ */ if (cvls_mem->jtimesDQ) { cvls_mem->jtsetup = NULL; cvls_mem->jtimes = cvLsDQJtimes; cvls_mem->jt_data = cv_mem; } else { cvls_mem->jt_data = cv_mem->cv_user_data; } /* if A is NULL and psetup is not present, then cvLsSetup does not need to be called, so set the lsetup function to NULL */ if ( (cvls_mem->A == NULL) && (cvls_mem->pset == NULL) ) cv_mem->cv_lsetup = NULL; /* When using a matrix-embedded linear solver, disable lsetup call and solution scaling */ if 
(SUNLinSolGetType(cvls_mem->LS) == SUNLINEARSOLVER_MATRIX_EMBEDDED) { cv_mem->cv_lsetup = NULL; cvls_mem->scalesol = SUNFALSE; } /* Call LS initialize routine, and return result */ cvls_mem->last_flag = SUNLinSolInitialize(cvls_mem->LS); return(cvls_mem->last_flag); } /*----------------------------------------------------------------- cvLsSetup This conditionally calls the LS 'setup' routine. When using a SUNMatrix object, this determines whether to update a Jacobian matrix (or use a stored version), based on heuristics regarding previous convergence issues, the number of time steps since it was last updated, etc.; it then creates the system matrix from this, the 'gamma' factor and the identity matrix, A = I-gamma*J. This routine then calls the LS 'setup' routine with A. -----------------------------------------------------------------*/ int cvLsSetup(CVodeMem cv_mem, int convfail, N_Vector ypred, N_Vector fpred, booleantype *jcurPtr, N_Vector vtemp1, N_Vector vtemp2, N_Vector vtemp3) { CVLsMem cvls_mem; realtype dgamma; int retval; /* access CVLsMem structure */ if (cv_mem->cv_lmem==NULL) { cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSLS", "cvLsSetup", MSG_LS_LMEM_NULL); return(CVLS_LMEM_NULL); } cvls_mem = (CVLsMem) cv_mem->cv_lmem; /* Immediately return when using matrix-embedded linear solver */ if (SUNLinSolGetType(cvls_mem->LS) == SUNLINEARSOLVER_MATRIX_EMBEDDED) { cvls_mem->last_flag = CVLS_SUCCESS; return(cvls_mem->last_flag); } /* Set CVLs N_Vector pointers to current solution and rhs */ cvls_mem->ycur = ypred; cvls_mem->fcur = fpred; /* Use nst, gamma/gammap, and convfail to set J/P eval. 
flag jok */ dgamma = SUNRabs((cv_mem->cv_gamma/cv_mem->cv_gammap) - ONE); cvls_mem->jbad = (cv_mem->cv_nst == 0) || (cv_mem->cv_nst >= cvls_mem->nstlj + cvls_mem->msbj) || ((convfail == CV_FAIL_BAD_J) && (dgamma < CVLS_DGMAX)) || (convfail == CV_FAIL_OTHER); /* Setup the linear system if necessary */ if (cvls_mem->A != NULL) { /* Update J if appropriate and evaluate A = I - gamma J */ retval = cvls_mem->linsys(cv_mem->cv_tn, ypred, fpred, cvls_mem->A, !(cvls_mem->jbad), jcurPtr, cv_mem->cv_gamma, cvls_mem->A_data, vtemp1, vtemp2, vtemp3); /* Update J eval count and step when J was last updated */ if (*jcurPtr) { cvls_mem->nje++; cvls_mem->nstlj = cv_mem->cv_nst; } /* Check linsys() return value and return if necessary */ if (retval != CVLS_SUCCESS) { if (cvls_mem->user_linsys) { if (retval < 0) { cvProcessError(cv_mem, CVLS_JACFUNC_UNRECVR, "CVSLS", "cvLsSetup", MSG_LS_JACFUNC_FAILED); cvls_mem->last_flag = CVLS_JACFUNC_UNRECVR; return(-1); } else { cvls_mem->last_flag = CVLS_JACFUNC_RECVR; return(1); } } else { return(retval); } } } else { /* Matrix-free case, set jcur to jbad */ *jcurPtr = cvls_mem->jbad; } /* Call LS setup routine -- the LS may call cvLsPSetup, who will pass the heuristic suggestions above to the user code(s) */ cvls_mem->last_flag = SUNLinSolSetup(cvls_mem->LS, cvls_mem->A); /* If Matrix-free, update heuristics flags */ if (cvls_mem->A == NULL) { /* If user set jcur to SUNTRUE, increment npe and save nst value */ if (*jcurPtr) { cvls_mem->npe++; cvls_mem->nstlj = cv_mem->cv_nst; } /* Update jcur flag if we suggested an update */ if (cvls_mem->jbad) *jcurPtr = SUNTRUE; } return(cvls_mem->last_flag); } /*----------------------------------------------------------------- cvLsSolve This routine interfaces between CVode and the generic SUNLinearSolver object LS, by setting the appropriate tolerance and scaling vectors, calling the solver, and accumulating statistics from the solve for use/reporting by CVode. 
-----------------------------------------------------------------*/
int cvLsSolve(CVodeMem cv_mem, N_Vector b, N_Vector weight,
              N_Vector ynow, N_Vector fnow)
{
  CVLsMem cvls_mem;
  realtype bnorm, deltar, delta, w_mean;
  int curiter, nli_inc, retval;
  booleantype do_sensi_sim, do_sensi_stg, do_sensi_stg1;

  /* access CVLsMem structure */
  if (cv_mem->cv_lmem==NULL) {
    cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSLS",
                   "cvLsSolve", MSG_LS_LMEM_NULL);
    return(CVLS_LMEM_NULL);
  }
  cvls_mem = (CVLsMem) cv_mem->cv_lmem;

  /* which sensitivity approach (if any) is being used? */
  do_sensi_sim  = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_SIMULTANEOUS));
  do_sensi_stg  = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_STAGGERED));
  do_sensi_stg1 = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_STAGGERED1));

  /* current iteration of whichever nonlinear solver is active */
  if (do_sensi_sim)
    retval = SUNNonlinSolGetCurIter(cv_mem->NLSsim, &curiter);
  else if (do_sensi_stg && cv_mem->sens_solve)
    retval = SUNNonlinSolGetCurIter(cv_mem->NLSstg, &curiter);
  else if (do_sensi_stg1 && cv_mem->sens_solve)
    retval = SUNNonlinSolGetCurIter(cv_mem->NLSstg1, &curiter);
  else
    retval = SUNNonlinSolGetCurIter(cv_mem->NLS, &curiter);

  /* Iterative solvers: if ||b|| is already below the tolerance the
     answer is x = 0 (or leave b on the first Newton iteration);
     otherwise convert the tolerance to a scaled 2-norm */
  if (cvls_mem->iterative) {
    deltar = cvls_mem->eplifac * cv_mem->cv_tq[4];
    bnorm = N_VWrmsNorm(b, weight);
    if (bnorm <= deltar) {
      if (curiter > 0) N_VConst(ZERO, b);
      cvls_mem->last_flag = CVLS_SUCCESS;
      return(cvls_mem->last_flag);
    }
    delta = deltar * cvls_mem->nrmfac;
  } else {
    delta = ZERO;
  }

  /* make the current solution and rhs visible to the Atimes and
     Psolve interface routines */
  cvls_mem->ycur = ynow;
  cvls_mem->fcur = fnow;

  if (cvls_mem->LS->ops->setscalingvectors) {

    /* hand the weight vector to the LS directly */
    retval = SUNLinSolSetScalingVectors(cvls_mem->LS, weight, weight);
    if (retval != SUNLS_SUCCESS) {
      cvProcessError(cv_mem, CVLS_SUNLS_FAIL, "CVSLS", "cvLsSolve",
                     "Error in calling SUNLinSolSetScalingVectors");
      cvls_mem->last_flag = CVLS_SUNLS_FAIL;
      return(cvls_mem->last_flag);
    }

  } else if (cvls_mem->iterative) {

    /* The LS cannot scale, so fold the weights into the tolerance.
       Assuming (1) homogeneous weights w_i = w_mean and (2) a plain
       2-norm convergence test in the LS, we have (with S = diag(w)):

         || S (b - A x) ||_2 < tol
           <=> w_mean^2 \sum_i (b - A x)_i^2 < tol^2
           <=> || b - A x ||_2 < tol / w_mean

       so compute w_mean = ||w||_RMS = ||w||_2 and divide. */
    N_VConst(ONE, cvls_mem->x);
    w_mean = N_VWrmsNorm(weight, cvls_mem->x);
    delta /= w_mean;

  }

  /* initial guess x = 0, and tell the LS it really is zero */
  N_VConst(ZERO, cvls_mem->x);
  retval = SUNLinSolSetZeroGuess(cvls_mem->LS, SUNTRUE);
  if (retval != SUNLS_SUCCESS) return(-1);

  /* run the user-provided jtsetup routine, if one was supplied */
  if (cvls_mem->jtsetup) {
    cvls_mem->last_flag = cvls_mem->jtsetup(cv_mem->cv_tn, ynow, fnow,
                                            cvls_mem->jt_data);
    cvls_mem->njtsetup++;
    if (cvls_mem->last_flag != 0) {
      cvProcessError(cv_mem, retval, "CVSLS",
                     "cvLsSolve", MSG_LS_JTSETUP_FAILED);
      return(cvls_mem->last_flag);
    }
  }

  /* solve, then copy the result x back into b */
  retval = SUNLinSolSolve(cvls_mem->LS, cvls_mem->A, cvls_mem->x,
                          b, delta);
  N_VScale(ONE, cvls_mem->x, b);

  /* direct or matrix-iterative solver + BDF + changed gamma: rescale
     the correction to account for the change in gamma */
  if (cvls_mem->scalesol && cv_mem->cv_gamrat != ONE)
    N_VScale(TWO/(ONE + cv_mem->cv_gamrat), b, b);

  /* iteration statistics from iterative linear solvers */
  nli_inc = 0;
  if (cvls_mem->iterative && cvls_mem->LS->ops->numiters)
    nli_inc = SUNLinSolNumIters(cvls_mem->LS);

  /* Increment counters nli and ncfl */
  cvls_mem->nli += nli_inc;
  if (retval != SUNLS_SUCCESS) cvls_mem->ncfl++;

  /* Interpret solver return value */
  cvls_mem->last_flag = retval;

  switch(retval) {

  case SUNLS_SUCCESS:
    return(0);
    break;
  case SUNLS_RES_REDUCED:
    /* allow reduction but not solution on first Newton iteration,
       otherwise return with a recoverable failure */
    if (curiter == 0) return(0);
    else              return(1);
    break;
  case SUNLS_CONV_FAIL:
  case SUNLS_ATIMES_FAIL_REC:
  case SUNLS_PSOLVE_FAIL_REC:
  case SUNLS_PACKAGE_FAIL_REC:
  case SUNLS_QRFACT_FAIL:
  case SUNLS_LUFACT_FAIL:
    return(1);
    break;
  case SUNLS_MEM_NULL:
  case SUNLS_ILL_INPUT:
  case SUNLS_MEM_FAIL:
  case SUNLS_GS_FAIL:
  case SUNLS_QRSOL_FAIL:
    return(-1);
    break;
  case SUNLS_PACKAGE_FAIL_UNREC:
    cvProcessError(cv_mem, SUNLS_PACKAGE_FAIL_UNREC, "CVSLS",
                   "cvLsSolve",
                   "Failure in SUNLinSol external package");
    return(-1);
    break;
  case SUNLS_ATIMES_FAIL_UNREC:
    cvProcessError(cv_mem, SUNLS_ATIMES_FAIL_UNREC, "CVSLS",
                   "cvLsSolve", MSG_LS_JTIMES_FAILED);
    return(-1);
    break;
  case SUNLS_PSOLVE_FAIL_UNREC:
    cvProcessError(cv_mem, SUNLS_PSOLVE_FAIL_UNREC, "CVSLS",
                   "cvLsSolve", MSG_LS_PSOLVE_FAILED);
    return(-1);
    break;
  }

  return(0);
}

/*-----------------------------------------------------------------
  cvLsFree

  This routine frees memory associated with the CVLs system
  solver interface.
-----------------------------------------------------------------*/
int cvLsFree(CVodeMem cv_mem)
{
  CVLsMem cvls_mem;

  /* Return immediately if CVodeMem or CVLsMem are NULL */
  if (cv_mem == NULL) return (CVLS_SUCCESS);
  if (cv_mem->cv_lmem == NULL) return(CVLS_SUCCESS);
  cvls_mem = (CVLsMem) cv_mem->cv_lmem;

  /* Free N_Vector memory */
  if (cvls_mem->ytemp) {
    N_VDestroy(cvls_mem->ytemp);
    cvls_mem->ytemp = NULL;
  }
  if (cvls_mem->x) {
    N_VDestroy(cvls_mem->x);
    cvls_mem->x = NULL;
  }

  /* Free savedJ memory */
  if (cvls_mem->savedJ) {
    SUNMatDestroy(cvls_mem->savedJ);
    cvls_mem->savedJ = NULL;
  }

  /* Nullify other N_Vector pointers (these are borrowed references,
     not owned by this interface) */
  cvls_mem->ycur = NULL;
  cvls_mem->fcur = NULL;

  /* Nullify other SUNMatrix pointer (borrowed, user-owned) */
  cvls_mem->A = NULL;

  /* Free preconditioner memory (if applicable) */
  if (cvls_mem->pfree) cvls_mem->pfree(cv_mem);

  /* free CVLs interface structure; also clear the attached pointer so
     any later accidental use fails cleanly instead of dereferencing
     freed memory */
  free(cv_mem->cv_lmem);
  cv_mem->cv_lmem = NULL;

  return(CVLS_SUCCESS);
}

/*-----------------------------------------------------------------
  cvLsInitializeCounters

  This routine resets all counters from an CVLsMem structure.
  -----------------------------------------------------------------*/
int cvLsInitializeCounters(CVLsMem cvls_mem)
{
  cvls_mem->nje      = 0;   /* Jacobian evaluations            */
  cvls_mem->nfeDQ    = 0;   /* rhs calls for DQ Jacobian/Jv    */
  cvls_mem->nstlj    = 0;   /* step of last Jacobian update    */
  cvls_mem->npe      = 0;   /* preconditioner setups           */
  cvls_mem->nli      = 0;   /* linear iterations               */
  cvls_mem->nps      = 0;   /* preconditioner solves           */
  cvls_mem->ncfl     = 0;   /* linear convergence failures     */
  cvls_mem->njtsetup = 0;   /* Jacobian-vector setup calls     */
  cvls_mem->njtimes  = 0;   /* Jacobian-vector product calls   */
  return(0);
}

/*---------------------------------------------------------------
  cvLs_AccessLMem

  This routine unpacks the cv_mem and ls_mem structures from
  void* pointer.  If either is missing it returns CVLS_MEM_NULL
  or CVLS_LMEM_NULL.
---------------------------------------------------------------*/ int cvLs_AccessLMem(void* cvode_mem, const char *fname, CVodeMem *cv_mem, CVLsMem *cvls_mem) { if (cvode_mem==NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSLS", fname, MSG_LS_CVMEM_NULL); return(CVLS_MEM_NULL); } *cv_mem = (CVodeMem) cvode_mem; if ((*cv_mem)->cv_lmem==NULL) { cvProcessError(*cv_mem, CVLS_LMEM_NULL, "CVSLS", fname, MSG_LS_LMEM_NULL); return(CVLS_LMEM_NULL); } *cvls_mem = (CVLsMem) (*cv_mem)->cv_lmem; return(CVLS_SUCCESS); } /*================================================================ PART II - backward problems ================================================================*/ /*--------------------------------------------------------------- CVSLS Exported functions -- Required ---------------------------------------------------------------*/ /* CVodeSetLinearSolverB specifies the linear solver for backward integration */ int CVodeSetLinearSolverB(void *cvode_mem, int which, SUNLinearSolver LS, SUNMatrix A) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; CVLsMemB cvlsB_mem; int retval; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSLS", "CVodeSetLinearSolverB", MSG_LS_CVMEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CVLS_NO_ADJ, "CVSLS", "CVodeSetLinearSolverB", MSG_LS_NO_ADJ); return(CVLS_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSLS", "CVodeSetLinearSolverB", MSG_LS_BAD_WHICH); return(CVLS_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } /* Get memory for CVLsMemRecB */ cvlsB_mem = NULL; cvlsB_mem = (CVLsMemB) malloc(sizeof(struct CVLsMemRecB)); if (cvlsB_mem == NULL) { cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSLS", "CVodeSetLinearSolverB", MSG_LS_MEM_FAIL); return(CVLS_MEM_FAIL); } /* initialize Jacobian and preconditioner functions */ cvlsB_mem->jacB = NULL; cvlsB_mem->jacBS = NULL; cvlsB_mem->jtsetupB = NULL; cvlsB_mem->jtsetupBS = NULL; cvlsB_mem->jtimesB = NULL; cvlsB_mem->jtimesBS = NULL; cvlsB_mem->psetB = NULL; cvlsB_mem->psetBS = NULL; cvlsB_mem->psolveB = NULL; cvlsB_mem->psolveBS = NULL; cvlsB_mem->P_dataB = NULL; /* free any existing system solver attached to cvB */ if (cvB_mem->cv_lfree) cvB_mem->cv_lfree(cvB_mem); /* Attach lmemB data and lfreeB function. 
*/ cvB_mem->cv_lmem = cvlsB_mem; cvB_mem->cv_lfree = cvLsFreeB; /* set the linear solver for this backward problem */ cvodeB_mem = (void *) (cvB_mem->cv_mem); retval = CVodeSetLinearSolver(cvodeB_mem, LS, A); if (retval != CVLS_SUCCESS) { free(cvlsB_mem); cvlsB_mem = NULL; } return(retval); } /*--------------------------------------------------------------- CVSLS Exported functions -- Optional input/output ---------------------------------------------------------------*/ int CVodeSetJacFnB(void *cvode_mem, int which, CVLsJacFnB jacB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; void *cvodeB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetJacFnB", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* set jacB function pointer */ cvlsB_mem->jacB = jacB; /* call corresponding routine for cvodeB_mem structure */ cvodeB_mem = (void *) (cvB_mem->cv_mem); if (jacB != NULL) { retval = CVodeSetJacFn(cvodeB_mem, cvLsJacBWrapper); } else { retval = CVodeSetJacFn(cvodeB_mem, NULL); } return(retval); } int CVodeSetJacFnBS(void *cvode_mem, int which, CVLsJacFnBS jacBS) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; void *cvodeB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetJacFnBS", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* set jacBS function pointer */ cvlsB_mem->jacBS = jacBS; /* call corresponding routine for cvodeB_mem structure */ cvodeB_mem = (void *) (cvB_mem->cv_mem); if (jacBS != NULL) { retval = CVodeSetJacFn(cvodeB_mem, cvLsJacBSWrapper); } else { retval = CVodeSetJacFn(cvodeB_mem, NULL); } return(retval); } int CVodeSetEpsLinB(void *cvode_mem, int which, realtype eplifacB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; void *cvodeB_mem; int retval; /* access relevant memory 
structures */ retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetEpsLinB", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* call corresponding routine for cvodeB_mem structure */ cvodeB_mem = (void *) (cvB_mem->cv_mem); return(CVodeSetEpsLin(cvodeB_mem, eplifacB)); } int CVodeSetLSNormFactorB(void *cvode_mem, int which, realtype nrmfacB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; void *cvodeB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetLSNormFactorB", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* call corresponding routine for cvodeB_mem structure */ cvodeB_mem = (void *) (cvB_mem->cv_mem); return(CVodeSetLSNormFactor(cvodeB_mem, nrmfacB)); } int CVodeSetLinearSolutionScalingB(void *cvode_mem, int which, booleantype onoffB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; void *cvodeB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetLinearSolutionScalingB", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* call corresponding routine for cvodeB_mem structure */ cvodeB_mem = (void *) (cvB_mem->cv_mem); return(CVodeSetLinearSolutionScaling(cvodeB_mem, onoffB)); } int CVodeSetPreconditionerB(void *cvode_mem, int which, CVLsPrecSetupFnB psetupB, CVLsPrecSolveFnB psolveB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; CVLsMemB cvlsB_mem; CVLsPrecSetupFn cvls_psetup; CVLsPrecSolveFn cvls_psolve; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetPreconditionerB", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Set preconditioners for the backward problem. 
*/
  cvlsB_mem->psetB   = psetupB;
  cvlsB_mem->psolveB = psolveB;

  /* Call the corresponding "set" routine for the backward problem */
  cvodeB_mem  = (void *) (cvB_mem->cv_mem);
  cvls_psetup = (psetupB == NULL) ? NULL : cvLsPrecSetupBWrapper;
  cvls_psolve = (psolveB == NULL) ? NULL : cvLsPrecSolveBWrapper;
  return(CVodeSetPreconditioner(cvodeB_mem, cvls_psetup, cvls_psolve));
}


/* CVodeSetPreconditionerBS: like CVodeSetPreconditionerB, but for backward
   problems whose right-hand side depends on the forward sensitivities
   (user functions of the "BS" type). */
int CVodeSetPreconditionerBS(void *cvode_mem, int which,
                             CVLsPrecSetupFnBS psetupBS,
                             CVLsPrecSolveFnBS psolveBS)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  CVLsMemB cvlsB_mem;
  CVLsPrecSetupFn cvls_psetup;
  CVLsPrecSolveFn cvls_psolve;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetPreconditionerBS",
                            &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* Set preconditioners for the backward problem. */
  cvlsB_mem->psetBS   = psetupBS;
  cvlsB_mem->psolveBS = psolveBS;

  /* Call the corresponding "set" routine for the backward problem;
     a NULL user function maps to a NULL wrapper so the LS interface
     disables that operation. */
  cvodeB_mem  = (void *) (cvB_mem->cv_mem);
  cvls_psetup = (psetupBS == NULL) ? NULL : cvLsPrecSetupBSWrapper;
  cvls_psolve = (psolveBS == NULL) ? NULL : cvLsPrecSolveBSWrapper;
  return(CVodeSetPreconditioner(cvodeB_mem, cvls_psetup, cvls_psolve));
}


/* CVodeSetJacTimesB: attaches user-supplied Jacobian-times-vector setup and
   product routines for the backward problem identified by 'which'. */
int CVodeSetJacTimesB(void *cvode_mem, int which,
                      CVLsJacTimesSetupFnB jtsetupB,
                      CVLsJacTimesVecFnB jtimesB)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  CVLsMemB cvlsB_mem;
  CVLsJacTimesSetupFn cvls_jtsetup;
  CVLsJacTimesVecFn cvls_jtimes;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetJacTimesB",
                            &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* Set jacobian routines for the backward problem. */
  cvlsB_mem->jtsetupB = jtsetupB;
  cvlsB_mem->jtimesB  = jtimesB;

  /* Call the corresponding "set" routine for the backward problem */
  cvodeB_mem   = (void *) (cvB_mem->cv_mem);
  cvls_jtsetup = (jtsetupB == NULL) ? NULL : cvLsJacTimesSetupBWrapper;
  cvls_jtimes  = (jtimesB == NULL) ? NULL : cvLsJacTimesVecBWrapper;
  return(CVodeSetJacTimes(cvodeB_mem, cvls_jtsetup, cvls_jtimes));
}


/* CVodeSetJacTimesBS: sensitivity-dependent ("BS") variant of
   CVodeSetJacTimesB. */
int CVodeSetJacTimesBS(void *cvode_mem, int which,
                       CVLsJacTimesSetupFnBS jtsetupBS,
                       CVLsJacTimesVecFnBS jtimesBS)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  void *cvodeB_mem;
  CVLsMemB cvlsB_mem;
  CVLsJacTimesSetupFn cvls_jtsetup;
  CVLsJacTimesVecFn cvls_jtimes;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetJacTimesBS",
                            &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* Set jacobian routines for the backward problem. */
  cvlsB_mem->jtsetupBS = jtsetupBS;
  cvlsB_mem->jtimesBS  = jtimesBS;

  /* Call the corresponding "set" routine for the backward problem */
  cvodeB_mem   = (void *) (cvB_mem->cv_mem);
  cvls_jtsetup = (jtsetupBS == NULL) ? NULL : cvLsJacTimesSetupBSWrapper;
  cvls_jtimes  = (jtimesBS == NULL) ? NULL : cvLsJacTimesVecBSWrapper;
  return(CVodeSetJacTimes(cvodeB_mem, cvls_jtsetup, cvls_jtimes));
}


/* CVodeSetJacTimesRhsFnB: supplies an alternative RHS function for the
   internal difference-quotient Jacobian-times-vector approximation of the
   backward problem; forwarded directly to CVodeSetJacTimesRhsFn. */
int CVodeSetJacTimesRhsFnB(void *cvode_mem, int which, CVRhsFn jtimesRhsFn)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  CVLsMemB cvlsB_mem;
  void *cvodeB_mem;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetJacTimesRhsFnB",
                            &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* Call the corresponding "set" routine for the backward problem */
  cvodeB_mem = (void *) (cvB_mem->cv_mem);
  return(CVodeSetJacTimesRhsFn(cvodeB_mem, jtimesRhsFn));
}


/* CVodeSetLinSysFnB: attaches a user linear-system function for the
   backward problem identified by 'which'. */
int CVodeSetLinSysFnB(void *cvode_mem, int which, CVLsLinSysFnB linsysB)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  CVLsMemB cvlsB_mem;
  void *cvodeB_mem;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetLinSysFnB",
                            &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* set linsysB function pointer */
  cvlsB_mem->linsysB = linsysB;

  /* call corresponding routine for cvodeB_mem structure */
  cvodeB_mem = (void *) (cvB_mem->cv_mem);
  if (linsysB != NULL) {
    retval = CVodeSetLinSysFn(cvodeB_mem, cvLsLinSysBWrapper);
  } else {
    retval = CVodeSetLinSysFn(cvodeB_mem, NULL);
  }
  return(retval);
}


/* CVodeSetLinSysFnBS: sensitivity-dependent ("BS") variant of
   CVodeSetLinSysFnB. */
int CVodeSetLinSysFnBS(void *cvode_mem, int which, CVLsLinSysFnBS linsysBS)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  CVLsMemB cvlsB_mem;
  void *cvodeB_mem;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemB(cvode_mem, which, "CVodeSetLinSysFnBS",
                            &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* set linsysB function pointer */
  cvlsB_mem->linsysBS = linsysBS;

  /* call corresponding routine for cvodeB_mem structure */
  cvodeB_mem = (void *) (cvB_mem->cv_mem);
  if (linsysBS != NULL) {
    retval = CVodeSetLinSysFn(cvodeB_mem, cvLsLinSysBSWrapper);
  } else {
    retval = CVodeSetLinSysFn(cvodeB_mem, NULL);
  }
  return(retval);
}


/*-----------------------------------------------------------------
  CVSLS private functions for backwards problems
  -----------------------------------------------------------------*/


/* cvLsJacBWrapper interfaces to the CVLsJacFnB routine provided
   by the user. cvLsJacBWrapper is of type CVLsJacFn.
*/ static int cvLsJacBWrapper(realtype t, N_Vector yB, N_Vector fyB, SUNMatrix JB, void *cvode_mem, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsJacBWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsJacBWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint jacB routine (of type CVLsJacFnB) */ return(cvlsB_mem->jacB(t, ca_mem->ca_ytmp, yB, fyB, JB, cvB_mem->cv_user_data, tmp1B, tmp2B, tmp3B)); } /* cvLsJacBSWrapper interfaces to the CVLsJacFnBS routine provided by the user. cvLsJacBSWrapper is of type CVLsJacFn. */ static int cvLsJacBSWrapper(realtype t, N_Vector yB, N_Vector fyB, SUNMatrix JB, void *cvode_mem, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsJacBSWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ if (ca_mem->ca_IMinterpSensi) retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp); else retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsJacBSWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint dense djacBS routine (of type CVLsDenseJacFnBS) */ return(cvlsB_mem->jacBS(t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, fyB, JB, cvB_mem->cv_user_data, tmp1B, tmp2B, tmp3B)); } /* cvLsPrecSetupBWrapper interfaces to the CVLsPrecSetupFnB routine provided by the user */ static int 
cvLsPrecSetupBWrapper(realtype t, N_Vector yB, N_Vector fyB, booleantype jokB, booleantype *jcurPtrB, realtype gammaB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsPrecSetupBWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Get forward solution from interpolation */ retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsPrecSetupBWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint precondB routine */ return(cvlsB_mem->psetB(t, ca_mem->ca_ytmp, yB, fyB, jokB, jcurPtrB, gammaB, cvB_mem->cv_user_data)); } /* cvLsPrecSetupBSWrapper interfaces to the CVLsPrecSetupFnBS routine provided by the user */ static int cvLsPrecSetupBSWrapper(realtype t, N_Vector yB, N_Vector fyB, booleantype jokB, booleantype *jcurPtrB, realtype gammaB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsPrecSetupBSWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ if (ca_mem->ca_IMinterpSensi) retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp); else retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsPrecSetupBSWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint precondB routine */ return(cvlsB_mem->psetBS(t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, fyB, jokB, jcurPtrB, gammaB, cvB_mem->cv_user_data)); } /* cvLsPrecSolveBWrapper interfaces to the CVLsPrecSolveFnB routine provided by the user */ static int cvLsPrecSolveBWrapper(realtype t, N_Vector yB, N_Vector fyB, N_Vector rB, N_Vector 
zB, realtype gammaB, realtype deltaB, int lrB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsPrecSolveBWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsPrecSolveBWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint psolveB routine */ return(cvlsB_mem->psolveB(t, ca_mem->ca_ytmp, yB, fyB, rB, zB, gammaB, deltaB, lrB, cvB_mem->cv_user_data)); } /* cvLsPrecSolveBSWrapper interfaces to the CVLsPrecSolveFnBS routine provided by the user */ static int cvLsPrecSolveBSWrapper(realtype t, N_Vector yB, N_Vector fyB, N_Vector rB, N_Vector zB, realtype gammaB, realtype deltaB, int lrB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsPrecSolveBSWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ if (ca_mem->ca_IMinterpSensi) retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp); else retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsPrecSolveBSWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint psolveBS routine */ return(cvlsB_mem->psolveBS(t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, fyB, rB, zB, gammaB, deltaB, lrB, cvB_mem->cv_user_data)); } /* cvLsJacTimesSetupBWrapper interfaces to the CVLsJacTimesSetupFnB routine provided by the user */ static int cvLsJacTimesSetupBWrapper(realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem 
cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsJacTimesSetupBWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsJacTimesVecBWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint jtsetupB routine */ return(cvlsB_mem->jtsetupB(t, ca_mem->ca_ytmp, yB, fyB, cvB_mem->cv_user_data)); } /* cvLsJacTimesSetupBSWrapper interfaces to the CVLsJacTimesSetupFnBS routine provided by the user */ static int cvLsJacTimesSetupBSWrapper(realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsJacTimesSetupBSWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ if (ca_mem->ca_IMinterpSensi) retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp); else retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsJacTimesVecBSWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint jtsetupBS routine */ return(cvlsB_mem->jtsetupBS(t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, fyB, cvB_mem->cv_user_data)); } /* cvLsJacTimesVecBWrapper interfaces to the CVLsJacTimesVecFnB routine provided by the user */ static int cvLsJacTimesVecBWrapper(N_Vector vB, N_Vector JvB, realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem, N_Vector tmpB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsJacTimesVecBWrapper", &cv_mem, &ca_mem, 
&cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsJacTimesVecBWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint jtimesB routine */ return(cvlsB_mem->jtimesB(vB, JvB, t, ca_mem->ca_ytmp, yB, fyB, cvB_mem->cv_user_data, tmpB)); } /* cvLsJacTimesVecBSWrapper interfaces to the CVLsJacTimesVecFnBS routine provided by the user */ static int cvLsJacTimesVecBSWrapper(N_Vector vB, N_Vector JvB, realtype t, N_Vector yB, N_Vector fyB, void *cvode_mem, N_Vector tmpB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVLsMemB cvlsB_mem; int retval; /* access relevant memory structures */ retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsJacTimesVecBSWrapper", &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem); if (retval != CVLS_SUCCESS) return(retval); /* Forward solution from interpolation */ if (ca_mem->ca_IMinterpSensi) retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp); else retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSLS", "cvLsJacTimesVecBSWrapper", MSG_LS_BAD_TINTERP); return(-1); } /* Call user's adjoint jtimesBS routine */ return(cvlsB_mem->jtimesBS(vB, JvB, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, fyB, cvB_mem->cv_user_data, tmpB)); } /* cvLsLinSysBWrapper interfaces to the CVLsLinSysFnB routine provided by the user. cvLsLinSysBWrapper is of type CVLsLinSysFn. 
*/
static int cvLsLinSysBWrapper(realtype t, N_Vector yB, N_Vector fyB,
                              SUNMatrix AB, booleantype jokB,
                              booleantype *jcurB, realtype gammaB,
                              void *cvode_mem, N_Vector tmp1B,
                              N_Vector tmp2B, N_Vector tmp3B)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  CVLsMemB cvlsB_mem;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsLinSysBWrapper",
                               &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* Forward solution from interpolation */
  retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL);
  if (retval != CV_SUCCESS) {
    cvProcessError(cv_mem, -1, "CVSLS", "cvLsLinSysBWrapper",
                   MSG_LS_BAD_TINTERP);
    return(-1);
  }

  /* Call user's adjoint linsysB routine (of type CVLsLinSysFnB) */
  return(cvlsB_mem->linsysB(t, ca_mem->ca_ytmp, yB, fyB, AB, jokB, jcurB,
                            gammaB, cvB_mem->cv_user_data,
                            tmp1B, tmp2B, tmp3B));
}

/* cvLsLinSysBSWrapper interfaces to the CVLsLinSysFnBS routine provided
   by the user. cvLsLinSysBSWrapper is of type CVLsLinSysFn. */
static int cvLsLinSysBSWrapper(realtype t, N_Vector yB, N_Vector fyB,
                               SUNMatrix AB, booleantype jokB,
                               booleantype *jcurB, realtype gammaB,
                               void *cvode_mem, N_Vector tmp1B,
                               N_Vector tmp2B, N_Vector tmp3B)
{
  CVodeMem cv_mem;
  CVadjMem ca_mem;
  CVodeBMem cvB_mem;
  CVLsMemB cvlsB_mem;
  int retval;

  /* access relevant memory structures */
  retval = cvLs_AccessLMemBCur(cvode_mem, "cvLsLinSysBSWrapper",
                               &cv_mem, &ca_mem, &cvB_mem, &cvlsB_mem);
  if (retval != CVLS_SUCCESS) return(retval);

  /* Forward solution from interpolation (sensitivities only when the
     interpolation module stored them) */
  if (ca_mem->ca_IMinterpSensi)
    retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp);
  else
    retval = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL);
  if (retval != CV_SUCCESS) {
    cvProcessError(cv_mem, -1, "CVSLS", "cvLsLinSysBSWrapper",
                   MSG_LS_BAD_TINTERP);
    return(-1);
  }

  /* Call user's adjoint linsysBS routine (of type CVLsLinSysFnBS) */
  return(cvlsB_mem->linsysBS(t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, fyB,
                             AB, jokB, jcurB, gammaB,
                             cvB_mem->cv_user_data, tmp1B, tmp2B, tmp3B));
}

/* cvLsFreeB frees memory associated with the CVSLS wrapper */
int cvLsFreeB(CVodeBMem cvB_mem)
{
  CVLsMemB cvlsB_mem;

  /* Return immediately if cvB_mem or cvB_mem->cv_lmem are NULL */
  if (cvB_mem == NULL) return(CVLS_SUCCESS);
  if (cvB_mem->cv_lmem == NULL) return(CVLS_SUCCESS);
  cvlsB_mem = (CVLsMemB) (cvB_mem->cv_lmem);

  /* free CVLsMemB interface structure */
  free(cvlsB_mem);

  return(CVLS_SUCCESS);
}

/* cvLs_AccessLMemB unpacks the cv_mem, ca_mem, cvB_mem and cvlsB_mem
   structures from the void* cvode_mem pointer.
   If any are missing it returns CVLS_MEM_NULL, CVLS_NO_ADJ,
   CVLS_ILL_INPUT, or CVLS_LMEMB_NULL.
*/
int cvLs_AccessLMemB(void *cvode_mem, int which, const char *fname,
                     CVodeMem *cv_mem, CVadjMem *ca_mem,
                     CVodeBMem *cvB_mem, CVLsMemB *cvlsB_mem)
{
  /* access CVodeMem structure */
  if (cvode_mem==NULL) {
    cvProcessError(NULL, CVLS_MEM_NULL, "CVSLS",
                   fname, MSG_LS_CVMEM_NULL);
    return(CVLS_MEM_NULL);
  }
  *cv_mem = (CVodeMem) cvode_mem;

  /* access CVadjMem structure */
  if ((*cv_mem)->cv_adjMallocDone == SUNFALSE) {
    cvProcessError(*cv_mem, CVLS_NO_ADJ, "CVSLS",
                   fname, MSG_LS_NO_ADJ);
    return(CVLS_NO_ADJ);
  }
  *ca_mem = (*cv_mem)->cv_adj_mem;

  /* Check which */
  if ( which >= (*ca_mem)->ca_nbckpbs ) {
    cvProcessError(*cv_mem, CVLS_ILL_INPUT, "CVSLS",
                   fname, MSG_LS_BAD_WHICH);
    return(CVLS_ILL_INPUT);
  }

  /* Find the CVodeBMem entry in the linked list corresponding to which */
  *cvB_mem = (*ca_mem)->cvB_mem;
  while ((*cvB_mem) != NULL) {
    if ( which == (*cvB_mem)->cv_index ) break;
    *cvB_mem = (*cvB_mem)->cv_next;
  }

  /* FIX: guard against an exhausted search -- previously a 'which' value
     absent from the list left *cvB_mem NULL and was dereferenced below */
  if ((*cvB_mem) == NULL) {
    cvProcessError(*cv_mem, CVLS_ILL_INPUT, "CVSLS",
                   fname, MSG_LS_BAD_WHICH);
    return(CVLS_ILL_INPUT);
  }

  /* access CVLsMemB structure */
  if ((*cvB_mem)->cv_lmem == NULL) {
    cvProcessError(*cv_mem, CVLS_LMEMB_NULL, "CVSLS",
                   fname, MSG_LS_LMEMB_NULL);
    return(CVLS_LMEMB_NULL);
  }
  *cvlsB_mem = (CVLsMemB) ((*cvB_mem)->cv_lmem);

  return(CVLS_SUCCESS);
}

/* cvLs_AccessLMemBCur unpacks the cv_mem, ca_mem, cvB_mem and cvlsB_mem
   structures from the void* cvode_mem pointer.
   If any are missing it returns CVLS_MEM_NULL, CVLS_NO_ADJ, or
   CVLS_LMEMB_NULL.
*/
int cvLs_AccessLMemBCur(void *cvode_mem, const char *fname,
                        CVodeMem *cv_mem, CVadjMem *ca_mem,
                        CVodeBMem *cvB_mem, CVLsMemB *cvlsB_mem)
{
  /* access CVodeMem structure */
  if (cvode_mem==NULL) {
    cvProcessError(NULL, CVLS_MEM_NULL, "CVSLS",
                   fname, MSG_LS_CVMEM_NULL);
    return(CVLS_MEM_NULL);
  }
  *cv_mem = (CVodeMem) cvode_mem;

  /* access CVadjMem structure */
  if ((*cv_mem)->cv_adjMallocDone == SUNFALSE) {
    cvProcessError(*cv_mem, CVLS_NO_ADJ, "CVSLS",
                   fname, MSG_LS_NO_ADJ);
    return(CVLS_NO_ADJ);
  }
  *ca_mem = (*cv_mem)->cv_adj_mem;

  /* get current backward problem (set by the adjoint module while
     integrating backward) */
  if ((*ca_mem)->ca_bckpbCrt == NULL) {
    cvProcessError(*cv_mem, CVLS_LMEMB_NULL, "CVSLS",
                   fname, MSG_LS_LMEMB_NULL);
    return(CVLS_LMEMB_NULL);
  }
  *cvB_mem = (*ca_mem)->ca_bckpbCrt;

  /* access CVLsMemB structure */
  if ((*cvB_mem)->cv_lmem == NULL) {
    cvProcessError(*cv_mem, CVLS_LMEMB_NULL, "CVSLS",
                   fname, MSG_LS_LMEMB_NULL);
    return(CVLS_LMEMB_NULL);
  }
  *cvlsB_mem = (CVLsMemB) ((*cvB_mem)->cv_lmem);

  return(CVLS_SUCCESS);
}

/*---------------------------------------------------------------
  EOF
  ---------------------------------------------------------------*/
StanHeaders/src/cvodes/cvodes_spils.c0000644000176200001440000001035614645137106017402 0ustar liggesusers/*-----------------------------------------------------------------
 * Programmer(s): Daniel R. Reynolds @ SMU
 * -----------------------------------------------------------------
 * SUNDIALS Copyright Start
 * Copyright (c) 2002-2022, Lawrence Livermore National Security
 * and Southern Methodist University.
 * All rights reserved.
 *
 * See the top-level LICENSE and NOTICE files for details.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 * SUNDIALS Copyright End
 * -----------------------------------------------------------------
 * Header file for the deprecated Scaled, Preconditioned Iterative
 * Linear Solver interface in CVODES; these routines now just wrap
 * the updated CVODES generic linear solver interface in cvodes_ls.h.
* -----------------------------------------------------------------*/ #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= CVSSPILS Exported functions (wrappers for equivalent routines in cvodes_ls.h) =================================================================*/ int CVSpilsSetLinearSolver(void *cvode_mem, SUNLinearSolver LS) { return(CVodeSetLinearSolver(cvode_mem, LS, NULL)); } int CVSpilsSetEpsLin(void *cvode_mem, realtype eplifac) { return(CVodeSetEpsLin(cvode_mem, eplifac)); } int CVSpilsSetPreconditioner(void *cvode_mem, CVSpilsPrecSetupFn pset, CVSpilsPrecSolveFn psolve) { return(CVodeSetPreconditioner(cvode_mem, pset, psolve)); } int CVSpilsSetJacTimes(void *cvode_mem, CVSpilsJacTimesSetupFn jtsetup, CVSpilsJacTimesVecFn jtimes) { return(CVodeSetJacTimes(cvode_mem, jtsetup, jtimes)); } int CVSpilsGetWorkSpace(void *cvode_mem, long int *lenrwLS, long int *leniwLS) { return(CVodeGetLinWorkSpace(cvode_mem, lenrwLS, leniwLS)); } int CVSpilsGetNumPrecEvals(void *cvode_mem, long int *npevals) { return(CVodeGetNumPrecEvals(cvode_mem, npevals)); } int CVSpilsGetNumPrecSolves(void *cvode_mem, long int *npsolves) { return(CVodeGetNumPrecSolves(cvode_mem, npsolves)); } int CVSpilsGetNumLinIters(void *cvode_mem, long int *nliters) { return(CVodeGetNumLinIters(cvode_mem, nliters)); } int CVSpilsGetNumConvFails(void *cvode_mem, long int *nlcfails) { return(CVodeGetNumLinConvFails(cvode_mem, nlcfails)); } int CVSpilsGetNumJTSetupEvals(void *cvode_mem, long int *njtsetups) { return(CVodeGetNumJTSetupEvals(cvode_mem, njtsetups)); } int CVSpilsGetNumJtimesEvals(void *cvode_mem, long int *njvevals) { return(CVodeGetNumJtimesEvals(cvode_mem, njvevals)); } int CVSpilsGetNumRhsEvals(void *cvode_mem, long int *nfevalsLS) { return(CVodeGetNumLinRhsEvals(cvode_mem, nfevalsLS)); } int CVSpilsGetLastFlag(void *cvode_mem, long int *flag) { 
return(CVodeGetLastLinFlag(cvode_mem, flag)); } char *CVSpilsGetReturnFlagName(long int flag) { return(CVodeGetLinReturnFlagName(flag)); } int CVSpilsSetLinearSolverB(void *cvode_mem, int which, SUNLinearSolver LS) { return(CVodeSetLinearSolverB(cvode_mem, which, LS, NULL)); } int CVSpilsSetEpsLinB(void *cvode_mem, int which, realtype eplifacB) { return(CVodeSetEpsLinB(cvode_mem, which, eplifacB)); } int CVSpilsSetPreconditionerB(void *cvode_mem, int which, CVSpilsPrecSetupFnB psetB, CVSpilsPrecSolveFnB psolveB) { return(CVodeSetPreconditionerB(cvode_mem, which, psetB, psolveB)); } int CVSpilsSetPreconditionerBS(void *cvode_mem, int which, CVSpilsPrecSetupFnBS psetBS, CVSpilsPrecSolveFnBS psolveBS) { return(CVodeSetPreconditionerBS(cvode_mem, which, psetBS, psolveBS)); } int CVSpilsSetJacTimesB(void *cvode_mem, int which, CVSpilsJacTimesSetupFnB jtsetupB, CVSpilsJacTimesVecFnB jtimesB) { return(CVodeSetJacTimesB(cvode_mem, which, jtsetupB, jtimesB)); } int CVSpilsSetJacTimesBS(void *cvode_mem, int which, CVSpilsJacTimesSetupFnBS jtsetupBS, CVSpilsJacTimesVecFnBS jtimesBS) { return(CVodeSetJacTimesBS(cvode_mem, which, jtsetupBS, jtimesBS)); } #ifdef __cplusplus } #endif StanHeaders/src/cvodes/cvodes_diag_impl.h0000644000176200001440000000427314645137106020203 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Implementation header file for the diagonal linear solver, CVDIAG. 
* ----------------------------------------------------------------- */ #ifndef _CVSDIAG_IMPL_H #define _CVSDIAG_IMPL_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * Types: CVDiagMemRec, CVDiagMem * ----------------------------------------------------------------- * The type CVDiagMem is pointer to a CVDiagMemRec. * This structure contains CVDiag solver-specific data. * ----------------------------------------------------------------- */ typedef struct { realtype di_gammasv; /* gammasv = gamma at the last call to setup or solve */ N_Vector di_M; /* M = (I - gamma J)^{-1} , gamma = h / l1 */ N_Vector di_bit; /* temporary storage vector */ N_Vector di_bitcomp; /* temporary storage vector */ long int di_nfeDI; /* no. of calls to f due to difference quotient diagonal Jacobian approximation */ long int di_last_flag; /* last error return flag */ } CVDiagMemRec, *CVDiagMem; /* Error Messages */ #define MSGDG_CVMEM_NULL "Integrator memory is NULL." #define MSGDG_MEM_FAIL "A memory request failed." #define MSGDG_BAD_NVECTOR "A required vector operation is not implemented." #define MSGDG_LMEM_NULL "CVDIAG memory is NULL." #define MSGDG_RHSFUNC_FAILED "The right-hand side routine failed in an unrecoverable manner." #define MSGDG_NO_ADJ "Illegal attempt to call before calling CVodeAdjMalloc." #define MSGDG_BAD_WHICH "Illegal value for which." #ifdef __cplusplus } #endif #endif StanHeaders/src/cvodes/cvodes_bandpre.c0000644000176200001440000004741614645137106017672 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Radu Serban and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. 
* * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This file contains implementations of the banded difference * quotient Jacobian-based preconditioner and solver routines for * use with the CVSLS linear solver interface. * ----------------------------------------------------------------- */ #include #include #include "cvodes_impl.h" #include "cvodes_bandpre_impl.h" #include "cvodes_ls_impl.h" #include #define MIN_INC_MULT RCONST(1000.0) #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /* Prototypes of cvBandPrecSetup and cvBandPrecSolve */ static int cvBandPrecSetup(realtype t, N_Vector y, N_Vector fy, booleantype jok, booleantype *jcurPtr, realtype gamma, void *bp_data); static int cvBandPrecSolve(realtype t, N_Vector y, N_Vector fy, N_Vector r, N_Vector z, realtype gamma, realtype delta, int lr, void *bp_data); /* Prototype for cvBandPrecFree */ static int cvBandPrecFree(CVodeMem cv_mem); /* Prototype for difference quotient Jacobian calculation routine */ static int cvBandPrecDQJac(CVBandPrecData pdata, realtype t, N_Vector y, N_Vector fy, N_Vector ftemp, N_Vector ytemp); /*================================================================ PART I - Forward Problems ================================================================*/ /*----------------------------------------------------------------- Initialization, Free, and Get Functions NOTE: The band linear solver assumes a serial/OpenMP/Pthreads implementation of the NVECTOR package. Therefore, CVBandPrecInit will first test for a compatible N_Vector internal representation by checking that the function N_VGetArrayPointer exists. 
  -----------------------------------------------------------------*/
int CVBandPrecInit(void *cvode_mem, sunindextype N,
                   sunindextype mu, sunindextype ml)
{
  CVodeMem cv_mem;
  CVLsMem cvls_mem;
  CVBandPrecData pdata;
  sunindextype mup, mlp, storagemu;
  int flag;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CVLS_MEM_NULL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_MEM_NULL);
    return(CVLS_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  /* Test if the CVSLS linear solver interface has been attached */
  if (cv_mem->cv_lmem == NULL) {
    cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_LMEM_NULL);
    return(CVLS_LMEM_NULL);
  }
  cvls_mem = (CVLsMem) cv_mem->cv_lmem;

  /* Test compatibility of NVECTOR package with the BAND preconditioner */
  if(cv_mem->cv_tempv->ops->nvgetarraypointer == NULL) {
    cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_BAD_NVECTOR);
    return(CVLS_ILL_INPUT);
  }

  /* Allocate data memory */
  pdata = NULL;
  pdata = (CVBandPrecData) malloc(sizeof *pdata);
  if (pdata == NULL) {
    cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_MEM_FAIL);
    return(CVLS_MEM_FAIL);
  }

  /* Load pointers and bandwidths into pdata block;
     bandwidths are clipped into the valid range [0, N-1]. */
  pdata->cvode_mem = cvode_mem;
  pdata->N = N;
  pdata->mu = mup = SUNMIN(N-1, SUNMAX(0,mu));
  pdata->ml = mlp = SUNMIN(N-1, SUNMAX(0,ml));

  /* Initialize nfeBP counter */
  pdata->nfeBP = 0;

  /* Allocate memory for saved banded Jacobian approximation. */
  pdata->savedJ = NULL;
  pdata->savedJ = SUNBandMatrixStorage(N, mup, mlp, mup, cv_mem->cv_sunctx);
  if (pdata->savedJ == NULL) {
    free(pdata); pdata = NULL;
    cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_MEM_FAIL);
    return(CVLS_MEM_FAIL);
  }

  /* Allocate memory for banded preconditioner; the extra storage
     bandwidth (mup+mlp, clipped) accommodates LU fill-in. */
  storagemu = SUNMIN(N-1, mup+mlp);
  pdata->savedP = NULL;
  pdata->savedP = SUNBandMatrixStorage(N, mup, mlp, storagemu, cv_mem->cv_sunctx);
  if (pdata->savedP == NULL) {
    SUNMatDestroy(pdata->savedJ);
    free(pdata); pdata = NULL;
    cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_MEM_FAIL);
    return(CVLS_MEM_FAIL);
  }

  /* Allocate memory for banded linear solver */
  pdata->LS = NULL;
  pdata->LS = SUNLinSol_Band(cv_mem->cv_tempv, pdata->savedP, cv_mem->cv_sunctx);
  if (pdata->LS == NULL) {
    SUNMatDestroy(pdata->savedP);
    SUNMatDestroy(pdata->savedJ);
    free(pdata); pdata = NULL;
    cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_MEM_FAIL);
    return(CVLS_MEM_FAIL);
  }

  /* allocate memory for temporary N_Vectors */
  pdata->tmp1 = NULL;
  pdata->tmp1 = N_VClone(cv_mem->cv_tempv);
  if (pdata->tmp1 == NULL) {
    SUNLinSolFree(pdata->LS);
    SUNMatDestroy(pdata->savedP);
    SUNMatDestroy(pdata->savedJ);
    free(pdata); pdata = NULL;
    cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_MEM_FAIL);
    return(CVLS_MEM_FAIL);
  }
  pdata->tmp2 = NULL;
  pdata->tmp2 = N_VClone(cv_mem->cv_tempv);
  if (pdata->tmp2 == NULL) {
    SUNLinSolFree(pdata->LS);
    SUNMatDestroy(pdata->savedP);
    SUNMatDestroy(pdata->savedJ);
    N_VDestroy(pdata->tmp1);
    free(pdata); pdata = NULL;
    cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_MEM_FAIL);
    return(CVLS_MEM_FAIL);
  }

  /* initialize band linear solver object */
  flag = SUNLinSolInitialize(pdata->LS);
  if (flag != SUNLS_SUCCESS) {
    SUNLinSolFree(pdata->LS);
    SUNMatDestroy(pdata->savedP);
    SUNMatDestroy(pdata->savedJ);
    N_VDestroy(pdata->tmp1);
    N_VDestroy(pdata->tmp2);
    free(pdata); pdata = NULL;
    cvProcessError(cv_mem, CVLS_SUNLS_FAIL, "CVSBANDPRE",
                   "CVBandPrecInit", MSGBP_SUNLS_FAIL);
    return(CVLS_SUNLS_FAIL);
  }

  /* make sure P_data is free from any previous allocations */
  if (cvls_mem->pfree)
    cvls_mem->pfree(cv_mem);

  /* Point to the new P_data field in the LS memory */
  cvls_mem->P_data = pdata;

  /* Attach the pfree function */
  cvls_mem->pfree = cvBandPrecFree;

  /* Attach preconditioner solve and setup functions */
  flag = CVodeSetPreconditioner(cvode_mem, cvBandPrecSetup, cvBandPrecSolve);

  return(flag);
}


/* CVBandPrecGetWorkSpace: sums the real/integer workspace sizes of every
   object owned by the band preconditioner (vectors, both matrices, and
   the linear solver). */
int CVBandPrecGetWorkSpace(void *cvode_mem, long int *lenrwBP,
                           long int *leniwBP)
{
  CVodeMem cv_mem;
  CVLsMem cvls_mem;
  CVBandPrecData pdata;
  sunindextype lrw1, liw1;
  long int lrw, liw;
  int flag;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CVLS_MEM_NULL, "CVSBANDPRE",
                   "CVBandPrecGetWorkSpace", MSGBP_MEM_NULL);
    return(CVLS_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  if (cv_mem->cv_lmem == NULL) {
    cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSBANDPRE",
                   "CVBandPrecGetWorkSpace", MSGBP_LMEM_NULL);
    return(CVLS_LMEM_NULL);
  }
  cvls_mem = (CVLsMem) cv_mem->cv_lmem;

  if (cvls_mem->P_data == NULL) {
    cvProcessError(cv_mem, CVLS_PMEM_NULL, "CVSBANDPRE",
                   "CVBandPrecGetWorkSpace", MSGBP_PMEM_NULL);
    return(CVLS_PMEM_NULL);
  }
  pdata = (CVBandPrecData) cvls_mem->P_data;

  /* sum space requirements for all objects in pdata */
  *leniwBP = 4;
  *lenrwBP = 0;
  if (cv_mem->cv_tempv->ops->nvspace) {
    N_VSpace(cv_mem->cv_tempv, &lrw1, &liw1);
    *leniwBP += 2*liw1;
    *lenrwBP += 2*lrw1;
  }
  if (pdata->savedJ->ops->space) {
    flag = SUNMatSpace(pdata->savedJ, &lrw, &liw);
    if (flag != 0) return(-1);
    *leniwBP += liw;
    *lenrwBP += lrw;
  }
  if (pdata->savedP->ops->space) {
    flag = SUNMatSpace(pdata->savedP, &lrw, &liw);
    if (flag != 0) return(-1);
    *leniwBP += liw;
    *lenrwBP += lrw;
  }
  if (pdata->LS->ops->space) {
    flag = SUNLinSolSpace(pdata->LS, &lrw, &liw);
    if (flag != 0) return(-1);
    *leniwBP += liw;
    *lenrwBP += lrw;
  }

  return(CVLS_SUCCESS);
}


/* CVBandPrecGetNumRhsEvals: reports the number of RHS evaluations made
   for the difference-quotient banded Jacobian approximation. */
int CVBandPrecGetNumRhsEvals(void *cvode_mem, long int *nfevalsBP)
{
  CVodeMem cv_mem;
  CVLsMem cvls_mem;
  CVBandPrecData pdata;

  if (cvode_mem == NULL) {
    cvProcessError(NULL, CVLS_MEM_NULL, "CVSBANDPRE",
                   "CVBandPrecGetNumRhsEvals", MSGBP_MEM_NULL);
    return(CVLS_MEM_NULL);
  }
  cv_mem = (CVodeMem) cvode_mem;

  if (cv_mem->cv_lmem == NULL) {
    cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSBANDPRE",
                   "CVBandPrecGetNumRhsEvals", MSGBP_LMEM_NULL);
    return(CVLS_LMEM_NULL);
  }
  cvls_mem = (CVLsMem) cv_mem->cv_lmem;

  if (cvls_mem->P_data == NULL) {
    cvProcessError(cv_mem, CVLS_PMEM_NULL, "CVSBANDPRE",
                   "CVBandPrecGetNumRhsEvals", MSGBP_PMEM_NULL);
    return(CVLS_PMEM_NULL);
  }
  pdata = (CVBandPrecData) cvls_mem->P_data;

  *nfevalsBP = pdata->nfeBP;

  return(CVLS_SUCCESS);
}


/*-----------------------------------------------------------------
  cvBandPrecSetup
  -----------------------------------------------------------------
  Together cvBandPrecSetup and cvBandPrecSolve use a banded
  difference quotient Jacobian to create a preconditioner.
  cvBandPrecSetup calculates a new J, if necessary, then
  calculates P = I - gamma*J, and does an LU factorization of P.

  The parameters of cvBandPrecSetup are as follows:

  t       is the current value of the independent variable.

  y       is the current value of the dependent variable vector,
          namely the predicted value of y(t).

  fy      is the vector f(t,y).

  jok     is an input flag indicating whether Jacobian-related
          data needs to be recomputed, as follows:
            jok == SUNFALSE means recompute Jacobian-related data
                   from scratch.
            jok == SUNTRUE  means that Jacobian data from the
                   previous PrecSetup call will be reused (with the
                   current value of gamma).
          A cvBandPrecSetup call with jok == SUNTRUE should only
          occur after a call with jok == SUNFALSE.

  *jcurPtr is a pointer to an output integer flag which is
           set by cvBandPrecSetup as follows:
             *jcurPtr = SUNTRUE if Jacobian data was recomputed.
             *jcurPtr = SUNFALSE if Jacobian data was not
                        recomputed, but saved data was reused.

  gamma   is the scalar appearing in the Newton matrix.

  bp_data is a pointer to preconditoner data (set by cvBandPrecInit)

  The value to be returned by the cvBandPrecSetup function is
  0 if successful, or 1 if the band factorization failed.
  -----------------------------------------------------------------*/
static int cvBandPrecSetup(realtype t, N_Vector y, N_Vector fy,
                           booleantype jok, booleantype *jcurPtr,
                           realtype gamma, void *bp_data)
{
  CVBandPrecData pdata;
  CVodeMem cv_mem;
  int retval;

  /* Assume matrix and lpivots have already been allocated. */
  pdata = (CVBandPrecData) bp_data;
  cv_mem = (CVodeMem) pdata->cvode_mem;

  if (jok) {

    /* If jok = SUNTRUE, use saved copy of J. */
    *jcurPtr = SUNFALSE;
    /* retval convention below: < 0 unrecoverable, > 0 recoverable */
    retval = SUNMatCopy(pdata->savedJ, pdata->savedP);
    if (retval < 0) {
      cvProcessError(cv_mem, -1, "CVBANDPRE",
                     "cvBandPrecSetup", MSGBP_SUNMAT_FAIL);
      return(-1);
    }
    if (retval > 0) {
      return(1);
    }

  } else {

    /* If jok = SUNFALSE, call CVBandPDQJac for new J value. */
    *jcurPtr = SUNTRUE;
    retval = SUNMatZero(pdata->savedJ);
    if (retval < 0) {
      cvProcessError(cv_mem, -1, "CVBANDPRE",
                     "cvBandPrecSetup", MSGBP_SUNMAT_FAIL);
      return(-1);
    }
    if (retval > 0) {
      return(1);
    }

    retval = cvBandPrecDQJac(pdata, t, y, fy,
                             pdata->tmp1, pdata->tmp2);
    if (retval < 0) {
      cvProcessError(cv_mem, -1, "CVBANDPRE",
                     "cvBandPrecSetup", MSGBP_RHSFUNC_FAILED);
      return(-1);
    }
    if (retval > 0) {
      return(1);
    }

    retval = SUNMatCopy(pdata->savedJ, pdata->savedP);
    if (retval < 0) {
      cvProcessError(cv_mem, -1, "CVBANDPRE",
                     "cvBandPrecSetup", MSGBP_SUNMAT_FAIL);
      return(-1);
    }
    if (retval > 0) {
      return(1);
    }

  }

  /* Scale and add identity to get savedP = I - gamma*J. */
  retval = SUNMatScaleAddI(-gamma, pdata->savedP);
  if (retval) {
    cvProcessError(cv_mem, -1, "CVBANDPRE",
                   "cvBandPrecSetup", MSGBP_SUNMAT_FAIL);
    return(-1);
  }

  /* Do LU factorization of matrix and return error flag */
  retval = SUNLinSolSetup_Band(pdata->LS, pdata->savedP);
  return(retval);
}


/*-----------------------------------------------------------------
  cvBandPrecSolve
  -----------------------------------------------------------------
  cvBandPrecSolve solves a linear system P z = r, where P is the
  matrix computed by cvBandPrecond.
  The parameters of cvBandPrecSolve used here are as follows:

  r is the right-hand side vector of the linear system.

  bp_data is a pointer to preconditioner data (set by CVBandPrecInit)

  z is the output vector computed by cvBandPrecSolve.

  The value returned by the cvBandPrecSolve function is always 0,
  indicating success.
  -----------------------------------------------------------------*/
static int cvBandPrecSolve(realtype t, N_Vector y, N_Vector fy,
                           N_Vector r, N_Vector z,
                           realtype gamma, realtype delta,
                           int lr, void *bp_data)
{
  CVBandPrecData pdata;
  int retval;

  /* Assume matrix and lpivots have already been allocated. */
  pdata = (CVBandPrecData) bp_data;

  /* Call banded solver object to do the work */
  retval = SUNLinSolSolve(pdata->LS, pdata->savedP, z, r, ZERO);
  return(retval);
}

/* Free routine registered with the CVLS interface.  Releases the
   linear solver, both saved matrices, the two work vectors, and the
   CVBandPrecData record itself.  Safe to call when the linear solver
   memory or the preconditioner data was never attached (returns 0). */
static int cvBandPrecFree(CVodeMem cv_mem)
{
  CVLsMem cvls_mem;
  CVBandPrecData pdata;

  if (cv_mem->cv_lmem == NULL) return(0);
  cvls_mem = (CVLsMem) cv_mem->cv_lmem;

  if (cvls_mem->P_data == NULL) return(0);
  pdata = (CVBandPrecData) cvls_mem->P_data;

  SUNLinSolFree(pdata->LS);
  SUNMatDestroy(pdata->savedP);
  SUNMatDestroy(pdata->savedJ);
  N_VDestroy(pdata->tmp1);
  N_VDestroy(pdata->tmp2);

  free(pdata);
  pdata = NULL;  /* local only; caller's P_data pointer is not cleared here */

  return(0);
}

/*-----------------------------------------------------------------
  cvBandPrecDQJac
  -----------------------------------------------------------------
  This routine generates a banded difference quotient approximation
  to the Jacobian of f(t,y). It assumes that a band SUNMatrix is
  stored column-wise, and that elements within each column are
  contiguous. This makes it possible to get the address of a column
  of J via the accessor function SUNBandMatrix_Column() and to
  write a simple for loop to set each of the elements of a column
  in succession.
-----------------------------------------------------------------*/ static int cvBandPrecDQJac(CVBandPrecData pdata, realtype t, N_Vector y, N_Vector fy, N_Vector ftemp, N_Vector ytemp) { CVodeMem cv_mem; realtype fnorm, minInc, inc, inc_inv, yj, srur, conj; sunindextype group, i, j, width, ngroups, i1, i2; realtype *col_j, *ewt_data, *fy_data, *ftemp_data; realtype *y_data, *ytemp_data, *cns_data; int retval; /* initialize cns_data to avoid compiler warning */ cns_data = NULL; cv_mem = (CVodeMem) pdata->cvode_mem; /* Obtain pointers to the data for ewt, fy, ftemp, y, ytemp. */ ewt_data = N_VGetArrayPointer(cv_mem->cv_ewt); fy_data = N_VGetArrayPointer(fy); ftemp_data = N_VGetArrayPointer(ftemp); y_data = N_VGetArrayPointer(y); ytemp_data = N_VGetArrayPointer(ytemp); if (cv_mem->cv_constraintsSet) cns_data = N_VGetArrayPointer(cv_mem->cv_constraints); /* Load ytemp with y = predicted y vector. */ N_VScale(ONE, y, ytemp); /* Set minimum increment based on uround and norm of f. */ srur = SUNRsqrt(cv_mem->cv_uround); fnorm = N_VWrmsNorm(fy, cv_mem->cv_ewt); minInc = (fnorm != ZERO) ? (MIN_INC_MULT * SUNRabs(cv_mem->cv_h) * cv_mem->cv_uround * pdata->N * fnorm) : ONE; /* Set bandwidth and number of column groups for band differencing. */ width = pdata->ml + pdata->mu + 1; ngroups = SUNMIN(width, pdata->N); for (group = 1; group <= ngroups; group++) { /* Increment all y_j in group. */ for(j = group-1; j < pdata->N; j += width) { inc = SUNMAX(srur*SUNRabs(y_data[j]), minInc/ewt_data[j]); yj = y_data[j]; /* Adjust sign(inc) again if yj has an inequality constraint. */ if (cv_mem->cv_constraintsSet) { conj = cns_data[j]; if (SUNRabs(conj) == ONE) {if ((yj+inc)*conj < ZERO) inc = -inc;} else if (SUNRabs(conj) == TWO) {if ((yj+inc)*conj <= ZERO) inc = -inc;} } ytemp_data[j] += inc; } /* Evaluate f with incremented y. 
*/ retval = cv_mem->cv_f(t, ytemp, ftemp, cv_mem->cv_user_data); pdata->nfeBP++; if (retval != 0) return(retval); /* Restore ytemp, then form and load difference quotients. */ for (j = group-1; j < pdata->N; j += width) { yj = y_data[j]; ytemp_data[j] = y_data[j]; col_j = SUNBandMatrix_Column(pdata->savedJ,j); inc = SUNMAX(srur*SUNRabs(y_data[j]), minInc/ewt_data[j]); /* Adjust sign(inc) as before. */ if (cv_mem->cv_constraintsSet) { conj = cns_data[j]; if (SUNRabs(conj) == ONE) {if ((yj+inc)*conj < ZERO) inc = -inc;} else if (SUNRabs(conj) == TWO) {if ((yj+inc)*conj <= ZERO) inc = -inc;} } inc_inv = ONE/inc; i1 = SUNMAX(0, j-pdata->mu); i2 = SUNMIN(j + pdata->ml, pdata->N - 1); for (i=i1; i <= i2; i++) SM_COLUMN_ELEMENT_B(col_j,i,j) = inc_inv * (ftemp_data[i] - fy_data[i]); } } return(0); } /*================================================================ PART II - Backward Problems ================================================================*/ /*--------------------------------------------------------------- User-Callable initialization function: wrapper for the backward phase around the corresponding CVODES functions ---------------------------------------------------------------*/ int CVBandPrecInitB(void *cvode_mem, int which, sunindextype nB, sunindextype muB, sunindextype mlB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSBANDPRE", "CVBandPrecInitB", MSGBP_MEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CVLS_NO_ADJ, "CVSBANDPRE", "CVBandPrecInitB", MSGBP_NO_ADJ); return(CVLS_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSBANDPRE", "CVBandPrecInitB", MSGBP_BAD_WHICH); return(CVLS_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; /* advance */ cvB_mem = cvB_mem->cv_next; } /* cv_mem corresponding to 'which' problem. */ cvodeB_mem = (void *) (cvB_mem->cv_mem); /* Set pfree */ cvB_mem->cv_pfree = NULL; /* Initialize the band preconditioner for this backward problem. */ flag = CVBandPrecInit(cvodeB_mem, nB, muB, mlB); return(flag); } StanHeaders/src/cvodes/cvodes_bandpre_impl.h0000644000176200001440000000466414645137106020716 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Implementation header file for the CVSBANDPRE module. 
* ----------------------------------------------------------------- */ #ifndef _CVSBANDPRE_IMPL_H #define _CVSBANDPRE_IMPL_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*----------------------------------------------------------------- Type: CVBandPrecData -----------------------------------------------------------------*/ typedef struct CVBandPrecDataRec { /* Data set by user in CVBandPrecInit */ sunindextype N; sunindextype ml, mu; /* Data set by CVBandPrecSetup */ SUNMatrix savedJ; SUNMatrix savedP; SUNLinearSolver LS; N_Vector tmp1; N_Vector tmp2; /* Rhs calls */ long int nfeBP; /* Pointer to cvode_mem */ void *cvode_mem; } *CVBandPrecData; /*----------------------------------------------------------------- CVBANDPRE error messages -----------------------------------------------------------------*/ #define MSGBP_MEM_NULL "Integrator memory is NULL." #define MSGBP_LMEM_NULL "Linear solver memory is NULL. One of the SPILS linear solvers must be attached." #define MSGBP_MEM_FAIL "A memory request failed." #define MSGBP_BAD_NVECTOR "A required vector operation is not implemented." #define MSGBP_SUNMAT_FAIL "An error arose from a SUNBandMatrix routine." #define MSGBP_SUNLS_FAIL "An error arose from a SUNBandLinearSolver routine." #define MSGBP_PMEM_NULL "Band preconditioner memory is NULL. CVBandPrecInit must be called." #define MSGBP_RHSFUNC_FAILED "The right-hand side routine failed in an unrecoverable manner." #define MSGBP_NO_ADJ "Illegal attempt to call before calling CVodeAdjInit." #define MSGBP_BAD_WHICH "Illegal value for parameter which." #ifdef __cplusplus } #endif #endif StanHeaders/src/cvodes/cvodes_bbdpre_impl.h0000644000176200001440000000622114645137106020530 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. 
Reynolds @ SMU * Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Implementation header file for the CVBBDPRE module. * ----------------------------------------------------------------- */ #ifndef _CVSBBDPRE_IMPL_H #define _CVSBBDPRE_IMPL_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*----------------------------------------------------------------- Type: CVBBDPrecData -----------------------------------------------------------------*/ typedef struct CVBBDPrecDataRec { /* passed by user to CVBBDPrecInit and used by PrecSetup/PrecSolve */ sunindextype mudq, mldq, mukeep, mlkeep; realtype dqrely; CVLocalFn gloc; CVCommFn cfn; /* set by CVBBDPrecSetup and used by CVBBDPrecSolve */ SUNMatrix savedJ; SUNMatrix savedP; SUNLinearSolver LS; N_Vector tmp1; N_Vector tmp2; N_Vector tmp3; N_Vector zlocal; N_Vector rlocal; /* set by CVBBDPrecInit and used by CVBBDPrecSetup */ sunindextype n_local; /* available for optional output */ long int rpwsize; long int ipwsize; long int nge; /* pointer to cvode_mem */ void *cvode_mem; } *CVBBDPrecData; /*----------------------------------------------------------------- Type: CVBBDPrecDataB -----------------------------------------------------------------*/ typedef struct CVBBDPrecDataRecB { /* BBD user functions (glocB and cfnB) for backward run */ CVLocalFnB glocB; CVCommFnB cfnB; } *CVBBDPrecDataB; /*----------------------------------------------------------------- CVBBDPRE error messages -----------------------------------------------------------------*/ #define MSGBBD_MEM_NULL "Integrator memory is 
NULL." #define MSGBBD_LMEM_NULL "Linear solver memory is NULL. One of the SPILS linear solvers must be attached." #define MSGBBD_MEM_FAIL "A memory request failed." #define MSGBBD_BAD_NVECTOR "A required vector operation is not implemented." #define MSGBBD_SUNMAT_FAIL "An error arose from a SUNBandMatrix routine." #define MSGBBD_SUNLS_FAIL "An error arose from a SUNBandLinearSolver routine." #define MSGBBD_PMEM_NULL "BBD peconditioner memory is NULL. CVBBDPrecInit must be called." #define MSGBBD_FUNC_FAILED "The gloc or cfn routine failed in an unrecoverable manner." #define MSGBBD_NO_ADJ "Illegal attempt to call before calling CVodeAdjInit." #define MSGBBD_BAD_WHICH "Illegal value for the which parameter." #define MSGBBD_PDATAB_NULL "BBD preconditioner memory is NULL for the backward integration." #define MSGBBD_BAD_TINTERP "Bad t for interpolation." #ifdef __cplusplus } #endif #endif StanHeaders/src/cvodes/NOTICE0000644000176200001440000000221614645137106015441 0ustar liggesusersThis work was produced under the auspices of the U.S. Department of Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344. This work was prepared as an account of work sponsored by an agency of the United States Government. Neither the United States Government nor Lawrence Livermore National Security, LLC, nor any of their employees makes any warranty, expressed or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. 
The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes.StanHeaders/src/cvodes/cvodes_nls_stg.c0000644000176200001440000003524214645137106017722 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This the implementation file for the CVODES nonlinear solver interface. * ---------------------------------------------------------------------------*/ #include "cvodes_impl.h" #include "sundials/sundials_math.h" #include "sundials/sundials_nvector_senswrapper.h" /* constant macros */ #define ONE RCONST(1.0) /* private functions */ static int cvNlsResidualSensStg(N_Vector ycorStg, N_Vector resStg, void* cvode_mem); static int cvNlsFPFunctionSensStg(N_Vector ycorStg, N_Vector resStg, void* cvode_mem); static int cvNlsLSetupSensStg(booleantype jbad, booleantype* jcur, void* cvode_mem); static int cvNlsLSolveSensStg(N_Vector deltaStg, void* cvode_mem); static int cvNlsConvTestSensStg(SUNNonlinearSolver NLS, N_Vector ycorStg, N_Vector delStg, realtype tol, N_Vector ewtStg, void* cvode_mem); /* ----------------------------------------------------------------------------- * Exported functions * ---------------------------------------------------------------------------*/ int CVodeSetNonlinearSolverSensStg(void *cvode_mem, SUNNonlinearSolver NLS) { CVodeMem cv_mem; int retval, 
is; /* Return immediately if CVode memory is NULL */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetNonlinearSolverSensStg", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Return immediately if NLS memory is NULL */ if (NLS == NULL) { cvProcessError(NULL, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", "NLS must be non-NULL"); return (CV_ILL_INPUT); } /* check for required nonlinear solver functions */ if ( NLS->ops->gettype == NULL || NLS->ops->solve == NULL || NLS->ops->setsysfn == NULL ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", "NLS does not support required operations"); return(CV_ILL_INPUT); } /* check that sensitivities were initialized */ if (!(cv_mem->cv_sensi)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", MSGCV_NO_SENSI); return(CV_ILL_INPUT); } /* check that staggered corrector was selected */ if (cv_mem->cv_ism != CV_STAGGERED) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", "Sensitivity solution method is not CV_STAGGERED"); return(CV_ILL_INPUT); } /* free any existing nonlinear solver */ if ((cv_mem->NLSstg != NULL) && (cv_mem->ownNLSstg)) retval = SUNNonlinSolFree(cv_mem->NLSstg); /* set SUNNonlinearSolver pointer */ cv_mem->NLSstg = NLS; /* Set NLS ownership flag. If this function was called to attach the default NLS, CVODE will set the flag to SUNTRUE after this function returns. 
*/ cv_mem->ownNLSstg = SUNFALSE; /* set the nonlinear system function */ if (SUNNonlinSolGetType(NLS) == SUNNONLINEARSOLVER_ROOTFIND) { retval = SUNNonlinSolSetSysFn(cv_mem->NLSstg, cvNlsResidualSensStg); } else if (SUNNonlinSolGetType(NLS) == SUNNONLINEARSOLVER_FIXEDPOINT) { retval = SUNNonlinSolSetSysFn(cv_mem->NLSstg, cvNlsFPFunctionSensStg); } else { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", "Invalid nonlinear solver type"); return(CV_ILL_INPUT); } if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", "Setting nonlinear system function failed"); return(CV_ILL_INPUT); } /* set convergence test function */ retval = SUNNonlinSolSetConvTestFn(cv_mem->NLSstg, cvNlsConvTestSensStg, cvode_mem); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", "Setting convergence test function failed"); return(CV_ILL_INPUT); } /* set max allowed nonlinear iterations */ retval = SUNNonlinSolSetMaxIters(cv_mem->NLSstg, NLS_MAXCOR); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensStg", "Setting maximum number of nonlinear iterations failed"); return(CV_ILL_INPUT); } /* create vector wrappers if necessary */ if (cv_mem->stgMallocDone == SUNFALSE) { cv_mem->zn0Stg = N_VNewEmpty_SensWrapper(cv_mem->cv_Ns, cv_mem->cv_sunctx); if (cv_mem->zn0Stg == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSetNonlinearSolverSensStg", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->ycorStg = N_VNewEmpty_SensWrapper(cv_mem->cv_Ns, cv_mem->cv_sunctx); if (cv_mem->ycorStg == NULL) { N_VDestroy(cv_mem->zn0Stg); cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSetNonlinearSolverSensStg", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->ewtStg = N_VNewEmpty_SensWrapper(cv_mem->cv_Ns, cv_mem->cv_sunctx); if (cv_mem->ewtStg == NULL) { N_VDestroy(cv_mem->zn0Stg); N_VDestroy(cv_mem->ycorStg); 
cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSetNonlinearSolverSensStg", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->stgMallocDone = SUNTRUE; } /* attach vectors to vector wrappers */ for (is=0; is < cv_mem->cv_Ns; is++) { NV_VEC_SW(cv_mem->zn0Stg, is) = cv_mem->cv_znS[0][is]; NV_VEC_SW(cv_mem->ycorStg, is) = cv_mem->cv_acorS[is]; NV_VEC_SW(cv_mem->ewtStg, is) = cv_mem->cv_ewtS[is]; } /* Reset the acnrmScur flag to SUNFALSE */ cv_mem->cv_acnrmScur = SUNFALSE; return(CV_SUCCESS); } /* ----------------------------------------------------------------------------- * Private functions * ---------------------------------------------------------------------------*/ int cvNlsInitSensStg(CVodeMem cvode_mem) { int retval; /* set the linear solver setup wrapper function */ if (cvode_mem->cv_lsetup) retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLSstg, cvNlsLSetupSensStg); else retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLSstg, NULL); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensStg", "Setting the linear solver setup function failed"); return(CV_NLS_INIT_FAIL); } /* set the linear solver solve wrapper function */ if (cvode_mem->cv_lsolve) retval = SUNNonlinSolSetLSolveFn(cvode_mem->NLSstg, cvNlsLSolveSensStg); else retval = SUNNonlinSolSetLSolveFn(cvode_mem->NLSstg, NULL); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensStg", "Setting linear solver solve function failed"); return(CV_NLS_INIT_FAIL); } /* initialize nonlinear solver */ retval = SUNNonlinSolInitialize(cvode_mem->NLSstg); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensStg", MSGCV_NLS_INIT_FAIL); return(CV_NLS_INIT_FAIL); } return(CV_SUCCESS); } static int cvNlsLSetupSensStg(booleantype jbad, booleantype* jcur, void* cvode_mem) { CVodeMem cv_mem; int retval; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsLSetupSensStg", MSGCV_NO_MEM); 
return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* if the nonlinear solver marked the Jacobian as bad update convfail */ if (jbad) cv_mem->convfail = CV_FAIL_BAD_J; /* setup the linear solver */ retval = cv_mem->cv_lsetup(cv_mem, cv_mem->convfail, cv_mem->cv_y, cv_mem->cv_ftemp, &(cv_mem->cv_jcur), cv_mem->cv_vtemp1, cv_mem->cv_vtemp2, cv_mem->cv_vtemp3); cv_mem->cv_nsetups++; cv_mem->cv_nsetupsS++; /* update Jacobian status */ *jcur = cv_mem->cv_jcur; cv_mem->cv_gamrat = ONE; cv_mem->cv_gammap = cv_mem->cv_gamma; cv_mem->cv_crate = ONE; cv_mem->cv_crateS = ONE; cv_mem->cv_nstlp = cv_mem->cv_nst; if (retval < 0) return(CV_LSETUP_FAIL); if (retval > 0) return(SUN_NLS_CONV_RECVR); return(CV_SUCCESS); } static int cvNlsLSolveSensStg(N_Vector deltaStg, void* cvode_mem) { CVodeMem cv_mem; int retval, is; N_Vector *deltaS; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsLSolveSensStg", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract sensitivity deltas from the vector wrapper */ deltaS = NV_VECS_SW(deltaStg); /* solve the sensitivity linear systems */ for (is=0; iscv_Ns; is++) { retval = cv_mem->cv_lsolve(cv_mem, deltaS[is], cv_mem->cv_ewtS[is], cv_mem->cv_y, cv_mem->cv_ftemp); if (retval < 0) return(CV_LSOLVE_FAIL); if (retval > 0) return(SUN_NLS_CONV_RECVR); } return(CV_SUCCESS); } static int cvNlsConvTestSensStg(SUNNonlinearSolver NLS, N_Vector ycorStg, N_Vector deltaStg, realtype tol, N_Vector ewtStg, void* cvode_mem) { CVodeMem cv_mem; int m, retval; realtype Del; realtype dcon; N_Vector *ycorS, *deltaS, *ewtS; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsConvTestSensStg", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract the current sensitivity corrections */ ycorS = NV_VECS_SW(ycorStg); /* extract the sensitivity deltas */ deltaS = NV_VECS_SW(deltaStg); /* extract the sensitivity error weights */ ewtS = NV_VECS_SW(ewtStg); /* compute the norm 
of the state and sensitivity corrections */ Del = cvSensNorm(cv_mem, deltaS, ewtS); /* get the current nonlinear solver iteration count */ retval = SUNNonlinSolGetCurIter(NLS, &m); if (retval != CV_SUCCESS) return(CV_MEM_NULL); /* Test for convergence. If m > 0, an estimate of the convergence rate constant is stored in crate, and used in the test. Recall that, even when errconS=SUNFALSE, all variables are used in the convergence test. Hence, we use Del (and not del). However, acnrm is used in the error test and thus it has different forms depending on errconS (and this explains why we have to carry around del and delS). */ if (m > 0) { cv_mem->cv_crateS = SUNMAX(CRDOWN * cv_mem->cv_crateS, Del/cv_mem->cv_delp); } dcon = Del * SUNMIN(ONE, cv_mem->cv_crateS) / tol; /* check if nonlinear system was solved successfully */ if (dcon <= ONE) { if (cv_mem->cv_errconS) { cv_mem->cv_acnrmS = (m==0) ? Del : cvSensNorm(cv_mem, ycorS, ewtS); cv_mem->cv_acnrmScur = SUNTRUE; } return(CV_SUCCESS); } /* check if the iteration seems to be diverging */ if ((m >= 1) && (Del > RDIV*cv_mem->cv_delp)) return(SUN_NLS_CONV_RECVR); /* Save norm of correction and loop again */ cv_mem->cv_delp = Del; /* Not yet converged */ return(SUN_NLS_CONTINUE); } static int cvNlsResidualSensStg(N_Vector ycorStg, N_Vector resStg, void* cvode_mem) { CVodeMem cv_mem; int retval; N_Vector *ycorS, *resS; realtype cvals[3]; N_Vector* XXvecs[3]; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsResidualSensStg", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract sensitivity and residual vectors from the vector wrapper */ ycorS = NV_VECS_SW(ycorStg); resS = NV_VECS_SW(resStg); /* update sensitivities based on the current correction */ retval = N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[0], ONE, ycorS, cv_mem->cv_yS); if (retval != CV_SUCCESS) return(CV_VECTOROP_ERR); /* evaluate the sensitivity rhs function */ retval = cvSensRhsWrapper(cv_mem, 
cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_ftemp, cv_mem->cv_yS, cv_mem->cv_ftempS, cv_mem->cv_vtemp1, cv_mem->cv_vtemp2); if (retval < 0) return(CV_SRHSFUNC_FAIL); if (retval > 0) return(SRHSFUNC_RECVR); /* compute the sensitivity resiudal */ cvals[0] = cv_mem->cv_rl1; XXvecs[0] = cv_mem->cv_znS[1]; cvals[1] = ONE; XXvecs[1] = ycorS; cvals[2] = -cv_mem->cv_gamma; XXvecs[2] = cv_mem->cv_ftempS; retval = N_VLinearCombinationVectorArray(cv_mem->cv_Ns, 3, cvals, XXvecs, resS); if (retval != CV_SUCCESS) return(CV_VECTOROP_ERR); return(CV_SUCCESS); } static int cvNlsFPFunctionSensStg(N_Vector ycorStg, N_Vector resStg, void* cvode_mem) { CVodeMem cv_mem; int retval, is; N_Vector *ycorS, *resS; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsFPFunctionSensStg", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract sensitivity and residual vectors from the vector wrapper */ ycorS = NV_VECS_SW(ycorStg); resS = NV_VECS_SW(resStg); /* update the sensitivities based on the current correction */ retval = N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[0], ONE, ycorS, cv_mem->cv_yS); if (retval != CV_SUCCESS) return(CV_VECTOROP_ERR); /* evaluate the sensitivity rhs function */ retval = cvSensRhsWrapper(cv_mem, cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_ftemp, cv_mem->cv_yS, resS, cv_mem->cv_vtemp1, cv_mem->cv_vtemp2); if (retval < 0) return(CV_SRHSFUNC_FAIL); if (retval > 0) return(SRHSFUNC_RECVR); /* evaluate sensitivity fixed point function */ for (is=0; iscv_Ns; is++) { N_VLinearSum(cv_mem->cv_h, resS[is], -ONE, cv_mem->cv_znS[1][is], resS[is]); N_VScale(cv_mem->cv_rl1, resS[is], resS[is]); } return(CV_SUCCESS); } StanHeaders/src/cvodes/cvodes_io.c0000644000176200001440000013551614645137106016665 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Alan C. 
Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the optional input and output * functions for the CVODES solver. * ----------------------------------------------------------------- */ #include #include #include "cvodes_impl.h" #include "sundials/sundials_types.h" #include "sundials/sundials_math.h" #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWOPT5 RCONST(2.5) /* * ================================================================= * CVODES optional input functions * ================================================================= */ /* * CVodeSetErrHandlerFn * * Specifies the error handler function */ int CVodeSetErrHandlerFn(void *cvode_mem, CVErrHandlerFn ehfun, void *eh_data) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetErrHandlerFn", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_ehfun = ehfun; cv_mem->cv_eh_data = eh_data; return(CV_SUCCESS); } /* * CVodeSetErrFile * * Specifies the FILE pointer for output (NULL means no messages) */ int CVodeSetErrFile(void *cvode_mem, FILE *errfp) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetErrFile", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_errfp = errfp; return(CV_SUCCESS); } /* * CVodeSetMonitorFn * * Specifies the user function to call for monitoring * the solution and/or integrator statistics. 
*/ int CVodeSetMonitorFn(void *cvode_mem, CVMonitorFn fn) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMonitorFn", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; #ifdef SUNDIALS_BUILD_WITH_MONITORING cv_mem->cv_monitorfun = fn; return(CV_SUCCESS); #else cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMonitorFn", "SUNDIALS was not built with monitoring enabled."); return(CV_ILL_INPUT); #endif } /* * CVodeSetMonitorFrequency * * Specifies the frequency with which to call the user function. */ int CVodeSetMonitorFrequency(void *cvode_mem, long int nst) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMonitorFrequency", MSGCV_NO_MEM); return(CV_MEM_NULL); } if (nst < 0) { cvProcessError(NULL, CV_ILL_INPUT, "CVODES", "CVodeSetMonitorFrequency", "step interval must be >= 0\n"); return(CV_ILL_INPUT); } cv_mem = (CVodeMem) cvode_mem; #ifdef SUNDIALS_BUILD_WITH_MONITORING cv_mem->cv_monitor_interval = nst; return(CV_SUCCESS); #else cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMonitorFrequency", "SUNDIALS was not built with monitoring enabled."); return(CV_ILL_INPUT); #endif } /* * CVodeSetUserData * * Specifies the user data pointer for f */ int CVodeSetUserData(void *cvode_mem, void *user_data) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetUserData", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_user_data = user_data; return(CV_SUCCESS); } /* * CVodeSetMaxOrd * * Specifies the maximum method order */ int CVodeSetMaxOrd(void *cvode_mem, int maxord) { CVodeMem cv_mem; int qmax_alloc; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMaxOrd", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (maxord <= 0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMaxOrd", MSGCV_NEG_MAXORD); return(CV_ILL_INPUT); } 
/* Cannot increase maximum order beyond the value that was used when allocating memory */ qmax_alloc = cv_mem->cv_qmax_alloc; qmax_alloc = SUNMIN(qmax_alloc, cv_mem->cv_qmax_allocQ); qmax_alloc = SUNMIN(qmax_alloc, cv_mem->cv_qmax_allocS); if (maxord > qmax_alloc) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMaxOrd", MSGCV_BAD_MAXORD); return(CV_ILL_INPUT); } cv_mem->cv_qmax = maxord; return(CV_SUCCESS); } /* * CVodeSetMaxNumSteps * * Specifies the maximum number of integration steps */ int CVodeSetMaxNumSteps(void *cvode_mem, long int mxsteps) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMaxNumSteps", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Passing mxsteps=0 sets the default. Passing mxsteps<0 disables the test. */ if (mxsteps == 0) cv_mem->cv_mxstep = MXSTEP_DEFAULT; else cv_mem->cv_mxstep = mxsteps; return(CV_SUCCESS); } /* * CVodeSetMaxHnilWarns * * Specifies the maximum number of warnings for small h */ int CVodeSetMaxHnilWarns(void *cvode_mem, int mxhnil) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMaxHnilWarns", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_mxhnil = mxhnil; return(CV_SUCCESS); } /* *CVodeSetStabLimDet * * Turns on/off the stability limit detection algorithm */ int CVodeSetStabLimDet(void *cvode_mem, booleantype sldet) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetStabLimDet", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if( sldet && (cv_mem->cv_lmm != CV_BDF) ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetStabLimDet", MSGCV_SET_SLDET); return(CV_ILL_INPUT); } cv_mem->cv_sldeton = sldet; return(CV_SUCCESS); } /* * CVodeSetInitStep * * Specifies the initial step size */ int CVodeSetInitStep(void *cvode_mem, realtype hin) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, 
CV_MEM_NULL, "CVODES", "CVodeSetInitStep", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }

  cv_mem = (CVodeMem) cvode_mem;

  /* hin = 0 means "let CVODES choose"; the value is only a hint. */
  cv_mem->cv_hin = hin;

  return(CV_SUCCESS);
}

/*
 * CVodeSetMinStep
 *
 * Specifies the minimum step size.
 * Rejects negative hmin, maps hmin == 0 back to the default, and
 * rejects any hmin that would exceed the current maximum step
 * (checked via hmin * hmax_inv > 1).
 */

int CVodeSetMinStep(void *cvode_mem, realtype hmin)
{
  CVodeMem cv_mem;

  if (cvode_mem==NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMinStep", MSGCV_NO_MEM);
    return(CV_MEM_NULL);
  }

  cv_mem = (CVodeMem) cvode_mem;

  if (hmin<0) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMinStep",
                   MSGCV_NEG_HMIN);
    return(CV_ILL_INPUT);
  }

  /* Passing 0 sets hmin = zero */
  if (hmin == ZERO) {
    cv_mem->cv_hmin = HMIN_DEFAULT;
    return(CV_SUCCESS);
  }

  /* hmin must not exceed the current hmax (stored as its inverse). */
  if (hmin * cv_mem->cv_hmax_inv > ONE) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMinStep",
                   MSGCV_BAD_HMIN_HMAX);
    return(CV_ILL_INPUT);
  }

  cv_mem->cv_hmin = hmin;

  return(CV_SUCCESS);
}

/*
 * CVodeSetMaxStep
 *
 * Specifies the maximum step size.
 * Stored internally as the inverse (cv_hmax_inv); hmax == 0 selects
 * the default (infinity), and a hmax smaller than the current hmin
 * is rejected.
 */

int CVodeSetMaxStep(void *cvode_mem, realtype hmax)
{
  realtype hmax_inv;
  CVodeMem cv_mem;

  if (cvode_mem==NULL) {
    cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMaxStep", MSGCV_NO_MEM);
    return (CV_MEM_NULL);
  }

  cv_mem = (CVodeMem) cvode_mem;

  if (hmax < 0) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMaxStep",
                   MSGCV_NEG_HMAX);
    return(CV_ILL_INPUT);
  }

  /* Passing 0 sets hmax = infinity */
  if (hmax == ZERO) {
    cv_mem->cv_hmax_inv = HMAX_INV_DEFAULT;
    return(CV_SUCCESS);
  }

  hmax_inv = ONE/hmax;
  if (hmax_inv * cv_mem->cv_hmin > ONE) {
    cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetMaxStep",
                   MSGCV_BAD_HMIN_HMAX);
    return(CV_ILL_INPUT);
  }

  cv_mem->cv_hmax_inv = hmax_inv;

  return(CV_SUCCESS);
}

/*
 * CVodeSetStopTime
 *
 * Specifies the time beyond which the integration is not to proceed.
*/ int CVodeSetStopTime(void *cvode_mem, realtype tstop) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetStopTime", MSGCV_NO_MEM); return (CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* If CVode was called at least once, test if tstop is legal * (i.e. if it was not already passed). * If CVodeSetStopTime is called before the first call to CVode, * tstop will be checked in CVode. */ if (cv_mem->cv_nst > 0) { if ( (tstop - cv_mem->cv_tn) * cv_mem->cv_h < ZERO ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetStopTime", MSGCV_BAD_TSTOP, tstop, cv_mem->cv_tn); return(CV_ILL_INPUT); } } cv_mem->cv_tstop = tstop; cv_mem->cv_tstopset = SUNTRUE; return(CV_SUCCESS); } /* * CVodeSetMaxErrTestFails * * Specifies the maximum number of error test failures during one * step try. */ int CVodeSetMaxErrTestFails(void *cvode_mem, int maxnef) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMaxErrTestFails", MSGCV_NO_MEM); return (CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_maxnef = maxnef; return(CV_SUCCESS); } /* * CVodeSetMaxConvFails * * Specifies the maximum number of nonlinear convergence failures * during one step try. */ int CVodeSetMaxConvFails(void *cvode_mem, int maxncf) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMaxConvFails", MSGCV_NO_MEM); return (CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_maxncf = maxncf; return(CV_SUCCESS); } /* * CVodeSetMaxNonlinIters * * Specifies the maximum number of nonlinear iterations during * one solve. */ int CVodeSetMaxNonlinIters(void *cvode_mem, int maxcor) { CVodeMem cv_mem; booleantype sensi_sim; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetMaxNonlinIters", MSGCV_NO_MEM); return (CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Are we computing sensitivities with the simultaneous approach? 
*/ sensi_sim = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_SIMULTANEOUS)); if (sensi_sim) { /* check that the NLS is non-NULL */ if (cv_mem->NLSsim == NULL) { cvProcessError(NULL, CV_MEM_FAIL, "CVODES", "CVodeSetMaxNonlinIters", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } return(SUNNonlinSolSetMaxIters(cv_mem->NLSsim, maxcor)); } else { /* check that the NLS is non-NULL */ if (cv_mem->NLS == NULL) { cvProcessError(NULL, CV_MEM_FAIL, "CVODES", "CVodeSetMaxNonlinIters", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } return(SUNNonlinSolSetMaxIters(cv_mem->NLS, maxcor)); } return(CV_SUCCESS); } /* * CVodeSetNonlinConvCoef * * Specifies the coeficient in the nonlinear solver convergence * test */ int CVodeSetNonlinConvCoef(void *cvode_mem, realtype nlscoef) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetNonlinConvCoef", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_nlscoef = nlscoef; return(CV_SUCCESS); } /* * CVodeSetLSetupFrequency * * Specifies the frequency for calling the linear solver setup function to * recompute the Jacobian matrix and/or preconditioner */ int CVodeSetLSetupFrequency(void *cvode_mem, long int msbp) { CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetLSetupFrequency", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* check for a valid input */ if (msbp < 0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetLSetupFrequency", "A negative setup frequency was provided"); return(CV_ILL_INPUT); } /* use default or user provided value */ cv_mem->cv_msbp = (msbp == 0) ? MSBP : msbp; return(CV_SUCCESS); } /* * CVodeSetRootDirection * * Specifies the direction of zero-crossings to be monitored. * The default is to monitor both crossings. 
*/ int CVodeSetRootDirection(void *cvode_mem, int *rootdir) { CVodeMem cv_mem; int i, nrt; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetRootDirection", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; nrt = cv_mem->cv_nrtfn; if (nrt==0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetRootDirection", MSGCV_NO_ROOT); return(CV_ILL_INPUT); } for(i=0; icv_rootdir[i] = rootdir[i]; return(CV_SUCCESS); } /* * CVodeSetNoInactiveRootWarn * * Disables issuing a warning if some root function appears * to be identically zero at the beginning of the integration */ int CVodeSetNoInactiveRootWarn(void *cvode_mem) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetNoInactiveRootWarn", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_mxgnull = 0; return(CV_SUCCESS); } /* * CVodeSetConstraints * * Setup for constraint handling feature */ int CVodeSetConstraints(void *cvode_mem, N_Vector constraints) { CVodeMem cv_mem; realtype temptest; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetConstraints", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* If there are no constraints, destroy data structures */ if (constraints == NULL) { if (cv_mem->cv_constraintsMallocDone) { N_VDestroy(cv_mem->cv_constraints); cv_mem->cv_lrw -= cv_mem->cv_lrw1; cv_mem->cv_liw -= cv_mem->cv_liw1; } cv_mem->cv_constraintsMallocDone = SUNFALSE; cv_mem->cv_constraintsSet = SUNFALSE; return(CV_SUCCESS); } /* Test if required vector ops. 
are defined */ if (constraints->ops->nvdiv == NULL || constraints->ops->nvmaxnorm == NULL || constraints->ops->nvcompare == NULL || constraints->ops->nvconstrmask == NULL || constraints->ops->nvminquotient == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetConstraints", MSGCV_BAD_NVECTOR); return(CV_ILL_INPUT); } /* Check the constraints vector */ temptest = N_VMaxNorm(constraints); if ((temptest > TWOPT5) || (temptest < HALF)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetConstraints", MSGCV_BAD_CONSTR); return(CV_ILL_INPUT); } if ( !(cv_mem->cv_constraintsMallocDone) ) { cv_mem->cv_constraints = N_VClone(constraints); cv_mem->cv_lrw += cv_mem->cv_lrw1; cv_mem->cv_liw += cv_mem->cv_liw1; cv_mem->cv_constraintsMallocDone = SUNTRUE; } /* Load the constraints vector */ N_VScale(ONE, constraints, cv_mem->cv_constraints); cv_mem->cv_constraintsSet = SUNTRUE; return(CV_SUCCESS); } /* * ================================================================= * Quadrature optional input functions * ================================================================= */ int CVodeSetQuadErrCon(void *cvode_mem, booleantype errconQ) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetQuadErrCon", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_errconQ = errconQ; return(CV_SUCCESS); } /* * ================================================================= * FSA optional input functions * ================================================================= */ int CVodeSetSensDQMethod(void *cvode_mem, int DQtype, realtype DQrhomax) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetSensDQMethod", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if ( (DQtype != CV_CENTERED) && (DQtype != CV_FORWARD) ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetSensDQMethod", MSGCV_BAD_DQTYPE); return(CV_ILL_INPUT); } if (DQrhomax < 
ZERO ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetSensDQMethod", MSGCV_BAD_DQRHO); return(CV_ILL_INPUT); } cv_mem->cv_DQtype = DQtype; cv_mem->cv_DQrhomax = DQrhomax; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeSetSensErrCon(void *cvode_mem, booleantype errconS) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetSensErrCon", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; cv_mem->cv_errconS = errconS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeSetSensMaxNonlinIters(void *cvode_mem, int maxcorS) { CVodeMem cv_mem; booleantype sensi_stg; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetSensMaxNonlinIters", MSGCV_NO_MEM); return (CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Are we computing sensitivities with a staggered approach? */ sensi_stg = (cv_mem->cv_sensi && (cv_mem->cv_ism==CV_STAGGERED)); if (sensi_stg) { /* check that the NLS is non-NULL */ if (cv_mem->NLSstg == NULL) { cvProcessError(NULL, CV_MEM_FAIL, "CVODES", "CVodeSetSensMaxNonlinIters", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } return(SUNNonlinSolSetMaxIters(cv_mem->NLSstg, maxcorS)); } else { /* check that the NLS is non-NULL */ if (cv_mem->NLSstg1 == NULL) { cvProcessError(NULL, CV_MEM_FAIL, "CVODES", "CVodeSetMaxNonlinIters", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } return(SUNNonlinSolSetMaxIters(cv_mem->NLSstg1, maxcorS)); } return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeSetSensParams(void *cvode_mem, realtype *p, realtype *pbar, int *plist) { CVodeMem cv_mem; int is, Ns; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetSensParams", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was sensitivity initialized? 
*/ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeSetSensParams", MSGCV_NO_SENSI); return(CV_NO_SENS); } Ns = cv_mem->cv_Ns; /* Parameters */ cv_mem->cv_p = p; /* pbar */ if (pbar != NULL) for (is=0; iscv_pbar[is] = SUNRabs(pbar[is]); } else for (is=0; iscv_pbar[is] = ONE; /* plist */ if (plist != NULL) for (is=0; iscv_plist[is] = plist[is]; } else for (is=0; iscv_plist[is] = is; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeSetQuadSensErrCon(void *cvode_mem, booleantype errconQS) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetQuadSensErrCon", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was sensitivity initialized? */ if (cv_mem->cv_SensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeSetQuadSensTolerances", MSGCV_NO_SENSI); return(CV_NO_SENS); } /* Ckeck if quadrature sensitivity was initialized? 
*/ if (cv_mem->cv_QuadSensMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeSetQuadSensErrCon", MSGCV_NO_QUADSENSI); return(CV_NO_QUAD); } cv_mem->cv_errconQS = errconQS; return(CV_SUCCESS); } /* * ================================================================= * CVODES optional output functions * ================================================================= */ /* * CVodeGetNumSteps * * Returns the current number of integration steps */ int CVodeGetNumSteps(void *cvode_mem, long int *nsteps) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumSteps", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *nsteps = cv_mem->cv_nst; return(CV_SUCCESS); } /* * CVodeGetNumRhsEvals * * Returns the current number of calls to f */ int CVodeGetNumRhsEvals(void *cvode_mem, long int *nfevals) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumRhsEvals", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *nfevals = cv_mem->cv_nfe; return(CV_SUCCESS); } /* * CVodeGetNumLinSolvSetups * * Returns the current number of calls to the linear solver setup routine */ int CVodeGetNumLinSolvSetups(void *cvode_mem, long int *nlinsetups) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumLinSolvSetups", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *nlinsetups = cv_mem->cv_nsetups; return(CV_SUCCESS); } /* * CVodeGetNumErrTestFails * * Returns the current number of error test failures */ int CVodeGetNumErrTestFails(void *cvode_mem, long int *netfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumErrTestFails", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *netfails = cv_mem->cv_netf; return(CV_SUCCESS); } /* * CVodeGetLastOrder * * Returns the order on the last succesful step */ int 
CVodeGetLastOrder(void *cvode_mem, int *qlast) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetLastOrder", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *qlast = cv_mem->cv_qu; return(CV_SUCCESS); } /* * CVodeGetCurrentOrder * * Returns the order to be attempted on the next step */ int CVodeGetCurrentOrder(void *cvode_mem, int *qcur) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetCurrentOrder", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *qcur = cv_mem->cv_next_q; return(CV_SUCCESS); } /* * CVodeGetCurrentGamma * * Returns the value of gamma for the current step. */ int CVodeGetCurrentGamma(void *cvode_mem, realtype *gamma) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetCurrentGamma", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *gamma = cv_mem->cv_gamma; return(CV_SUCCESS); } /* * CVodeGetNumStabLimOrderReds * * Returns the number of order reductions triggered by the stability * limit detection algorithm */ int CVodeGetNumStabLimOrderReds(void *cvode_mem, long int *nslred) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumStabLimOrderReds", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sldeton==SUNFALSE) *nslred = 0; else *nslred = cv_mem->cv_nor; return(CV_SUCCESS); } /* * CVodeGetActualInitStep * * Returns the step size used on the first step */ int CVodeGetActualInitStep(void *cvode_mem, realtype *hinused) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetActualInitStep", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *hinused = cv_mem->cv_h0u; return(CV_SUCCESS); } /* * CVodeGetLastStep * * Returns the step size used on the last successful step */ int CVodeGetLastStep(void *cvode_mem, realtype 
*hlast) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetLastStep", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *hlast = cv_mem->cv_hu; return(CV_SUCCESS); } /* * CVodeGetCurrentStep * * Returns the step size to be attempted on the next step */ int CVodeGetCurrentStep(void *cvode_mem, realtype *hcur) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetCurrentStep", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *hcur = cv_mem->cv_next_h; return(CV_SUCCESS); } /* * CVodeGetCurrentState * * Returns the current state vector */ int CVodeGetCurrentState(void *cvode_mem, N_Vector *y) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetCurrentState", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *y = cv_mem->cv_y; return(CV_SUCCESS); } /* * CVodeGetCurrentStateSens * * Returns the current sensitivity state vector array */ int CVodeGetCurrentStateSens(void *cvode_mem, N_Vector **yS) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetCurrentStateSens", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *yS = cv_mem->cv_yS; return(CV_SUCCESS); } /* * CVodeGetCurrentSensSolveIndex * * Returns the current index of the sensitivity solve when using * the staggered1 nonlinear solver. 
*/ int CVodeGetCurrentSensSolveIndex(void *cvode_mem, int *index) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetCurrentSensSolveIndex", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *index = cv_mem->sens_solve_idx; return(CV_SUCCESS); } /* * CVodeGetCurrentTime * * Returns the current value of the independent variable */ int CVodeGetCurrentTime(void *cvode_mem, realtype *tcur) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetCurrentTime", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *tcur = cv_mem->cv_tn; return(CV_SUCCESS); } /* * CVodeGetTolScaleFactor * * Returns a suggested factor for scaling tolerances */ int CVodeGetTolScaleFactor(void *cvode_mem, realtype *tolsfact) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetTolScaleFactor", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *tolsfact = cv_mem->cv_tolsf; return(CV_SUCCESS); } /* * CVodeGetErrWeights * * This routine returns the current weight vector. 
*/ int CVodeGetErrWeights(void *cvode_mem, N_Vector eweight) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetErrWeights", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; N_VScale(ONE, cv_mem->cv_ewt, eweight); return(CV_SUCCESS); } /* * CVodeGetEstLocalErrors * * Returns an estimate of the local error */ int CVodeGetEstLocalErrors(void *cvode_mem, N_Vector ele) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetEstLocalErrors", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; N_VScale(ONE, cv_mem->cv_acor, ele); return(CV_SUCCESS); } /* * CVodeGetWorkSpace * * Returns integrator work space requirements */ int CVodeGetWorkSpace(void *cvode_mem, long int *lenrw, long int *leniw) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetWorkSpace", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *leniw = cv_mem->cv_liw; *lenrw = cv_mem->cv_lrw; return(CV_SUCCESS); } /* * CVodeGetIntegratorStats * * Returns integrator statistics */ int CVodeGetIntegratorStats(void *cvode_mem, long int *nsteps, long int *nfevals, long int *nlinsetups, long int *netfails, int *qlast, int *qcur, realtype *hinused, realtype *hlast, realtype *hcur, realtype *tcur) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetIntegratorStats", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *nsteps = cv_mem->cv_nst; *nfevals = cv_mem->cv_nfe; *nlinsetups = cv_mem->cv_nsetups; *netfails = cv_mem->cv_netf; *qlast = cv_mem->cv_qu; *qcur = cv_mem->cv_next_q; *hinused = cv_mem->cv_h0u; *hlast = cv_mem->cv_hu; *hcur = cv_mem->cv_next_h; *tcur = cv_mem->cv_tn; return(CV_SUCCESS); } /* * CVodeGetNumGEvals * * Returns the current number of calls to g (for rootfinding) */ int CVodeGetNumGEvals(void *cvode_mem, long int *ngevals) { CVodeMem cv_mem; if 
(cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumGEvals", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *ngevals = cv_mem->cv_nge; return(CV_SUCCESS); } /* * CVodeGetRootInfo * * Returns pointer to array rootsfound showing roots found */ int CVodeGetRootInfo(void *cvode_mem, int *rootsfound) { CVodeMem cv_mem; int i, nrt; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetRootInfo", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; nrt = cv_mem->cv_nrtfn; for (i=0; icv_iroots[i]; return(CV_SUCCESS); } /* * CVodeGetNumNonlinSolvIters * * Returns the current number of iterations in the nonlinear solver */ int CVodeGetNumNonlinSolvIters(void *cvode_mem, long int *nniters) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumNonlinSolvIters", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *nniters = cv_mem->cv_nni; return(CV_SUCCESS); } /* * CVodeGetNumNonlinSolvConvFails * * Returns the current number of convergence failures in the * nonlinear solver */ int CVodeGetNumNonlinSolvConvFails(void *cvode_mem, long int *nncfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumNonlinSolvConvFails", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *nncfails = cv_mem->cv_ncfn; return(CV_SUCCESS); } /* * CVodeGetNonlinSolvStats * * Returns nonlinear solver statistics */ int CVodeGetNonlinSolvStats(void *cvode_mem, long int *nniters, long int *nncfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNonlinSolvStats", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *nniters = cv_mem->cv_nni; *nncfails = cv_mem->cv_ncfn; return(CV_SUCCESS); } /* * ================================================================= * Quadrature optional output functions * 
================================================================= */ /*-----------------------------------------------------------------*/ int CVodeGetQuadNumRhsEvals(void *cvode_mem, long int *nfQevals) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadNumRhsEvals", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_quadr==SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeGetQuadNumRhsEvals", MSGCV_NO_QUAD); return(CV_NO_QUAD); } *nfQevals = cv_mem->cv_nfQe; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetQuadNumErrTestFails(void *cvode_mem, long int *nQetfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadNumErrTestFails", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_quadr==SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeGetQuadNumErrTestFails", MSGCV_NO_QUAD); return(CV_NO_QUAD); } *nQetfails = cv_mem->cv_netfQ; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetQuadErrWeights(void *cvode_mem, N_Vector eQweight) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadErrWeights", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_quadr==SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeGetQuadErrWeights", MSGCV_NO_QUAD); return(CV_NO_QUAD); } if(cv_mem->cv_errconQ) N_VScale(ONE, cv_mem->cv_ewtQ, eQweight); return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetQuadStats(void *cvode_mem, long int *nfQevals, long int *nQetfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadStats", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if 
(cv_mem->cv_quadr==SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUAD, "CVODES", "CVodeGetQuadStats", MSGCV_NO_QUAD); return(CV_NO_QUAD); } *nfQevals = cv_mem->cv_nfQe; *nQetfails = cv_mem->cv_netfQ; return(CV_SUCCESS); } /* * ================================================================= * Quadrature FSA optional output functions * ================================================================= */ /*-----------------------------------------------------------------*/ int CVodeGetQuadSensNumRhsEvals(void *cvode_mem, long int *nfQSevals) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSensNumRhsEvals", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_quadr_sensi == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeGetQuadSensNumRhsEvals", MSGCV_NO_QUADSENSI); return(CV_NO_QUADSENS); } *nfQSevals = cv_mem->cv_nfQSe; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetQuadSensNumErrTestFails(void *cvode_mem, long int *nQSetfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSensNumErrTestFails", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_quadr_sensi == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeGetQuadSensNumErrTestFails", MSGCV_NO_QUADSENSI); return(CV_NO_QUADSENS); } *nQSetfails = cv_mem->cv_netfQS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetQuadSensErrWeights(void *cvode_mem, N_Vector *eQSweight) { CVodeMem cv_mem; int is, Ns; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSensErrWeights", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_quadr_sensi == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeGetQuadSensErrWeights", MSGCV_NO_QUADSENSI); 
return(CV_NO_QUADSENS); } Ns = cv_mem->cv_Ns; if (cv_mem->cv_errconQS) for (is=0; iscv_ewtQS[is], eQSweight[is]); return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetQuadSensStats(void *cvode_mem, long int *nfQSevals, long int *nQSetfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetQuadSensStats", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_quadr_sensi == SUNFALSE) { cvProcessError(cv_mem, CV_NO_QUADSENS, "CVODES", "CVodeGetQuadSensStats", MSGCV_NO_QUADSENSI); return(CV_NO_QUADSENS); } *nfQSevals = cv_mem->cv_nfQSe; *nQSetfails = cv_mem->cv_netfQS; return(CV_SUCCESS); } /* * ================================================================= * FSA optional output functions * ================================================================= */ /*-----------------------------------------------------------------*/ int CVodeGetSensNumRhsEvals(void *cvode_mem, long int *nfSevals) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensNumRhsEvals", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensNumRhsEvals", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nfSevals = cv_mem->cv_nfSe; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetNumRhsEvalsSens(void *cvode_mem, long int *nfevalsS) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNumRhsEvalsSens", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetNumRhsEvalsSens", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nfevalsS = cv_mem->cv_nfeS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int 
CVodeGetSensNumErrTestFails(void *cvode_mem, long int *nSetfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensNumErrTestFails", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensNumErrTestFails", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nSetfails = cv_mem->cv_netfS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetSensNumLinSolvSetups(void *cvode_mem, long int *nlinsetupsS) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensNumLinSolvSetups", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensNumLinSolvSetups", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nlinsetupsS = cv_mem->cv_nsetupsS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetSensErrWeights(void *cvode_mem, N_Vector *eSweight) { CVodeMem cv_mem; int is, Ns; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensErrWeights", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensErrWeights", MSGCV_NO_SENSI); return(CV_NO_SENS); } Ns = cv_mem->cv_Ns; for (is=0; iscv_ewtS[is], eSweight[is]); return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetSensStats(void *cvode_mem, long int *nfSevals, long int *nfevalsS, long int *nSetfails, long int *nlinsetupsS) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensStats", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", 
"CVodeGetSensStats", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nfSevals = cv_mem->cv_nfSe; *nfevalsS = cv_mem->cv_nfeS; *nSetfails = cv_mem->cv_netfS; *nlinsetupsS = cv_mem->cv_nsetupsS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetSensNumNonlinSolvIters(void *cvode_mem, long int *nSniters) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensNumNonlinSolvIters", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensNumNonlinSolvIters", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nSniters = cv_mem->cv_nniS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetSensNumNonlinSolvConvFails(void *cvode_mem, long int *nSncfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensNumNonlinSolvConvFails", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensNumNonlinSolvConvFails", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nSncfails = cv_mem->cv_ncfnS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetSensNonlinSolvStats(void *cvode_mem, long int *nSniters, long int *nSncfails) { CVodeMem cv_mem; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetSensNonlinSolvStats", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetSensNonlinSolvStats", MSGCV_NO_SENSI); return(CV_NO_SENS); } *nSniters = cv_mem->cv_nniS; *nSncfails = cv_mem->cv_ncfnS; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetStgrSensNumNonlinSolvIters(void *cvode_mem, long 
int *nSTGR1niters) { CVodeMem cv_mem; int is, Ns; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetStgrSensNumNonlinSolvIters", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; Ns = cv_mem->cv_Ns; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetStgrSensNumNonlinSolvIters", MSGCV_NO_SENSI); return(CV_NO_SENS); } if(cv_mem->cv_ism==CV_STAGGERED1) for(is=0; iscv_nniS1[is]; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetStgrSensNumNonlinSolvConvFails(void *cvode_mem, long int *nSTGR1ncfails) { CVodeMem cv_mem; int is, Ns; if (cvode_mem==NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetStgrSensNumNonlinSolvConvFails", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; Ns = cv_mem->cv_Ns; if (cv_mem->cv_sensi==SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetStgrSensNumNonlinSolvConvFails", MSGCV_NO_SENSI); return(CV_NO_SENS); } if(cv_mem->cv_ism==CV_STAGGERED1) for(is=0; iscv_ncfnS1[is]; return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ int CVodeGetStgrSensNonlinSolvStats(void *cvode_mem, long int *nSTGR1niters, long int *nSTGR1ncfails) { CVodeMem cv_mem; int is, Ns; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetStgrSensNonlinSolvStats", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; Ns = cv_mem->cv_Ns; if (cv_mem->cv_sensi == SUNFALSE) { cvProcessError(cv_mem, CV_NO_SENS, "CVODES", "CVodeGetStgrSensNonlinSolvStats", MSGCV_NO_SENSI); return(CV_NO_SENS); } if(cv_mem->cv_ism == CV_STAGGERED1) { for(is=0; iscv_nniS1[is]; for(is=0; iscv_ncfnS1[is]; } return(CV_SUCCESS); } /*-----------------------------------------------------------------*/ char *CVodeGetReturnFlagName(long int flag) { char *name; name = (char *)malloc(24*sizeof(char)); switch(flag) { case CV_SUCCESS: 
sprintf(name,"CV_SUCCESS"); break; case CV_TSTOP_RETURN: sprintf(name,"CV_TSTOP_RETURN"); break; case CV_ROOT_RETURN: sprintf(name,"CV_ROOT_RETURN"); break; case CV_TOO_MUCH_WORK: sprintf(name,"CV_TOO_MUCH_WORK"); break; case CV_TOO_MUCH_ACC: sprintf(name,"CV_TOO_MUCH_ACC"); break; case CV_ERR_FAILURE: sprintf(name,"CV_ERR_FAILURE"); break; case CV_CONV_FAILURE: sprintf(name,"CV_CONV_FAILURE"); break; case CV_LINIT_FAIL: sprintf(name,"CV_LINIT_FAIL"); break; case CV_LSETUP_FAIL: sprintf(name,"CV_LSETUP_FAIL"); break; case CV_LSOLVE_FAIL: sprintf(name,"CV_LSOLVE_FAIL"); break; case CV_RHSFUNC_FAIL: sprintf(name,"CV_RHSFUNC_FAIL"); break; case CV_FIRST_RHSFUNC_ERR: sprintf(name,"CV_FIRST_RHSFUNC_ERR"); break; case CV_REPTD_RHSFUNC_ERR: sprintf(name,"CV_REPTD_RHSFUNC_ERR"); break; case CV_UNREC_RHSFUNC_ERR: sprintf(name,"CV_UNREC_RHSFUNC_ERR"); break; case CV_RTFUNC_FAIL: sprintf(name,"CV_RTFUNC_FAIL"); break; case CV_MEM_FAIL: sprintf(name,"CV_MEM_FAIL"); break; case CV_MEM_NULL: sprintf(name,"CV_MEM_NULL"); break; case CV_ILL_INPUT: sprintf(name,"CV_ILL_INPUT"); break; case CV_NO_MALLOC: sprintf(name,"CV_NO_MALLOC"); break; case CV_BAD_K: sprintf(name,"CV_BAD_K"); break; case CV_BAD_T: sprintf(name,"CV_BAD_T"); break; case CV_BAD_DKY: sprintf(name,"CV_BAD_DKY"); break; case CV_NO_QUAD: sprintf(name,"CV_NO_QUAD"); break; case CV_QRHSFUNC_FAIL: sprintf(name,"CV_QRHSFUNC_FAIL"); break; case CV_FIRST_QRHSFUNC_ERR: sprintf(name,"CV_FIRST_QRHSFUNC_ERR"); break; case CV_REPTD_QRHSFUNC_ERR: sprintf(name,"CV_REPTD_QRHSFUNC_ERR"); break; case CV_UNREC_QRHSFUNC_ERR: sprintf(name,"CV_UNREC_QRHSFUNC_ERR"); break; case CV_BAD_IS: sprintf(name,"CV_BAD_IS"); break; case CV_NO_SENS: sprintf(name,"CV_NO_SENS"); break; case CV_SRHSFUNC_FAIL: sprintf(name,"CV_SRHSFUNC_FAIL"); break; case CV_FIRST_SRHSFUNC_ERR: sprintf(name,"CV_FIRST_SRHSFUNC_ERR"); break; case CV_REPTD_SRHSFUNC_ERR: sprintf(name,"CV_REPTD_SRHSFUNC_ERR"); break; case CV_UNREC_SRHSFUNC_ERR: 
sprintf(name,"CV_UNREC_SRHSFUNC_ERR"); break; case CV_TOO_CLOSE: sprintf(name,"CV_TOO_CLOSE"); break; case CV_NO_ADJ: sprintf(name,"CV_NO_ADJ"); break; case CV_NO_FWD: sprintf(name,"CV_NO_FWD"); break; case CV_NO_BCK: sprintf(name,"CV_NO_BCK"); break; case CV_BAD_TB0: sprintf(name,"CV_BAD_TB0"); break; case CV_REIFWD_FAIL: sprintf(name,"CV_REIFWD_FAIL"); break; case CV_FWD_FAIL: sprintf(name,"CV_FWD_FAIL"); break; case CV_GETY_BADT: sprintf(name,"CV_GETY_BADT"); break; case CV_NLS_FAIL: sprintf(name,"CV_NLS_FAIL"); break; default: sprintf(name,"NONE"); } return(name); } StanHeaders/src/cvodes/cvodes_nls_sim.c0000644000176200001440000004474514645137106017725 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This the implementation file for the CVODES nonlinear solver interface. * ---------------------------------------------------------------------------*/ /* * When sensitivities are computed using the CV_SIMULTANEOUS approach and the * Newton solver is selected the iteraiton is a quasi-Newton method on the * combined system (by approximating the Jacobian matrix by its block diagonal) * and thus only solve linear systems with multiple right hand sides (all * sharing the same coefficient matrix - whatever iteration matrix we decide on) * we set-up the linear solver to handle N equations at a time. 
*/ #include "cvodes_impl.h" #include "sundials/sundials_math.h" #include "sundials/sundials_nvector_senswrapper.h" /* constant macros */ #define ONE RCONST(1.0) /* private functions */ static int cvNlsResidualSensSim(N_Vector ycorSim, N_Vector resSim, void* cvode_mem); static int cvNlsFPFunctionSensSim(N_Vector ycorSim, N_Vector resSim, void* cvode_mem); static int cvNlsLSetupSensSim(booleantype jbad, booleantype* jcur, void* cvode_mem); static int cvNlsLSolveSensSim(N_Vector deltaSim, void* cvode_mem); static int cvNlsConvTestSensSim(SUNNonlinearSolver NLS, N_Vector ycorSim, N_Vector delSim, realtype tol, N_Vector ewtSim, void* cvode_mem); /* ----------------------------------------------------------------------------- * Exported functions * ---------------------------------------------------------------------------*/ int CVodeSetNonlinearSolverSensSim(void *cvode_mem, SUNNonlinearSolver NLS) { CVodeMem cv_mem; int retval, is; /* Return immediately if CVode memory is NULL */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeSetNonlinearSolverSensSim", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Return immediately if NLS memory is NULL */ if (NLS == NULL) { cvProcessError(NULL, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", "NLS must be non-NULL"); return (CV_ILL_INPUT); } /* check for required nonlinear solver functions */ if ( NLS->ops->gettype == NULL || NLS->ops->solve == NULL || NLS->ops->setsysfn == NULL ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", "NLS does not support required operations"); return(CV_ILL_INPUT); } /* check that sensitivities were initialized */ if (!(cv_mem->cv_sensi)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", MSGCV_NO_SENSI); return(CV_ILL_INPUT); } /* check that simultaneous corrector was selected */ if (cv_mem->cv_ism != CV_SIMULTANEOUS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", 
"CVodeSetNonlinearSolverSensStg", "Sensitivity solution method is not CV_SIMULTANEOUS"); return(CV_ILL_INPUT); } /* free any existing nonlinear solver */ if ((cv_mem->NLSsim != NULL) && (cv_mem->ownNLSsim)) retval = SUNNonlinSolFree(cv_mem->NLSsim); /* set SUNNonlinearSolver pointer */ cv_mem->NLSsim = NLS; /* Set NLS ownership flag. If this function was called to attach the default NLS, CVODE will set the flag to SUNTRUE after this function returns. */ cv_mem->ownNLSsim = SUNFALSE; /* set the nonlinear system function */ if (SUNNonlinSolGetType(NLS) == SUNNONLINEARSOLVER_ROOTFIND) { retval = SUNNonlinSolSetSysFn(cv_mem->NLSsim, cvNlsResidualSensSim); } else if (SUNNonlinSolGetType(NLS) == SUNNONLINEARSOLVER_FIXEDPOINT) { retval = SUNNonlinSolSetSysFn(cv_mem->NLSsim, cvNlsFPFunctionSensSim); } else { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", "Invalid nonlinear solver type"); return(CV_ILL_INPUT); } if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", "Setting nonlinear system function failed"); return(CV_ILL_INPUT); } /* set convergence test function */ retval = SUNNonlinSolSetConvTestFn(cv_mem->NLSsim, cvNlsConvTestSensSim, cvode_mem); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", "Setting convergence test function failed"); return(CV_ILL_INPUT); } /* set max allowed nonlinear iterations */ retval = SUNNonlinSolSetMaxIters(cv_mem->NLSsim, NLS_MAXCOR); if (retval != CV_SUCCESS) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", "Setting maximum number of nonlinear iterations failed"); return(CV_ILL_INPUT); } /* create vector wrappers if necessary */ if (cv_mem->simMallocDone == SUNFALSE) { cv_mem->zn0Sim = N_VNewEmpty_SensWrapper(cv_mem->cv_Ns+1, cv_mem->cv_sunctx); if (cv_mem->zn0Sim == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSetNonlinearSolverSensSim", 
MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->ycorSim = N_VNewEmpty_SensWrapper(cv_mem->cv_Ns+1, cv_mem->cv_sunctx); if (cv_mem->ycorSim == NULL) { N_VDestroy(cv_mem->zn0Sim); cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSetNonlinearSolverSensSim", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->ewtSim = N_VNewEmpty_SensWrapper(cv_mem->cv_Ns+1, cv_mem->cv_sunctx); if (cv_mem->ewtSim == NULL) { N_VDestroy(cv_mem->zn0Sim); N_VDestroy(cv_mem->ycorSim); cvProcessError(cv_mem, CV_MEM_FAIL, "CVODES", "CVodeSetNonlinearSolverSensSim", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } cv_mem->simMallocDone = SUNTRUE; } /* attach vectors to vector wrappers */ NV_VEC_SW(cv_mem->zn0Sim, 0) = cv_mem->cv_zn[0]; NV_VEC_SW(cv_mem->ycorSim, 0) = cv_mem->cv_acor; NV_VEC_SW(cv_mem->ewtSim, 0) = cv_mem->cv_ewt; for (is=0; is < cv_mem->cv_Ns; is++) { NV_VEC_SW(cv_mem->zn0Sim, is+1) = cv_mem->cv_znS[0][is]; NV_VEC_SW(cv_mem->ycorSim, is+1) = cv_mem->cv_acorS[is]; NV_VEC_SW(cv_mem->ewtSim, is+1) = cv_mem->cv_ewtS[is]; } /* Reset the acnrmcur flag to SUNFALSE */ cv_mem->cv_acnrmcur = SUNFALSE; /* Set the nonlinear system RHS function */ if (!(cv_mem->cv_f)) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODES", "CVodeSetNonlinearSolverSensSim", "The ODE RHS function is NULL"); return(CV_ILL_INPUT); } cv_mem->nls_f = cv_mem->cv_f; return(CV_SUCCESS); } /*--------------------------------------------------------------- CVodeGetNonlinearSystemDataSens: This routine provides access to the relevant data needed to compute the nonlinear system function. 
---------------------------------------------------------------*/ int CVodeGetNonlinearSystemDataSens(void *cvode_mem, realtype *tcur, N_Vector **ySpred, N_Vector **ySn, realtype *gamma, realtype *rl1, N_Vector **znS1, void **user_data) { CVodeMem cv_mem; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "CVodeGetNonlinearSystemDataSens", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; *tcur = cv_mem->cv_tn; *ySpred = cv_mem->cv_znS[0]; *ySn = cv_mem->cv_yS; *gamma = cv_mem->cv_gamma; *rl1 = cv_mem->cv_rl1; *znS1 = cv_mem->cv_znS[1]; *user_data = cv_mem->cv_user_data; return(CV_SUCCESS); } /* ----------------------------------------------------------------------------- * Private functions * ---------------------------------------------------------------------------*/ int cvNlsInitSensSim(CVodeMem cvode_mem) { int retval; /* set the linear solver setup wrapper function */ if (cvode_mem->cv_lsetup) retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLSsim, cvNlsLSetupSensSim); else retval = SUNNonlinSolSetLSetupFn(cvode_mem->NLSsim, NULL); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensSim", "Setting the linear solver setup function failed"); return(CV_NLS_INIT_FAIL); } /* set the linear solver solve wrapper function */ if (cvode_mem->cv_lsolve) retval = SUNNonlinSolSetLSolveFn(cvode_mem->NLSsim, cvNlsLSolveSensSim); else retval = SUNNonlinSolSetLSolveFn(cvode_mem->NLSsim, NULL); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensSim", "Setting linear solver solve function failed"); return(CV_NLS_INIT_FAIL); } /* initialize nonlinear solver */ retval = SUNNonlinSolInitialize(cvode_mem->NLSsim); if (retval != CV_SUCCESS) { cvProcessError(cvode_mem, CV_ILL_INPUT, "CVODES", "cvNlsInitSensSim", MSGCV_NLS_INIT_FAIL); return(CV_NLS_INIT_FAIL); } return(CV_SUCCESS); } static int cvNlsLSetupSensSim(booleantype jbad, booleantype* jcur, void* cvode_mem) { 
CVodeMem cv_mem; int retval; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsLSetupSensSim", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* if the nonlinear solver marked the Jacobian as bad update convfail */ if (jbad) cv_mem->convfail = CV_FAIL_BAD_J; /* setup the linear solver */ retval = cv_mem->cv_lsetup(cv_mem, cv_mem->convfail, cv_mem->cv_y, cv_mem->cv_ftemp, &(cv_mem->cv_jcur), cv_mem->cv_vtemp1, cv_mem->cv_vtemp2, cv_mem->cv_vtemp3); cv_mem->cv_nsetups++; /* update Jacobian status */ *jcur = cv_mem->cv_jcur; cv_mem->cv_forceSetup = SUNFALSE; cv_mem->cv_gamrat = ONE; cv_mem->cv_gammap = cv_mem->cv_gamma; cv_mem->cv_crate = ONE; cv_mem->cv_crateS = ONE; cv_mem->cv_nstlp = cv_mem->cv_nst; if (retval < 0) return(CV_LSETUP_FAIL); if (retval > 0) return(SUN_NLS_CONV_RECVR); return(CV_SUCCESS); } static int cvNlsLSolveSensSim(N_Vector deltaSim, void* cvode_mem) { CVodeMem cv_mem; int retval, is; N_Vector delta; N_Vector *deltaS; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsLSolveSensSim", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract state delta from the vector wrapper */ delta = NV_VEC_SW(deltaSim,0); /* solve the state linear system */ retval = cv_mem->cv_lsolve(cv_mem, delta, cv_mem->cv_ewt, cv_mem->cv_y, cv_mem->cv_ftemp); if (retval < 0) return(CV_LSOLVE_FAIL); if (retval > 0) return(SUN_NLS_CONV_RECVR); /* extract sensitivity deltas from the vector wrapper */ deltaS = NV_VECS_SW(deltaSim)+1; /* solve the sensitivity linear systems */ for (is=0; iscv_Ns; is++) { retval = cv_mem->cv_lsolve(cv_mem, deltaS[is], cv_mem->cv_ewtS[is], cv_mem->cv_y, cv_mem->cv_ftemp); if (retval < 0) return(CV_LSOLVE_FAIL); if (retval > 0) return(SUN_NLS_CONV_RECVR); } return(CV_SUCCESS); } static int cvNlsConvTestSensSim(SUNNonlinearSolver NLS, N_Vector ycorSim, N_Vector deltaSim, realtype tol, N_Vector ewtSim, void* cvode_mem) { CVodeMem cv_mem; int m, retval; 
realtype del, delS, Del; realtype dcon; N_Vector ycor, delta, ewt; N_Vector *deltaS, *ewtS; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsConvTestSensSim", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract the current state and sensitivity corrections */ ycor = NV_VEC_SW(ycorSim,0); /* extract state and sensitivity deltas */ delta = NV_VEC_SW(deltaSim,0); deltaS = NV_VECS_SW(deltaSim)+1; /* extract state and sensitivity error weights */ ewt = NV_VEC_SW(ewtSim,0); ewtS = NV_VECS_SW(ewtSim)+1; /* compute the norm of the state and sensitivity corrections */ del = N_VWrmsNorm(delta, ewt); delS = cvSensUpdateNorm(cv_mem, del, deltaS, ewtS); /* norm used in error test */ Del = delS; /* get the current nonlinear solver iteration count */ retval = SUNNonlinSolGetCurIter(NLS, &m); if (retval != CV_SUCCESS) return(CV_MEM_NULL); /* Test for convergence. If m > 0, an estimate of the convergence rate constant is stored in crate, and used in the test. Recall that, even when errconS=SUNFALSE, all variables are used in the convergence test. Hence, we use Del (and not del). However, acnrm is used in the error test and thus it has different forms depending on errconS (and this explains why we have to carry around del and delS). */ if (m > 0) { cv_mem->cv_crate = SUNMAX(CRDOWN * cv_mem->cv_crate, Del/cv_mem->cv_delp); } dcon = Del * SUNMIN(ONE, cv_mem->cv_crate) / tol; /* check if nonlinear system was solved successfully */ if (dcon <= ONE) { if (m == 0) { cv_mem->cv_acnrm = (cv_mem->cv_errconS) ? delS : del; } else { cv_mem->cv_acnrm = (cv_mem->cv_errconS) ? 
N_VWrmsNorm(ycorSim, ewtSim) : N_VWrmsNorm(ycor, ewt); } cv_mem->cv_acnrmcur = SUNTRUE; return(CV_SUCCESS); } /* check if the iteration seems to be diverging */ if ((m >= 1) && (Del > RDIV*cv_mem->cv_delp)) return(SUN_NLS_CONV_RECVR); /* Save norm of correction and loop again */ cv_mem->cv_delp = Del; /* Not yet converged */ return(SUN_NLS_CONTINUE); } static int cvNlsResidualSensSim(N_Vector ycorSim, N_Vector resSim, void* cvode_mem) { CVodeMem cv_mem; int retval; N_Vector ycor, res; N_Vector *ycorS, *resS; realtype cvals[3]; N_Vector* XXvecs[3]; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsResidualSensSim", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract state and residual vectors from the vector wrapper */ ycor = NV_VEC_SW(ycorSim,0); res = NV_VEC_SW(resSim,0); /* update the state based on the current correction */ N_VLinearSum(ONE, cv_mem->cv_zn[0], ONE, ycor, cv_mem->cv_y); /* evaluate the rhs function */ retval = cv_mem->nls_f(cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_ftemp, cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) return(CV_RHSFUNC_FAIL); if (retval > 0) return(RHSFUNC_RECVR); /* compute the resiudal */ N_VLinearSum(cv_mem->cv_rl1, cv_mem->cv_zn[1], ONE, ycor, res); N_VLinearSum(-cv_mem->cv_gamma, cv_mem->cv_ftemp, ONE, res, res); /* extract sensitivity and residual vectors from the vector wrapper */ ycorS = NV_VECS_SW(ycorSim)+1; resS = NV_VECS_SW(resSim)+1; /* update sensitivities based on the current correction */ retval = N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[0], ONE, ycorS, cv_mem->cv_yS); if (retval != CV_SUCCESS) return(CV_VECTOROP_ERR); /* evaluate the sensitivity rhs function */ retval = cvSensRhsWrapper(cv_mem, cv_mem->cv_tn, cv_mem->cv_y, cv_mem->cv_ftemp, cv_mem->cv_yS, cv_mem->cv_ftempS, cv_mem->cv_vtemp1, cv_mem->cv_vtemp2); if (retval < 0) return(CV_SRHSFUNC_FAIL); if (retval > 0) return(SRHSFUNC_RECVR); /* compute the sensitivity resiudal */ 
cvals[0] = cv_mem->cv_rl1; XXvecs[0] = cv_mem->cv_znS[1]; cvals[1] = ONE; XXvecs[1] = ycorS; cvals[2] = -cv_mem->cv_gamma; XXvecs[2] = cv_mem->cv_ftempS; retval = N_VLinearCombinationVectorArray(cv_mem->cv_Ns, 3, cvals, XXvecs, resS); if (retval != CV_SUCCESS) return(CV_VECTOROP_ERR); return(CV_SUCCESS); } static int cvNlsFPFunctionSensSim(N_Vector ycorSim, N_Vector resSim, void* cvode_mem) { CVodeMem cv_mem; int retval, is; N_Vector ycor, res; N_Vector *ycorS, *resS; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODES", "cvNlsFPFunctionSensSim", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* extract state and residual vectors from the vector wrapper */ ycor = NV_VEC_SW(ycorSim,0); res = NV_VEC_SW(resSim,0); /* update the state based on the current correction */ N_VLinearSum(ONE, cv_mem->cv_zn[0], ONE, ycor, cv_mem->cv_y); /* evaluate the rhs function */ retval = cv_mem->nls_f(cv_mem->cv_tn, cv_mem->cv_y, res, cv_mem->cv_user_data); cv_mem->cv_nfe++; if (retval < 0) return(CV_RHSFUNC_FAIL); if (retval > 0) return(RHSFUNC_RECVR); /* evaluate fixed point function */ N_VLinearSum(cv_mem->cv_h, res, -ONE, cv_mem->cv_zn[1], res); N_VScale(cv_mem->cv_rl1, res, res); /* extract sensitivity and residual vectors from the vector wrapper */ ycorS = NV_VECS_SW(ycorSim)+1; resS = NV_VECS_SW(resSim)+1; /* update the sensitivities based on the current correction */ N_VLinearSumVectorArray(cv_mem->cv_Ns, ONE, cv_mem->cv_znS[0], ONE, ycorS, cv_mem->cv_yS); /* evaluate the sensitivity rhs function */ retval = cvSensRhsWrapper(cv_mem, cv_mem->cv_tn, cv_mem->cv_y, res, cv_mem->cv_yS, resS, cv_mem->cv_vtemp1, cv_mem->cv_vtemp2); if (retval < 0) return(CV_SRHSFUNC_FAIL); if (retval > 0) return(SRHSFUNC_RECVR); /* evaluate sensitivity fixed point function */ for (is=0; iscv_Ns; is++) { N_VLinearSum(cv_mem->cv_h, resS[is], -ONE, cv_mem->cv_znS[1][is], resS[is]); N_VScale(cv_mem->cv_rl1, resS[is], resS[is]); } return(CV_SUCCESS); } 
StanHeaders/src/cvodes/LICENSE0000644000176200001440000000305014645137106015537 0ustar liggesusersBSD 3-Clause License Copyright (c) 2002-2022, Lawrence Livermore National Security and Southern Methodist University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. StanHeaders/src/cvodes/cvodes_direct.c0000644000176200001440000000456414645137106017526 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. 
Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Header file for the deprecated direct linear solver interface in * CVODES; these routines now just wrap the updated CVODE generic * linear solver interface in cvodes_ls.h. * -----------------------------------------------------------------*/ #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= Exported Functions (wrappers for equivalent routines in cvodes_ls.h) =================================================================*/ int CVDlsSetLinearSolver(void *cvode_mem, SUNLinearSolver LS, SUNMatrix A) { return(CVodeSetLinearSolver(cvode_mem, LS, A)); } int CVDlsSetJacFn(void *cvode_mem, CVDlsJacFn jac) { return(CVodeSetJacFn(cvode_mem, jac)); } int CVDlsGetWorkSpace(void *cvode_mem, long int *lenrwLS, long int *leniwLS) { return(CVodeGetLinWorkSpace(cvode_mem, lenrwLS, leniwLS)); } int CVDlsGetNumJacEvals(void *cvode_mem, long int *njevals) { return(CVodeGetNumJacEvals(cvode_mem, njevals)); } int CVDlsGetNumRhsEvals(void *cvode_mem, long int *nfevalsLS) { return(CVodeGetNumLinRhsEvals(cvode_mem, nfevalsLS)); } int CVDlsGetLastFlag(void *cvode_mem, long int *flag) { return(CVodeGetLastLinFlag(cvode_mem, flag)); } char *CVDlsGetReturnFlagName(long int flag) { return(CVodeGetLinReturnFlagName(flag)); } int CVDlsSetLinearSolverB(void *cvode_mem, int which, SUNLinearSolver LS, SUNMatrix A) { return(CVodeSetLinearSolverB(cvode_mem, which, LS, A)); } int CVDlsSetJacFnB(void *cvode_mem, int which, CVDlsJacFnB jacB) { 
return(CVodeSetJacFnB(cvode_mem, which, jacB)); } int CVDlsSetJacFnBS(void *cvode_mem, int which, CVDlsJacFnBS jacBS) { return(CVodeSetJacFnBS(cvode_mem, which, jacBS)); } #ifdef __cplusplus } #endif StanHeaders/src/cvodes/fmod/0000755000176200001440000000000014511135055015452 5ustar liggesusersStanHeaders/src/cvodes/fmod/fcvodes_mod.c0000644000176200001440000027057314645137106020133 0ustar liggesusers/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 4.0.0 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ /* --------------------------------------------------------------- * Programmer(s): Auto-generated by swig. * --------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------*/ /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* qualifier for exported *const* global data variables*/ #ifndef SWIGEXTERN # ifdef __cplusplus # define SWIGEXTERN extern # else # define SWIGEXTERN # endif #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef 
GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. 
*/ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include #define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \ {STAN_SUNDIALS_PRINTF("In " DECL ": " MSG); assert(0); RETURNNULL; } enum { SWIG_MEM_OWN = 0x01, SWIG_MEM_RVALUE = 0x02, SWIG_MEM_CONST = 0x04 }; #define SWIG_check_mutable(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL) \ if ((SWIG_CLASS_WRAPPER).cmemflags & SWIG_MEM_CONST) { \ SWIG_exception_impl(FUNCNAME, SWIG_TypeError, \ "Cannot pass const " TYPENAME " (class " FNAME ") " \ "as a mutable reference", \ RETURNNULL); \ } #define SWIG_check_nonnull(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL) \ if (!(SWIG_CLASS_WRAPPER).cptr) { \ SWIG_exception_impl(FUNCNAME, SWIG_TypeError, \ "Cannot pass null " TYPENAME " (class " FNAME ") " \ "as a reference", RETURNNULL); \ } #define SWIG_check_mutable_nonnull(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL) \ SWIG_check_nonnull(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL); \ SWIG_check_mutable(SWIG_CLASS_WRAPPER, TYPENAME, FNAME, FUNCNAME, RETURNNULL); #include #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # ifndef snprintf # define snprintf _snprintf # endif #endif /* Support for the `contract` feature. * * Note that RETURNNULL is first because it's inserted via a 'Replaceall' in * the fortran.cxx file. 
*/ #define SWIG_contract_assert(RETURNNULL, EXPR, MSG) \ if (!(EXPR)) { SWIG_exception_impl("$decl", SWIG_ValueError, MSG, RETURNNULL); } #define SWIGVERSION 0x040000 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) #include "cvodes/cvodes.h" #include "cvodes/cvodes_bandpre.h" #include "cvodes/cvodes_bbdpre.h" #include "cvodes/cvodes_diag.h" #include "cvodes/cvodes_ls.h" #include #ifdef _MSC_VER # ifndef strtoull # define strtoull _strtoui64 # endif # ifndef strtoll # define strtoll _strtoi64 # endif #endif typedef struct { void* data; size_t size; } SwigArrayWrapper; SWIGINTERN SwigArrayWrapper SwigArrayWrapper_uninitialized() { SwigArrayWrapper result; result.data = NULL; result.size = 0; return result; } #include typedef struct { void* cptr; int cmemflags; } SwigClassWrapper; SWIGINTERN SwigClassWrapper SwigClassWrapper_uninitialized() { SwigClassWrapper result; result.cptr = NULL; result.cmemflags = 0; return result; } SWIGINTERN void SWIG_assign(SwigClassWrapper* self, SwigClassWrapper other) { if (self->cptr == NULL) { /* LHS is unassigned */ if (other.cmemflags & SWIG_MEM_RVALUE) { /* Capture pointer from RHS, clear 'moving' flag */ self->cptr = other.cptr; self->cmemflags = other.cmemflags & (~SWIG_MEM_RVALUE); } else { /* Become a reference to the other object */ self->cptr = other.cptr; self->cmemflags = other.cmemflags & (~SWIG_MEM_OWN); } } else if (other.cptr == NULL) { /* Replace LHS with a null pointer */ free(self->cptr); *self = SwigClassWrapper_uninitialized(); } else { if (self->cmemflags & SWIG_MEM_OWN) { free(self->cptr); } self->cptr = other.cptr; if (other.cmemflags & SWIG_MEM_RVALUE) { /* Capture RHS */ self->cmemflags = other.cmemflags & ~SWIG_MEM_RVALUE; } else { /* Point to RHS */ self->cmemflags = other.cmemflags & ~SWIG_MEM_OWN; } } } SWIGEXPORT void * _wrap_FCVodeCreate(int const *farg1, void *farg2) { void * fresult ; int arg1 
; SUNContext arg2 = (SUNContext) 0 ; void *result = 0 ; arg1 = (int)(*farg1); arg2 = (SUNContext)(farg2); result = (void *)CVodeCreate(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT int _wrap_FCVodeInit(void *farg1, CVRhsFn farg2, double const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; CVRhsFn arg2 = (CVRhsFn) 0 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVRhsFn)(farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeInit(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeReInit(void *farg1, double const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeReInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSStolerances(void *farg1, double const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeSStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSVtolerances(void *farg1, double const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeSVtolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeWFtolerances(void *farg1, CVEwtFn farg2) { int fresult ; void *arg1 = (void *) 0 ; CVEwtFn arg2 = (CVEwtFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVEwtFn)(farg2); result = (int)CVodeWFtolerances(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetConstraints(void 
*farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)CVodeSetConstraints(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetErrFile(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; FILE *arg2 = (FILE *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (FILE *)(farg2); result = (int)CVodeSetErrFile(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetErrHandlerFn(void *farg1, CVErrHandlerFn farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; CVErrHandlerFn arg2 = (CVErrHandlerFn) 0 ; void *arg3 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVErrHandlerFn)(farg2); arg3 = (void *)(farg3); result = (int)CVodeSetErrHandlerFn(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetInitStep(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)CVodeSetInitStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLSetupFrequency(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)CVodeSetLSetupFrequency(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxConvFails(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetMaxConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxErrTestFails(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetMaxErrTestFails(arg1,arg2); fresult 
= (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxHnilWarns(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetMaxHnilWarns(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxNonlinIters(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetMaxNonlinIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxNumSteps(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)CVodeSetMaxNumSteps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxOrd(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetMaxOrd(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxStep(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)CVodeSetMaxStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMinStep(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)CVodeSetMinStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMonitorFn(void *farg1, CVMonitorFn farg2) { int fresult ; void *arg1 = (void *) 0 ; CVMonitorFn arg2 = (CVMonitorFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVMonitorFn)(farg2); result = (int)CVodeSetMonitorFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT 
int _wrap_FCVodeSetMonitorFrequency(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)CVodeSetMonitorFrequency(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNlsRhsFn(void *farg1, CVRhsFn farg2) { int fresult ; void *arg1 = (void *) 0 ; CVRhsFn arg2 = (CVRhsFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVRhsFn)(farg2); result = (int)CVodeSetNlsRhsFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNonlinConvCoef(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)CVodeSetNonlinConvCoef(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNonlinearSolver(void *farg1, SUNNonlinearSolver farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNNonlinearSolver arg2 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNNonlinearSolver)(farg2); result = (int)CVodeSetNonlinearSolver(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetStabLimDet(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetStabLimDet(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetStopTime(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)CVodeSetStopTime(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetUserData(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; void *arg2 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (void *)(farg2); result = (int)CVodeSetUserData(arg1,arg2); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeRootInit(void *farg1, int const *farg2, CVRootFn farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVRootFn arg3 = (CVRootFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVRootFn)(farg3); result = (int)CVodeRootInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetRootDirection(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)CVodeSetRootDirection(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNoInactiveRootWarn(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)CVodeSetNoInactiveRootWarn(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVode(void *farg1, double const *farg2, N_Vector farg3, double *farg4, int const *farg5) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; realtype *arg4 = (realtype *) 0 ; int arg5 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); arg4 = (realtype *)(farg4); arg5 = (int)(*farg5); result = (int)CVode(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeComputeState(void *farg1, N_Vector farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeComputeState(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeComputeStateSens(void *farg1, void *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); arg3 = 
(N_Vector *)(farg3); result = (int)CVodeComputeStateSens(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeComputeStateSens1(void *farg1, int const *farg2, N_Vector farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; N_Vector arg3 = (N_Vector) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeComputeStateSens1(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetDky(void *farg1, double const *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeGetDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVodeGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumSteps(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumSteps(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumLinSolvSetups(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = 
(long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumLinSolvSetups(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetLastOrder(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)CVodeGetLastOrder(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetCurrentOrder(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)CVodeGetCurrentOrder(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetCurrentGamma(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)CVodeGetCurrentGamma(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumStabLimOrderReds(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumStabLimOrderReds(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetActualInitStep(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)CVodeGetActualInitStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetLastStep(void *farg1, double *farg2) { int 
fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)CVodeGetLastStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetCurrentStep(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)CVodeGetCurrentStep(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetCurrentState(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)CVodeGetCurrentState(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetCurrentStateSens(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector **arg2 = (N_Vector **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector **)(farg2); result = (int)CVodeGetCurrentStateSens(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetCurrentSensSolveIndex(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)CVodeGetCurrentSensSolveIndex(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetCurrentTime(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)CVodeGetCurrentTime(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetTolScaleFactor(void *farg1, double *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); result = (int)CVodeGetTolScaleFactor(arg1,arg2); fresult = 
(int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetErrWeights(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)CVodeGetErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetEstLocalErrors(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)CVodeGetEstLocalErrors(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumGEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumGEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetRootInfo(void *farg1, int *farg2) { int fresult ; void *arg1 = (void *) 0 ; int *arg2 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int *)(farg2); result = (int)CVodeGetRootInfo(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetIntegratorStats(void *farg1, long *farg2, long *farg3, long *farg4, long *farg5, int *farg6, int *farg7, double *farg8, double *farg9, double *farg10, double *farg11) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; long *arg4 = (long *) 0 ; long *arg5 = (long *) 0 ; int *arg6 = (int *) 0 ; int *arg7 = (int *) 0 ; realtype *arg8 = (realtype *) 0 ; realtype *arg9 = (realtype *) 0 ; realtype *arg10 = (realtype *) 0 ; realtype *arg11 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); arg4 = (long *)(farg4); arg5 = (long *)(farg5); arg6 = (int *)(farg6); arg7 = (int *)(farg7); arg8 = (realtype *)(farg8); arg9 = (realtype *)(farg9); arg10 = (realtype *)(farg10); arg11 = (realtype 
*)(farg11); result = (int)CVodeGetIntegratorStats(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNonlinearSystemData(void *farg1, double *farg2, void *farg3, void *farg4, void *farg5, double *farg6, double *farg7, void *farg8, void *farg9) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; N_Vector *arg4 = (N_Vector *) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; realtype *arg6 = (realtype *) 0 ; realtype *arg7 = (realtype *) 0 ; N_Vector *arg8 = (N_Vector *) 0 ; void **arg9 = (void **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); arg4 = (N_Vector *)(farg4); arg5 = (N_Vector *)(farg5); arg6 = (realtype *)(farg6); arg7 = (realtype *)(farg7); arg8 = (N_Vector *)(farg8); arg9 = (void **)(farg9); result = (int)CVodeGetNonlinearSystemData(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNonlinearSystemDataSens(void *farg1, double *farg2, void *farg3, void *farg4, double *farg5, double *farg6, void *farg7, void *farg8) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector **arg3 = (N_Vector **) 0 ; N_Vector **arg4 = (N_Vector **) 0 ; realtype *arg5 = (realtype *) 0 ; realtype *arg6 = (realtype *) 0 ; N_Vector **arg7 = (N_Vector **) 0 ; void **arg8 = (void **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector **)(farg3); arg4 = (N_Vector **)(farg4); arg5 = (realtype *)(farg5); arg6 = (realtype *)(farg6); arg7 = (N_Vector **)(farg7); arg8 = (void **)(farg8); result = (int)CVodeGetNonlinearSystemDataSens(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumNonlinSolvIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void 
*)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumNonlinSolvIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumNonlinSolvConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumNonlinSolvConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNonlinSolvStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVodeGetNonlinSolvStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT SwigArrayWrapper _wrap_FCVodeGetReturnFlagName(long const *farg1) { SwigArrayWrapper fresult ; long arg1 ; char *result = 0 ; arg1 = (long)(*farg1); result = (char *)CVodeGetReturnFlagName(arg1); fresult.size = strlen((const char*)(result)); fresult.data = (char *)(result); return fresult; } SWIGEXPORT void _wrap_FCVodeFree(void *farg1) { void **arg1 = (void **) 0 ; arg1 = (void **)(farg1); CVodeFree(arg1); } SWIGEXPORT int _wrap_FCVodeSetJacTimesRhsFn(void *farg1, CVRhsFn farg2) { int fresult ; void *arg1 = (void *) 0 ; CVRhsFn arg2 = (CVRhsFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVRhsFn)(farg2); result = (int)CVodeSetJacTimesRhsFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadInit(void *farg1, CVQuadRhsFn farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; CVQuadRhsFn arg2 = (CVQuadRhsFn) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVQuadRhsFn)(farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeQuadInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadReInit(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; 
N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)CVodeQuadReInit(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSStolerances(void *farg1, double const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeQuadSStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSVtolerances(void *farg1, double const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeQuadSVtolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetQuadErrCon(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetQuadErrCon(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuad(void *farg1, double *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeGetQuad(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadDky(void *farg1, double const *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeGetQuadDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FCVodeGetQuadNumRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetQuadNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetQuadNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadErrWeights(void *farg1, N_Vector farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector arg2 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector)(farg2); result = (int)CVodeGetQuadErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVodeGetQuadStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FCVodeQuadFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); CVodeQuadFree(arg1); } SWIGEXPORT int _wrap_FCVodeSensInit(void *farg1, int const *farg2, int const *farg3, CVSensRhsFn farg4, void *farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; CVSensRhsFn arg4 = (CVSensRhsFn) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (CVSensRhsFn)(farg4); arg5 = (N_Vector *)(farg5); result = (int)CVodeSensInit(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSensInit1(void *farg1, int const *farg2, int const *farg3, CVSensRhs1Fn farg4, void *farg5) { int fresult ; void *arg1 = (void *) 0 
; int arg2 ; int arg3 ; CVSensRhs1Fn arg4 = (CVSensRhs1Fn) 0 ; N_Vector *arg5 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); arg4 = (CVSensRhs1Fn)(farg4); arg5 = (N_Vector *)(farg5); result = (int)CVodeSensInit1(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSensReInit(void *farg1, int const *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)CVodeSensReInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSensSStolerances(void *farg1, double const *farg2, double *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype *arg3 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype *)(farg3); result = (int)CVodeSensSStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSensSVtolerances(void *farg1, double const *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)CVodeSensSVtolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSensEEtolerances(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)CVodeSensEEtolerances(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetSensDQMethod(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeSetSensDQMethod(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT 
int _wrap_FCVodeSetSensErrCon(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetSensErrCon(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetSensMaxNonlinIters(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetSensMaxNonlinIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetSensParams(void *farg1, double *farg2, double *farg3, int *farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; realtype *arg3 = (realtype *) 0 ; int *arg4 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (realtype *)(farg3); arg4 = (int *)(farg4); result = (int)CVodeSetSensParams(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNonlinearSolverSensSim(void *farg1, SUNNonlinearSolver farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNNonlinearSolver arg2 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNNonlinearSolver)(farg2); result = (int)CVodeSetNonlinearSolverSensSim(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNonlinearSolverSensStg(void *farg1, SUNNonlinearSolver farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNNonlinearSolver arg2 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNNonlinearSolver)(farg2); result = (int)CVodeSetNonlinearSolverSensStg(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNonlinearSolverSensStg1(void *farg1, SUNNonlinearSolver farg2) { int fresult ; void *arg1 = (void *) 0 ; SUNNonlinearSolver arg2 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNNonlinearSolver)(farg2); result = 
(int)CVodeSetNonlinearSolverSensStg1(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSensToggleOff(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)CVodeSensToggleOff(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSens(void *farg1, double *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); result = (int)CVodeGetSens(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSens1(void *farg1, double *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeGetSens1(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensDky(void *farg1, double const *farg2, int const *farg3, void *farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector *)(farg4); result = (int)CVodeGetSensDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensDky1(void *farg1, double const *farg2, int const *farg3, int const *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; int arg4 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (N_Vector)(farg5); result = (int)CVodeGetSensDky1(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FCVodeGetSensNumRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetSensNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumRhsEvalsSens(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumRhsEvalsSens(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetSensNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensNumLinSolvSetups(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetSensNumLinSolvSetups(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensErrWeights(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)CVodeGetSensErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensStats(void *farg1, long *farg2, long *farg3, long *farg4, long *farg5) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; long *arg4 = (long *) 0 ; long *arg5 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); arg4 = (long *)(farg4); arg5 = (long *)(farg5); result = (int)CVodeGetSensStats(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FCVodeGetSensNumNonlinSolvIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetSensNumNonlinSolvIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensNumNonlinSolvConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetSensNumNonlinSolvConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetSensNonlinSolvStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVodeGetSensNonlinSolvStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetStgrSensNumNonlinSolvIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetStgrSensNumNonlinSolvIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetStgrSensNumNonlinSolvConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetStgrSensNumNonlinSolvConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetStgrSensNonlinSolvStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVodeGetStgrSensNonlinSolvStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } 
SWIGEXPORT void _wrap_FCVodeSensFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); CVodeSensFree(arg1); } SWIGEXPORT int _wrap_FCVodeQuadSensInit(void *farg1, CVQuadSensRhsFn farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; CVQuadSensRhsFn arg2 = (CVQuadSensRhsFn) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVQuadSensRhsFn)(farg2); arg3 = (N_Vector *)(farg3); result = (int)CVodeQuadSensInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSensReInit(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)CVodeQuadSensReInit(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSensSStolerances(void *farg1, double const *farg2, double *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; realtype *arg3 = (realtype *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (realtype *)(farg3); result = (int)CVodeQuadSensSStolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSensSVtolerances(void *farg1, double const *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector *)(farg3); result = (int)CVodeQuadSensSVtolerances(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSensEEtolerances(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)CVodeQuadSensEEtolerances(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetQuadSensErrCon(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); 
result = (int)CVodeSetQuadSensErrCon(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSens(void *farg1, double *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; N_Vector *arg3 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (N_Vector *)(farg3); result = (int)CVodeGetQuadSens(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSens1(void *farg1, double *farg2, int const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype *arg2 = (realtype *) 0 ; int arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype *)(farg2); arg3 = (int)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeGetQuadSens1(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSensDky(void *farg1, double const *farg2, int const *farg3, void *farg4) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; N_Vector *arg4 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (N_Vector *)(farg4); result = (int)CVodeGetQuadSensDky(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSensDky1(void *farg1, double const *farg2, int const *farg3, int const *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; int arg4 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); arg4 = (int)(*farg4); arg5 = (N_Vector)(farg5); result = (int)CVodeGetQuadSensDky1(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSensNumRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); 
result = (int)CVodeGetQuadSensNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSensNumErrTestFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetQuadSensNumErrTestFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSensErrWeights(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; N_Vector *arg2 = (N_Vector *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (N_Vector *)(farg2); result = (int)CVodeGetQuadSensErrWeights(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadSensStats(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVodeGetQuadSensStats(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FCVodeQuadSensFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); CVodeQuadSensFree(arg1); } SWIGEXPORT int _wrap_FCVodeAdjInit(void *farg1, long const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); arg3 = (int)(*farg3); result = (int)CVodeAdjInit(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeAdjReInit(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)CVodeAdjReInit(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_FCVodeAdjFree(void *farg1) { void *arg1 = (void *) 0 ; arg1 = (void *)(farg1); CVodeAdjFree(arg1); } SWIGEXPORT int _wrap_FCVodeCreateB(void *farg1, int const *farg2, int *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int *arg3 = 
(int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int *)(farg3); result = (int)CVodeCreateB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeInitB(void *farg1, int const *farg2, CVRhsFnB farg3, double const *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVRhsFnB arg3 = (CVRhsFnB) 0 ; realtype arg4 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVRhsFnB)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector)(farg5); result = (int)CVodeInitB(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeInitBS(void *farg1, int const *farg2, CVRhsFnBS farg3, double const *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVRhsFnBS arg3 = (CVRhsFnBS) 0 ; realtype arg4 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVRhsFnBS)(farg3); arg4 = (realtype)(*farg4); arg5 = (N_Vector)(farg5); result = (int)CVodeInitBS(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeReInitB(void *farg1, int const *farg2, double const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeReInitB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSStolerancesB(void *farg1, int const *farg2, double const *farg3, double const *farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; realtype arg4 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (realtype)(*farg4); result = (int)CVodeSStolerancesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int 
_wrap_FCVodeSVtolerancesB(void *farg1, int const *farg2, double const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeSVtolerancesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadInitB(void *farg1, int const *farg2, CVQuadRhsFnB farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVQuadRhsFnB arg3 = (CVQuadRhsFnB) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVQuadRhsFnB)(farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeQuadInitB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadInitBS(void *farg1, int const *farg2, CVQuadRhsFnBS farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVQuadRhsFnBS arg3 = (CVQuadRhsFnBS) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVQuadRhsFnBS)(farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeQuadInitBS(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadReInitB(void *farg1, int const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeQuadReInitB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSStolerancesB(void *farg1, int const *farg2, double const *farg3, double const *farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; realtype arg4 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (realtype)(*farg4); result = (int)CVodeQuadSStolerancesB(arg1,arg2,arg3,arg4); 
fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeQuadSVtolerancesB(void *farg1, int const *farg2, double const *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeQuadSVtolerancesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeF(void *farg1, double const *farg2, N_Vector farg3, double *farg4, int const *farg5, int *farg6) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; realtype *arg4 = (realtype *) 0 ; int arg5 ; int *arg6 = (int *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); arg4 = (realtype *)(farg4); arg5 = (int)(*farg5); arg6 = (int *)(farg6); result = (int)CVodeF(arg1,arg2,arg3,arg4,arg5,arg6); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeB(void *farg1, double const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (int)(*farg3); result = (int)CVodeB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetAdjNoSensi(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)CVodeSetAdjNoSensi(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetUserDataB(void *farg1, int const *farg2, void *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; void *arg3 = (void *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (void *)(farg3); result = (int)CVodeSetUserDataB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxOrdB(void *farg1, int const *farg2, int const *farg3) { int fresult ; void *arg1 = (void 
*) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)CVodeSetMaxOrdB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxNumStepsB(void *farg1, int const *farg2, long const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; long arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (long)(*farg3); result = (int)CVodeSetMaxNumStepsB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetStabLimDetB(void *farg1, int const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)CVodeSetStabLimDetB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetInitStepB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeSetInitStepB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMinStepB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeSetMinStepB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetMaxStepB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeSetMaxStepB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetConstraintsB(void *farg1, int const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 
0 ; int arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeSetConstraintsB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetQuadErrConB(void *farg1, int const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)CVodeSetQuadErrConB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetNonlinearSolverB(void *farg1, int const *farg2, SUNNonlinearSolver farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; SUNNonlinearSolver arg3 = (SUNNonlinearSolver) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (SUNNonlinearSolver)(farg3); result = (int)CVodeSetNonlinearSolverB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetB(void *farg1, int const *farg2, double *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeGetB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetQuadB(void *farg1, int const *farg2, double *farg3, N_Vector farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); result = (int)CVodeGetQuadB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT void * _wrap_FCVodeGetAdjCVodeBmem(void *farg1, int const *farg2) { void * fresult ; void *arg1 = (void *) 0 ; int arg2 ; void *result = 0 ; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = 
(void *)CVodeGetAdjCVodeBmem(arg1,arg2); fresult = result; return fresult; } SWIGEXPORT int _wrap_FCVodeGetAdjY(void *farg1, double const *farg2, N_Vector farg3) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; N_Vector arg3 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); arg3 = (N_Vector)(farg3); result = (int)CVodeGetAdjY(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_CVadjCheckPointRec_my_addr_set(SwigClassWrapper const *farg1, void *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; void *arg2 = (void *) 0 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::my_addr", return ); arg1 = (CVadjCheckPointRec *)(farg1->cptr); arg2 = (void *)(farg2); if (arg1) (arg1)->my_addr = arg2; } SWIGEXPORT void * _wrap_CVadjCheckPointRec_my_addr_get(SwigClassWrapper const *farg1) { void * fresult ; CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; void *result = 0 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::my_addr", return 0); arg1 = (CVadjCheckPointRec *)(farg1->cptr); result = (void *) ((arg1)->my_addr); fresult = result; return fresult; } SWIGEXPORT void _wrap_CVadjCheckPointRec_next_addr_set(SwigClassWrapper const *farg1, void *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; void *arg2 = (void *) 0 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::next_addr", return ); arg1 = (CVadjCheckPointRec *)(farg1->cptr); arg2 = (void *)(farg2); if (arg1) (arg1)->next_addr = arg2; } SWIGEXPORT void * _wrap_CVadjCheckPointRec_next_addr_get(SwigClassWrapper const *farg1) { void * fresult ; CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; void *result = 0 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::next_addr", return 0); arg1 = (CVadjCheckPointRec 
*)(farg1->cptr); result = (void *) ((arg1)->next_addr); fresult = result; return fresult; } SWIGEXPORT void _wrap_CVadjCheckPointRec_t0_set(SwigClassWrapper const *farg1, double const *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; realtype arg2 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::t0", return ); arg1 = (CVadjCheckPointRec *)(farg1->cptr); arg2 = (realtype)(*farg2); if (arg1) (arg1)->t0 = arg2; } SWIGEXPORT double _wrap_CVadjCheckPointRec_t0_get(SwigClassWrapper const *farg1) { double fresult ; CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; realtype result; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::t0", return 0); arg1 = (CVadjCheckPointRec *)(farg1->cptr); result = (realtype) ((arg1)->t0); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_CVadjCheckPointRec_t1_set(SwigClassWrapper const *farg1, double const *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; realtype arg2 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::t1", return ); arg1 = (CVadjCheckPointRec *)(farg1->cptr); arg2 = (realtype)(*farg2); if (arg1) (arg1)->t1 = arg2; } SWIGEXPORT double _wrap_CVadjCheckPointRec_t1_get(SwigClassWrapper const *farg1) { double fresult ; CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; realtype result; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::t1", return 0); arg1 = (CVadjCheckPointRec *)(farg1->cptr); result = (realtype) ((arg1)->t1); fresult = (realtype)(result); return fresult; } SWIGEXPORT void _wrap_CVadjCheckPointRec_nstep_set(SwigClassWrapper const *farg1, long const *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; long arg2 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::nstep", return ); arg1 = 
(CVadjCheckPointRec *)(farg1->cptr); arg2 = (long)(*farg2); if (arg1) (arg1)->nstep = arg2; } SWIGEXPORT long _wrap_CVadjCheckPointRec_nstep_get(SwigClassWrapper const *farg1) { long fresult ; CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; long result; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::nstep", return 0); arg1 = (CVadjCheckPointRec *)(farg1->cptr); result = (long) ((arg1)->nstep); fresult = (long)(result); return fresult; } SWIGEXPORT void _wrap_CVadjCheckPointRec_order_set(SwigClassWrapper const *farg1, int const *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; int arg2 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::order", return ); arg1 = (CVadjCheckPointRec *)(farg1->cptr); arg2 = (int)(*farg2); if (arg1) (arg1)->order = arg2; } SWIGEXPORT int _wrap_CVadjCheckPointRec_order_get(SwigClassWrapper const *farg1) { int fresult ; CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; int result; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::order", return 0); arg1 = (CVadjCheckPointRec *)(farg1->cptr); result = (int) ((arg1)->order); fresult = (int)(result); return fresult; } SWIGEXPORT void _wrap_CVadjCheckPointRec_step_set(SwigClassWrapper const *farg1, double const *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; realtype arg2 ; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::step", return ); arg1 = (CVadjCheckPointRec *)(farg1->cptr); arg2 = (realtype)(*farg2); if (arg1) (arg1)->step = arg2; } SWIGEXPORT double _wrap_CVadjCheckPointRec_step_get(SwigClassWrapper const *farg1) { double fresult ; CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; realtype result; SWIG_check_mutable_nonnull(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::step", return 0); arg1 = 
(CVadjCheckPointRec *)(farg1->cptr); result = (realtype) ((arg1)->step); fresult = (realtype)(result); return fresult; } SWIGEXPORT SwigClassWrapper _wrap_new_CVadjCheckPointRec() { SwigClassWrapper fresult ; CVadjCheckPointRec *result = 0 ; result = (CVadjCheckPointRec *)calloc(1, sizeof(CVadjCheckPointRec)); fresult.cptr = result; fresult.cmemflags = SWIG_MEM_RVALUE | (1 ? SWIG_MEM_OWN : 0); return fresult; } SWIGEXPORT void _wrap_delete_CVadjCheckPointRec(SwigClassWrapper *farg1) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; SWIG_check_mutable(*farg1, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVadjCheckPointRec::~CVadjCheckPointRec()", return ); arg1 = (CVadjCheckPointRec *)(farg1->cptr); free((char *) arg1); } SWIGEXPORT void _wrap_CVadjCheckPointRec_op_assign__(SwigClassWrapper *farg1, SwigClassWrapper const *farg2) { CVadjCheckPointRec *arg1 = (CVadjCheckPointRec *) 0 ; CVadjCheckPointRec *arg2 = 0 ; (void)sizeof(arg1); (void)sizeof(arg2); SWIG_assign(farg1, *farg2); } SWIGEXPORT int _wrap_FCVodeGetAdjCheckPointsInfo(void *farg1, SwigClassWrapper const *farg2) { int fresult ; void *arg1 = (void *) 0 ; CVadjCheckPointRec *arg2 = (CVadjCheckPointRec *) 0 ; int result; arg1 = (void *)(farg1); SWIG_check_mutable(*farg2, "CVadjCheckPointRec *", "CVadjCheckPointRec", "CVodeGetAdjCheckPointsInfo(void *,CVadjCheckPointRec *)", return 0); arg2 = (CVadjCheckPointRec *)(farg2->cptr); result = (int)CVodeGetAdjCheckPointsInfo(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacTimesRhsFnB(void *farg1, int const *farg2, CVRhsFn farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVRhsFn arg3 = (CVRhsFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVRhsFn)(farg3); result = (int)CVodeSetJacTimesRhsFnB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetAdjDataPointHermite(void *farg1, int const *farg2, double *farg3, N_Vector farg4, N_Vector farg5) { int 
fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; N_Vector arg4 = (N_Vector) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (N_Vector)(farg4); arg5 = (N_Vector)(farg5); result = (int)CVodeGetAdjDataPointHermite(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetAdjDataPointPolynomial(void *farg1, int const *farg2, double *farg3, int *farg4, N_Vector farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype *arg3 = (realtype *) 0 ; int *arg4 = (int *) 0 ; N_Vector arg5 = (N_Vector) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype *)(farg3); arg4 = (int *)(farg4); arg5 = (N_Vector)(farg5); result = (int)CVodeGetAdjDataPointPolynomial(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetAdjCurrentCheckPoint(void *farg1, void *farg2) { int fresult ; void *arg1 = (void *) 0 ; void **arg2 = (void **) 0 ; int result; arg1 = (void *)(farg1); arg2 = (void **)(farg2); result = (int)CVodeGetAdjCurrentCheckPoint(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBandPrecInit(void *farg1, int64_t const *farg2, int64_t const *farg3, int64_t const *farg4) { int fresult ; void *arg1 = (void *) 0 ; sunindextype arg2 ; sunindextype arg3 ; sunindextype arg4 ; int result; arg1 = (void *)(farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); result = (int)CVBandPrecInit(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBandPrecGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVBandPrecGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); 
return fresult; } SWIGEXPORT int _wrap_FCVBandPrecGetNumRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVBandPrecGetNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBandPrecInitB(void *farg1, int const *farg2, int64_t const *farg3, int64_t const *farg4, int64_t const *farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; sunindextype arg3 ; sunindextype arg4 ; sunindextype arg5 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (sunindextype)(*farg5); result = (int)CVBandPrecInitB(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBBDPrecInit(void *farg1, int64_t const *farg2, int64_t const *farg3, int64_t const *farg4, int64_t const *farg5, int64_t const *farg6, double const *farg7, CVLocalFn farg8, CVCommFn farg9) { int fresult ; void *arg1 = (void *) 0 ; sunindextype arg2 ; sunindextype arg3 ; sunindextype arg4 ; sunindextype arg5 ; sunindextype arg6 ; realtype arg7 ; CVLocalFn arg8 = (CVLocalFn) 0 ; CVCommFn arg9 = (CVCommFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (sunindextype)(*farg5); arg6 = (sunindextype)(*farg6); arg7 = (realtype)(*farg7); arg8 = (CVLocalFn)(farg8); arg9 = (CVCommFn)(farg9); result = (int)CVBBDPrecInit(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBBDPrecReInit(void *farg1, int64_t const *farg2, int64_t const *farg3, double const *farg4) { int fresult ; void *arg1 = (void *) 0 ; sunindextype arg2 ; sunindextype arg3 ; realtype arg4 ; int result; arg1 = (void *)(farg1); arg2 = (sunindextype)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (realtype)(*farg4); result = 
(int)CVBBDPrecReInit(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBBDPrecGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVBBDPrecGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBBDPrecGetNumGfnEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVBBDPrecGetNumGfnEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBBDPrecInitB(void *farg1, int const *farg2, int64_t const *farg3, int64_t const *farg4, int64_t const *farg5, int64_t const *farg6, int64_t const *farg7, double const *farg8, CVLocalFnB farg9, CVCommFnB farg10) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; sunindextype arg3 ; sunindextype arg4 ; sunindextype arg5 ; sunindextype arg6 ; sunindextype arg7 ; realtype arg8 ; CVLocalFnB arg9 = (CVLocalFnB) 0 ; CVCommFnB arg10 = (CVCommFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 = (sunindextype)(*farg5); arg6 = (sunindextype)(*farg6); arg7 = (sunindextype)(*farg7); arg8 = (realtype)(*farg8); arg9 = (CVLocalFnB)(farg9); arg10 = (CVCommFnB)(farg10); result = (int)CVBBDPrecInitB(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVBBDPrecReInitB(void *farg1, int const *farg2, int64_t const *farg3, int64_t const *farg4, double const *farg5) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; sunindextype arg3 ; sunindextype arg4 ; realtype arg5 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (sunindextype)(*farg3); arg4 = (sunindextype)(*farg4); arg5 
= (realtype)(*farg5); result = (int)CVBBDPrecReInitB(arg1,arg2,arg3,arg4,arg5); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVDiag(void *farg1) { int fresult ; void *arg1 = (void *) 0 ; int result; arg1 = (void *)(farg1); result = (int)CVDiag(arg1); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVDiagGetWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVDiagGetWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVDiagGetNumRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVDiagGetNumRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVDiagGetLastFlag(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVDiagGetLastFlag(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT SwigArrayWrapper _wrap_FCVDiagGetReturnFlagName(long const *farg1) { SwigArrayWrapper fresult ; long arg1 ; char *result = 0 ; arg1 = (long)(*farg1); result = (char *)CVDiagGetReturnFlagName(arg1); fresult.size = strlen((const char*)(result)); fresult.data = (char *)(result); return fresult; } SWIGEXPORT int _wrap_FCVDiagB(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVDiagB(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLinearSolver(void *farg1, SUNLinearSolver farg2, SUNMatrix farg3) { int fresult ; void *arg1 = (void *) 0 ; SUNLinearSolver arg2 = (SUNLinearSolver) 0 ; SUNMatrix arg3 = (SUNMatrix) 
0 ; int result; arg1 = (void *)(farg1); arg2 = (SUNLinearSolver)(farg2); arg3 = (SUNMatrix)(farg3); result = (int)CVodeSetLinearSolver(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacFn(void *farg1, CVLsJacFn farg2) { int fresult ; void *arg1 = (void *) 0 ; CVLsJacFn arg2 = (CVLsJacFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVLsJacFn)(farg2); result = (int)CVodeSetJacFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacEvalFrequency(void *farg1, long const *farg2) { int fresult ; void *arg1 = (void *) 0 ; long arg2 ; int result; arg1 = (void *)(farg1); arg2 = (long)(*farg2); result = (int)CVodeSetJacEvalFrequency(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLinearSolutionScaling(void *farg1, int const *farg2) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); result = (int)CVodeSetLinearSolutionScaling(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetEpsLin(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)CVodeSetEpsLin(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLSNormFactor(void *farg1, double const *farg2) { int fresult ; void *arg1 = (void *) 0 ; realtype arg2 ; int result; arg1 = (void *)(farg1); arg2 = (realtype)(*farg2); result = (int)CVodeSetLSNormFactor(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetPreconditioner(void *farg1, CVLsPrecSetupFn farg2, CVLsPrecSolveFn farg3) { int fresult ; void *arg1 = (void *) 0 ; CVLsPrecSetupFn arg2 = (CVLsPrecSetupFn) 0 ; CVLsPrecSolveFn arg3 = (CVLsPrecSolveFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVLsPrecSetupFn)(farg2); arg3 = (CVLsPrecSolveFn)(farg3); result = 
(int)CVodeSetPreconditioner(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacTimes(void *farg1, CVLsJacTimesSetupFn farg2, CVLsJacTimesVecFn farg3) { int fresult ; void *arg1 = (void *) 0 ; CVLsJacTimesSetupFn arg2 = (CVLsJacTimesSetupFn) 0 ; CVLsJacTimesVecFn arg3 = (CVLsJacTimesVecFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVLsJacTimesSetupFn)(farg2); arg3 = (CVLsJacTimesVecFn)(farg3); result = (int)CVodeSetJacTimes(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLinSysFn(void *farg1, CVLsLinSysFn farg2) { int fresult ; void *arg1 = (void *) 0 ; CVLsLinSysFn arg2 = (CVLsLinSysFn) 0 ; int result; arg1 = (void *)(farg1); arg2 = (CVLsLinSysFn)(farg2); result = (int)CVodeSetLinSysFn(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetLinWorkSpace(void *farg1, long *farg2, long *farg3) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); arg3 = (long *)(farg3); result = (int)CVodeGetLinWorkSpace(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumJacEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumJacEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumPrecEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumPrecEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumPrecSolves(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = 
(int)CVodeGetNumPrecSolves(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumLinIters(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumLinIters(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumLinConvFails(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumLinConvFails(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumJTSetupEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumJTSetupEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumJtimesEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumJtimesEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetNumLinRhsEvals(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetNumLinRhsEvals(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetLinSolveStats(void *farg1, long *farg2, long *farg3, long *farg4, long *farg5, long *farg6, long *farg7, long *farg8, long *farg9) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; long *arg3 = (long *) 0 ; long *arg4 = (long *) 0 ; long *arg5 = (long *) 0 ; long *arg6 = (long *) 0 ; long *arg7 = (long *) 0 ; long *arg8 = (long *) 0 ; long *arg9 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long 
*)(farg2); arg3 = (long *)(farg3); arg4 = (long *)(farg4); arg5 = (long *)(farg5); arg6 = (long *)(farg6); arg7 = (long *)(farg7); arg8 = (long *)(farg8); arg9 = (long *)(farg9); result = (int)CVodeGetLinSolveStats(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeGetLastLinFlag(void *farg1, long *farg2) { int fresult ; void *arg1 = (void *) 0 ; long *arg2 = (long *) 0 ; int result; arg1 = (void *)(farg1); arg2 = (long *)(farg2); result = (int)CVodeGetLastLinFlag(arg1,arg2); fresult = (int)(result); return fresult; } SWIGEXPORT SwigArrayWrapper _wrap_FCVodeGetLinReturnFlagName(long const *farg1) { SwigArrayWrapper fresult ; long arg1 ; char *result = 0 ; arg1 = (long)(*farg1); result = (char *)CVodeGetLinReturnFlagName(arg1); fresult.size = strlen((const char*)(result)); fresult.data = (char *)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLinearSolverB(void *farg1, int const *farg2, SUNLinearSolver farg3, SUNMatrix farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; SUNLinearSolver arg3 = (SUNLinearSolver) 0 ; SUNMatrix arg4 = (SUNMatrix) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (SUNLinearSolver)(farg3); arg4 = (SUNMatrix)(farg4); result = (int)CVodeSetLinearSolverB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacFnB(void *farg1, int const *farg2, CVLsJacFnB farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsJacFnB arg3 = (CVLsJacFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsJacFnB)(farg3); result = (int)CVodeSetJacFnB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacFnBS(void *farg1, int const *farg2, CVLsJacFnBS farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsJacFnBS arg3 = (CVLsJacFnBS) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsJacFnBS)(farg3); result = 
(int)CVodeSetJacFnBS(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetEpsLinB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeSetEpsLinB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLSNormFactorB(void *farg1, int const *farg2, double const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; realtype arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (realtype)(*farg3); result = (int)CVodeSetLSNormFactorB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLinearSolutionScalingB(void *farg1, int const *farg2, int const *farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; int arg3 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (int)(*farg3); result = (int)CVodeSetLinearSolutionScalingB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetPreconditionerB(void *farg1, int const *farg2, CVLsPrecSetupFnB farg3, CVLsPrecSolveFnB farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsPrecSetupFnB arg3 = (CVLsPrecSetupFnB) 0 ; CVLsPrecSolveFnB arg4 = (CVLsPrecSolveFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsPrecSetupFnB)(farg3); arg4 = (CVLsPrecSolveFnB)(farg4); result = (int)CVodeSetPreconditionerB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetPreconditionerBS(void *farg1, int const *farg2, CVLsPrecSetupFnBS farg3, CVLsPrecSolveFnBS farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsPrecSetupFnBS arg3 = (CVLsPrecSetupFnBS) 0 ; CVLsPrecSolveFnBS arg4 = (CVLsPrecSolveFnBS) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsPrecSetupFnBS)(farg3); arg4 = 
(CVLsPrecSolveFnBS)(farg4); result = (int)CVodeSetPreconditionerBS(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacTimesB(void *farg1, int const *farg2, CVLsJacTimesSetupFnB farg3, CVLsJacTimesVecFnB farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsJacTimesSetupFnB arg3 = (CVLsJacTimesSetupFnB) 0 ; CVLsJacTimesVecFnB arg4 = (CVLsJacTimesVecFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsJacTimesSetupFnB)(farg3); arg4 = (CVLsJacTimesVecFnB)(farg4); result = (int)CVodeSetJacTimesB(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetJacTimesBS(void *farg1, int const *farg2, CVLsJacTimesSetupFnBS farg3, CVLsJacTimesVecFnBS farg4) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsJacTimesSetupFnBS arg3 = (CVLsJacTimesSetupFnBS) 0 ; CVLsJacTimesVecFnBS arg4 = (CVLsJacTimesVecFnBS) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsJacTimesSetupFnBS)(farg3); arg4 = (CVLsJacTimesVecFnBS)(farg4); result = (int)CVodeSetJacTimesBS(arg1,arg2,arg3,arg4); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLinSysFnB(void *farg1, int const *farg2, CVLsLinSysFnB farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsLinSysFnB arg3 = (CVLsLinSysFnB) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsLinSysFnB)(farg3); result = (int)CVodeSetLinSysFnB(arg1,arg2,arg3); fresult = (int)(result); return fresult; } SWIGEXPORT int _wrap_FCVodeSetLinSysFnBS(void *farg1, int const *farg2, CVLsLinSysFnBS farg3) { int fresult ; void *arg1 = (void *) 0 ; int arg2 ; CVLsLinSysFnBS arg3 = (CVLsLinSysFnBS) 0 ; int result; arg1 = (void *)(farg1); arg2 = (int)(*farg2); arg3 = (CVLsLinSysFnBS)(farg3); result = (int)CVodeSetLinSysFnBS(arg1,arg2,arg3); fresult = (int)(result); return fresult; } 
StanHeaders/src/cvodes/fmod/fcvodes_mod.f900000644000176200001440000055057114645137106020306 0ustar liggesusers! This file was automatically generated by SWIG (http://www.swig.org). ! Version 4.0.0 ! ! Do not make changes to this file unless you know what you are doing--modify ! the SWIG interface file instead. ! --------------------------------------------------------------- ! Programmer(s): Auto-generated by swig. ! --------------------------------------------------------------- ! SUNDIALS Copyright Start ! Copyright (c) 2002-2022, Lawrence Livermore National Security ! and Southern Methodist University. ! All rights reserved. ! ! See the top-level LICENSE and NOTICE files for details. ! ! SPDX-License-Identifier: BSD-3-Clause ! SUNDIALS Copyright End ! --------------------------------------------------------------- module fcvodes_mod use, intrinsic :: ISO_C_BINDING use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_linearsolver_mod use fsundials_matrix_mod use fsundials_nvector_mod use fsundials_context_mod use fsundials_types_mod use fsundials_nonlinearsolver_mod use fsundials_types_mod implicit none private ! 
DECLARATION CONSTRUCTS integer(C_INT), parameter, public :: CV_ADAMS = 1_C_INT integer(C_INT), parameter, public :: CV_BDF = 2_C_INT integer(C_INT), parameter, public :: CV_NORMAL = 1_C_INT integer(C_INT), parameter, public :: CV_ONE_STEP = 2_C_INT integer(C_INT), parameter, public :: CV_SIMULTANEOUS = 1_C_INT integer(C_INT), parameter, public :: CV_STAGGERED = 2_C_INT integer(C_INT), parameter, public :: CV_STAGGERED1 = 3_C_INT integer(C_INT), parameter, public :: CV_CENTERED = 1_C_INT integer(C_INT), parameter, public :: CV_FORWARD = 2_C_INT integer(C_INT), parameter, public :: CV_HERMITE = 1_C_INT integer(C_INT), parameter, public :: CV_POLYNOMIAL = 2_C_INT integer(C_INT), parameter, public :: CV_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: CV_TSTOP_RETURN = 1_C_INT integer(C_INT), parameter, public :: CV_ROOT_RETURN = 2_C_INT integer(C_INT), parameter, public :: CV_WARNING = 99_C_INT integer(C_INT), parameter, public :: CV_TOO_MUCH_WORK = -1_C_INT integer(C_INT), parameter, public :: CV_TOO_MUCH_ACC = -2_C_INT integer(C_INT), parameter, public :: CV_ERR_FAILURE = -3_C_INT integer(C_INT), parameter, public :: CV_CONV_FAILURE = -4_C_INT integer(C_INT), parameter, public :: CV_LINIT_FAIL = -5_C_INT integer(C_INT), parameter, public :: CV_LSETUP_FAIL = -6_C_INT integer(C_INT), parameter, public :: CV_LSOLVE_FAIL = -7_C_INT integer(C_INT), parameter, public :: CV_RHSFUNC_FAIL = -8_C_INT integer(C_INT), parameter, public :: CV_FIRST_RHSFUNC_ERR = -9_C_INT integer(C_INT), parameter, public :: CV_REPTD_RHSFUNC_ERR = -10_C_INT integer(C_INT), parameter, public :: CV_UNREC_RHSFUNC_ERR = -11_C_INT integer(C_INT), parameter, public :: CV_RTFUNC_FAIL = -12_C_INT integer(C_INT), parameter, public :: CV_NLS_INIT_FAIL = -13_C_INT integer(C_INT), parameter, public :: CV_NLS_SETUP_FAIL = -14_C_INT integer(C_INT), parameter, public :: CV_CONSTR_FAIL = -15_C_INT integer(C_INT), parameter, public :: CV_NLS_FAIL = -16_C_INT integer(C_INT), parameter, public :: CV_MEM_FAIL 
= -20_C_INT integer(C_INT), parameter, public :: CV_MEM_NULL = -21_C_INT integer(C_INT), parameter, public :: CV_ILL_INPUT = -22_C_INT integer(C_INT), parameter, public :: CV_NO_MALLOC = -23_C_INT integer(C_INT), parameter, public :: CV_BAD_K = -24_C_INT integer(C_INT), parameter, public :: CV_BAD_T = -25_C_INT integer(C_INT), parameter, public :: CV_BAD_DKY = -26_C_INT integer(C_INT), parameter, public :: CV_TOO_CLOSE = -27_C_INT integer(C_INT), parameter, public :: CV_VECTOROP_ERR = -28_C_INT integer(C_INT), parameter, public :: CV_NO_QUAD = -30_C_INT integer(C_INT), parameter, public :: CV_QRHSFUNC_FAIL = -31_C_INT integer(C_INT), parameter, public :: CV_FIRST_QRHSFUNC_ERR = -32_C_INT integer(C_INT), parameter, public :: CV_REPTD_QRHSFUNC_ERR = -33_C_INT integer(C_INT), parameter, public :: CV_UNREC_QRHSFUNC_ERR = -34_C_INT integer(C_INT), parameter, public :: CV_NO_SENS = -40_C_INT integer(C_INT), parameter, public :: CV_SRHSFUNC_FAIL = -41_C_INT integer(C_INT), parameter, public :: CV_FIRST_SRHSFUNC_ERR = -42_C_INT integer(C_INT), parameter, public :: CV_REPTD_SRHSFUNC_ERR = -43_C_INT integer(C_INT), parameter, public :: CV_UNREC_SRHSFUNC_ERR = -44_C_INT integer(C_INT), parameter, public :: CV_BAD_IS = -45_C_INT integer(C_INT), parameter, public :: CV_NO_QUADSENS = -50_C_INT integer(C_INT), parameter, public :: CV_QSRHSFUNC_FAIL = -51_C_INT integer(C_INT), parameter, public :: CV_FIRST_QSRHSFUNC_ERR = -52_C_INT integer(C_INT), parameter, public :: CV_REPTD_QSRHSFUNC_ERR = -53_C_INT integer(C_INT), parameter, public :: CV_UNREC_QSRHSFUNC_ERR = -54_C_INT integer(C_INT), parameter, public :: CV_CONTEXT_ERR = -55_C_INT integer(C_INT), parameter, public :: CV_UNRECOGNIZED_ERR = -99_C_INT integer(C_INT), parameter, public :: CV_NO_ADJ = -101_C_INT integer(C_INT), parameter, public :: CV_NO_FWD = -102_C_INT integer(C_INT), parameter, public :: CV_NO_BCK = -103_C_INT integer(C_INT), parameter, public :: CV_BAD_TB0 = -104_C_INT integer(C_INT), parameter, public :: 
CV_REIFWD_FAIL = -105_C_INT integer(C_INT), parameter, public :: CV_FWD_FAIL = -106_C_INT integer(C_INT), parameter, public :: CV_GETY_BADT = -107_C_INT public :: FCVodeCreate public :: FCVodeInit public :: FCVodeReInit public :: FCVodeSStolerances public :: FCVodeSVtolerances public :: FCVodeWFtolerances public :: FCVodeSetConstraints public :: FCVodeSetErrFile public :: FCVodeSetErrHandlerFn public :: FCVodeSetInitStep public :: FCVodeSetLSetupFrequency public :: FCVodeSetMaxConvFails public :: FCVodeSetMaxErrTestFails public :: FCVodeSetMaxHnilWarns public :: FCVodeSetMaxNonlinIters public :: FCVodeSetMaxNumSteps public :: FCVodeSetMaxOrd public :: FCVodeSetMaxStep public :: FCVodeSetMinStep public :: FCVodeSetMonitorFn public :: FCVodeSetMonitorFrequency public :: FCVodeSetNlsRhsFn public :: FCVodeSetNonlinConvCoef public :: FCVodeSetNonlinearSolver public :: FCVodeSetStabLimDet public :: FCVodeSetStopTime public :: FCVodeSetUserData public :: FCVodeRootInit public :: FCVodeSetRootDirection public :: FCVodeSetNoInactiveRootWarn public :: FCVode public :: FCVodeComputeState public :: FCVodeComputeStateSens public :: FCVodeComputeStateSens1 public :: FCVodeGetDky public :: FCVodeGetWorkSpace public :: FCVodeGetNumSteps public :: FCVodeGetNumRhsEvals public :: FCVodeGetNumLinSolvSetups public :: FCVodeGetNumErrTestFails public :: FCVodeGetLastOrder public :: FCVodeGetCurrentOrder public :: FCVodeGetCurrentGamma public :: FCVodeGetNumStabLimOrderReds public :: FCVodeGetActualInitStep public :: FCVodeGetLastStep public :: FCVodeGetCurrentStep public :: FCVodeGetCurrentState public :: FCVodeGetCurrentStateSens public :: FCVodeGetCurrentSensSolveIndex public :: FCVodeGetCurrentTime public :: FCVodeGetTolScaleFactor public :: FCVodeGetErrWeights public :: FCVodeGetEstLocalErrors public :: FCVodeGetNumGEvals public :: FCVodeGetRootInfo public :: FCVodeGetIntegratorStats public :: FCVodeGetNonlinearSystemData public :: FCVodeGetNonlinearSystemDataSens public :: 
FCVodeGetNumNonlinSolvIters public :: FCVodeGetNumNonlinSolvConvFails public :: FCVodeGetNonlinSolvStats type, bind(C) :: SwigArrayWrapper type(C_PTR), public :: data = C_NULL_PTR integer(C_SIZE_T), public :: size = 0 end type public :: FCVodeGetReturnFlagName public :: FCVodeFree public :: FCVodeSetJacTimesRhsFn public :: FCVodeQuadInit public :: FCVodeQuadReInit public :: FCVodeQuadSStolerances public :: FCVodeQuadSVtolerances public :: FCVodeSetQuadErrCon public :: FCVodeGetQuad public :: FCVodeGetQuadDky public :: FCVodeGetQuadNumRhsEvals public :: FCVodeGetQuadNumErrTestFails public :: FCVodeGetQuadErrWeights public :: FCVodeGetQuadStats public :: FCVodeQuadFree public :: FCVodeSensInit public :: FCVodeSensInit1 public :: FCVodeSensReInit public :: FCVodeSensSStolerances public :: FCVodeSensSVtolerances public :: FCVodeSensEEtolerances public :: FCVodeSetSensDQMethod public :: FCVodeSetSensErrCon public :: FCVodeSetSensMaxNonlinIters public :: FCVodeSetSensParams public :: FCVodeSetNonlinearSolverSensSim public :: FCVodeSetNonlinearSolverSensStg public :: FCVodeSetNonlinearSolverSensStg1 public :: FCVodeSensToggleOff public :: FCVodeGetSens public :: FCVodeGetSens1 public :: FCVodeGetSensDky public :: FCVodeGetSensDky1 public :: FCVodeGetSensNumRhsEvals public :: FCVodeGetNumRhsEvalsSens public :: FCVodeGetSensNumErrTestFails public :: FCVodeGetSensNumLinSolvSetups public :: FCVodeGetSensErrWeights public :: FCVodeGetSensStats public :: FCVodeGetSensNumNonlinSolvIters public :: FCVodeGetSensNumNonlinSolvConvFails public :: FCVodeGetSensNonlinSolvStats public :: FCVodeGetStgrSensNumNonlinSolvIters public :: FCVodeGetStgrSensNumNonlinSolvConvFails public :: FCVodeGetStgrSensNonlinSolvStats public :: FCVodeSensFree public :: FCVodeQuadSensInit public :: FCVodeQuadSensReInit public :: FCVodeQuadSensSStolerances public :: FCVodeQuadSensSVtolerances public :: FCVodeQuadSensEEtolerances public :: FCVodeSetQuadSensErrCon public :: FCVodeGetQuadSens public :: 
FCVodeGetQuadSens1 public :: FCVodeGetQuadSensDky public :: FCVodeGetQuadSensDky1 public :: FCVodeGetQuadSensNumRhsEvals public :: FCVodeGetQuadSensNumErrTestFails public :: FCVodeGetQuadSensErrWeights public :: FCVodeGetQuadSensStats public :: FCVodeQuadSensFree public :: FCVodeAdjInit public :: FCVodeAdjReInit public :: FCVodeAdjFree public :: FCVodeCreateB public :: FCVodeInitB public :: FCVodeInitBS public :: FCVodeReInitB public :: FCVodeSStolerancesB public :: FCVodeSVtolerancesB public :: FCVodeQuadInitB public :: FCVodeQuadInitBS public :: FCVodeQuadReInitB public :: FCVodeQuadSStolerancesB public :: FCVodeQuadSVtolerancesB public :: FCVodeF public :: FCVodeB public :: FCVodeSetAdjNoSensi public :: FCVodeSetUserDataB public :: FCVodeSetMaxOrdB public :: FCVodeSetMaxNumStepsB public :: FCVodeSetStabLimDetB public :: FCVodeSetInitStepB public :: FCVodeSetMinStepB public :: FCVodeSetMaxStepB public :: FCVodeSetConstraintsB public :: FCVodeSetQuadErrConB public :: FCVodeSetNonlinearSolverB public :: FCVodeGetB public :: FCVodeGetQuadB public :: FCVodeGetAdjCVodeBmem public :: FCVodeGetAdjY integer, parameter :: swig_cmem_own_bit = 0 integer, parameter :: swig_cmem_rvalue_bit = 1 integer, parameter :: swig_cmem_const_bit = 2 type, bind(C) :: SwigClassWrapper type(C_PTR), public :: cptr = C_NULL_PTR integer(C_INT), public :: cmemflags = 0 end type ! 
struct CVadjCheckPointRec type, public :: CVadjCheckPointRec type(SwigClassWrapper), public :: swigdata contains procedure :: set_my_addr => swigf_CVadjCheckPointRec_my_addr_set procedure :: get_my_addr => swigf_CVadjCheckPointRec_my_addr_get procedure :: set_next_addr => swigf_CVadjCheckPointRec_next_addr_set procedure :: get_next_addr => swigf_CVadjCheckPointRec_next_addr_get procedure :: set_t0 => swigf_CVadjCheckPointRec_t0_set procedure :: get_t0 => swigf_CVadjCheckPointRec_t0_get procedure :: set_t1 => swigf_CVadjCheckPointRec_t1_set procedure :: get_t1 => swigf_CVadjCheckPointRec_t1_get procedure :: set_nstep => swigf_CVadjCheckPointRec_nstep_set procedure :: get_nstep => swigf_CVadjCheckPointRec_nstep_get procedure :: set_order => swigf_CVadjCheckPointRec_order_set procedure :: get_order => swigf_CVadjCheckPointRec_order_get procedure :: set_step => swigf_CVadjCheckPointRec_step_set procedure :: get_step => swigf_CVadjCheckPointRec_step_get procedure :: release => swigf_release_CVadjCheckPointRec procedure, private :: swigf_CVadjCheckPointRec_op_assign__ generic :: assignment(=) => swigf_CVadjCheckPointRec_op_assign__ end type CVadjCheckPointRec interface CVadjCheckPointRec module procedure swigf_create_CVadjCheckPointRec end interface public :: FCVodeGetAdjCheckPointsInfo public :: FCVodeSetJacTimesRhsFnB public :: FCVodeGetAdjDataPointHermite public :: FCVodeGetAdjDataPointPolynomial public :: FCVodeGetAdjCurrentCheckPoint public :: FCVBandPrecInit public :: FCVBandPrecGetWorkSpace public :: FCVBandPrecGetNumRhsEvals public :: FCVBandPrecInitB public :: FCVBBDPrecInit public :: FCVBBDPrecReInit public :: FCVBBDPrecGetWorkSpace public :: FCVBBDPrecGetNumGfnEvals public :: FCVBBDPrecInitB public :: FCVBBDPrecReInitB integer(C_INT), parameter, public :: CVDIAG_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: CVDIAG_MEM_NULL = -1_C_INT integer(C_INT), parameter, public :: CVDIAG_LMEM_NULL = -2_C_INT integer(C_INT), parameter, public :: CVDIAG_ILL_INPUT 
= -3_C_INT integer(C_INT), parameter, public :: CVDIAG_MEM_FAIL = -4_C_INT integer(C_INT), parameter, public :: CVDIAG_INV_FAIL = -5_C_INT integer(C_INT), parameter, public :: CVDIAG_RHSFUNC_UNRECVR = -6_C_INT integer(C_INT), parameter, public :: CVDIAG_RHSFUNC_RECVR = -7_C_INT integer(C_INT), parameter, public :: CVDIAG_NO_ADJ = -101_C_INT public :: FCVDiag public :: FCVDiagGetWorkSpace public :: FCVDiagGetNumRhsEvals public :: FCVDiagGetLastFlag public :: FCVDiagGetReturnFlagName public :: FCVDiagB integer(C_INT), parameter, public :: CVLS_SUCCESS = 0_C_INT integer(C_INT), parameter, public :: CVLS_MEM_NULL = -1_C_INT integer(C_INT), parameter, public :: CVLS_LMEM_NULL = -2_C_INT integer(C_INT), parameter, public :: CVLS_ILL_INPUT = -3_C_INT integer(C_INT), parameter, public :: CVLS_MEM_FAIL = -4_C_INT integer(C_INT), parameter, public :: CVLS_PMEM_NULL = -5_C_INT integer(C_INT), parameter, public :: CVLS_JACFUNC_UNRECVR = -6_C_INT integer(C_INT), parameter, public :: CVLS_JACFUNC_RECVR = -7_C_INT integer(C_INT), parameter, public :: CVLS_SUNMAT_FAIL = -8_C_INT integer(C_INT), parameter, public :: CVLS_SUNLS_FAIL = -9_C_INT integer(C_INT), parameter, public :: CVLS_NO_ADJ = -101_C_INT integer(C_INT), parameter, public :: CVLS_LMEMB_NULL = -102_C_INT public :: FCVodeSetLinearSolver public :: FCVodeSetJacFn public :: FCVodeSetJacEvalFrequency public :: FCVodeSetLinearSolutionScaling public :: FCVodeSetEpsLin public :: FCVodeSetLSNormFactor public :: FCVodeSetPreconditioner public :: FCVodeSetJacTimes public :: FCVodeSetLinSysFn public :: FCVodeGetLinWorkSpace public :: FCVodeGetNumJacEvals public :: FCVodeGetNumPrecEvals public :: FCVodeGetNumPrecSolves public :: FCVodeGetNumLinIters public :: FCVodeGetNumLinConvFails public :: FCVodeGetNumJTSetupEvals public :: FCVodeGetNumJtimesEvals public :: FCVodeGetNumLinRhsEvals public :: FCVodeGetLinSolveStats public :: FCVodeGetLastLinFlag public :: FCVodeGetLinReturnFlagName public :: FCVodeSetLinearSolverB public :: 
FCVodeSetJacFnB public :: FCVodeSetJacFnBS public :: FCVodeSetEpsLinB public :: FCVodeSetLSNormFactorB public :: FCVodeSetLinearSolutionScalingB public :: FCVodeSetPreconditionerB public :: FCVodeSetPreconditionerBS public :: FCVodeSetJacTimesB public :: FCVodeSetJacTimesBS public :: FCVodeSetLinSysFnB public :: FCVodeSetLinSysFnBS ! WRAPPER DECLARATIONS interface function swigc_FCVodeCreate(farg1, farg2) & bind(C, name="_wrap_FCVodeCreate") & result(fresult) use, intrinsic :: ISO_C_BINDING integer(C_INT), intent(in) :: farg1 type(C_PTR), value :: farg2 type(C_PTR) :: fresult end function function swigc_FCVodeInit(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeReInit(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSStolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeWFtolerances(farg1, farg2) & bind(C, name="_wrap_FCVodeWFtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function 
swigc_FCVodeSetConstraints(farg1, farg2) & bind(C, name="_wrap_FCVodeSetConstraints") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetErrFile(farg1, farg2) & bind(C, name="_wrap_FCVodeSetErrFile") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetErrHandlerFn(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetErrHandlerFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetInitStep(farg1, farg2) & bind(C, name="_wrap_FCVodeSetInitStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetLSetupFrequency(farg1, farg2) & bind(C, name="_wrap_FCVodeSetLSetupFrequency") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxConvFails(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMaxConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxErrTestFails(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMaxErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxHnilWarns(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMaxHnilWarns") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function 
function swigc_FCVodeSetMaxNonlinIters(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMaxNonlinIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxNumSteps(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMaxNumSteps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxOrd(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMaxOrd") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxStep(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMaxStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMinStep(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMinStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMonitorFn(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMonitorFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetMonitorFrequency(farg1, farg2) & bind(C, name="_wrap_FCVodeSetMonitorFrequency") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetNlsRhsFn(farg1, farg2) & bind(C, name="_wrap_FCVodeSetNlsRhsFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function 
swigc_FCVodeSetNonlinConvCoef(farg1, farg2) & bind(C, name="_wrap_FCVodeSetNonlinConvCoef") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetNonlinearSolver(farg1, farg2) & bind(C, name="_wrap_FCVodeSetNonlinearSolver") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetStabLimDet(farg1, farg2) & bind(C, name="_wrap_FCVodeSetStabLimDet") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetStopTime(farg1, farg2) & bind(C, name="_wrap_FCVodeSetStopTime") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetUserData(farg1, farg2) & bind(C, name="_wrap_FCVodeSetUserData") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeRootInit(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeRootInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetRootDirection(farg1, farg2) & bind(C, name="_wrap_FCVodeSetRootDirection") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetNoInactiveRootWarn(farg1) & bind(C, name="_wrap_FCVodeSetNoInactiveRootWarn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FCVode(farg1, farg2, farg3, 
farg4, farg5) & bind(C, name="_wrap_FCVode") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeComputeState(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeComputeState") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeComputeStateSens(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeComputeStateSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeComputeStateSens1(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeComputeStateSens1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetDky(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumSteps(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumSteps") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function 
swigc_FCVodeGetNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumLinSolvSetups(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumLinSolvSetups") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumErrTestFails(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetLastOrder(farg1, farg2) & bind(C, name="_wrap_FCVodeGetLastOrder") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetCurrentOrder(farg1, farg2) & bind(C, name="_wrap_FCVodeGetCurrentOrder") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetCurrentGamma(farg1, farg2) & bind(C, name="_wrap_FCVodeGetCurrentGamma") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumStabLimOrderReds(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumStabLimOrderReds") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetActualInitStep(farg1, farg2) & bind(C, name="_wrap_FCVodeGetActualInitStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetLastStep(farg1, farg2) & 
bind(C, name="_wrap_FCVodeGetLastStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetCurrentStep(farg1, farg2) & bind(C, name="_wrap_FCVodeGetCurrentStep") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetCurrentState(farg1, farg2) & bind(C, name="_wrap_FCVodeGetCurrentState") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetCurrentStateSens(farg1, farg2) & bind(C, name="_wrap_FCVodeGetCurrentStateSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetCurrentSensSolveIndex(farg1, farg2) & bind(C, name="_wrap_FCVodeGetCurrentSensSolveIndex") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetCurrentTime(farg1, farg2) & bind(C, name="_wrap_FCVodeGetCurrentTime") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetTolScaleFactor(farg1, farg2) & bind(C, name="_wrap_FCVodeGetTolScaleFactor") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetErrWeights(farg1, farg2) & bind(C, name="_wrap_FCVodeGetErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetEstLocalErrors(farg1, farg2) & bind(C, 
name="_wrap_FCVodeGetEstLocalErrors") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumGEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumGEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetRootInfo(farg1, farg2) & bind(C, name="_wrap_FCVodeGetRootInfo") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetIntegratorStats(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10, farg11) & bind(C, name="_wrap_FCVodeGetIntegratorStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 type(C_PTR), value :: farg8 type(C_PTR), value :: farg9 type(C_PTR), value :: farg10 type(C_PTR), value :: farg11 integer(C_INT) :: fresult end function function swigc_FCVodeGetNonlinearSystemData(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) & bind(C, name="_wrap_FCVodeGetNonlinearSystemData") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 type(C_PTR), value :: farg8 type(C_PTR), value :: farg9 integer(C_INT) :: fresult end function function swigc_FCVodeGetNonlinearSystemDataSens(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8) & bind(C, name="_wrap_FCVodeGetNonlinearSystemDataSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 
type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 type(C_PTR), value :: farg8 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumNonlinSolvIters(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumNonlinSolvIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumNonlinSolvConvFails(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumNonlinSolvConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNonlinSolvStats(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetNonlinSolvStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine SWIG_free(cptr) & bind(C, name="free") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: cptr end subroutine function swigc_FCVodeGetReturnFlagName(farg1) & bind(C, name="_wrap_FCVodeGetReturnFlagName") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper integer(C_LONG), intent(in) :: farg1 type(SwigArrayWrapper) :: fresult end function subroutine swigc_FCVodeFree(farg1) & bind(C, name="_wrap_FCVodeFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FCVodeSetJacTimesRhsFn(farg1, farg2) & bind(C, name="_wrap_FCVodeSetJacTimesRhsFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeQuadInit(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeQuadInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: 
farg3 integer(C_INT) :: fresult end function function swigc_FCVodeQuadReInit(farg1, farg2) & bind(C, name="_wrap_FCVodeQuadReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSStolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeQuadSStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeQuadSVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetQuadErrCon(farg1, farg2) & bind(C, name="_wrap_FCVodeSetQuadErrCon") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuad(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetQuad") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadDky(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetQuadDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetQuadNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadNumErrTestFails(farg1, farg2) & bind(C, 
name="_wrap_FCVodeGetQuadNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadErrWeights(farg1, farg2) & bind(C, name="_wrap_FCVodeGetQuadErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadStats(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetQuadStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine swigc_FCVodeQuadFree(farg1) & bind(C, name="_wrap_FCVodeQuadFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FCVodeSensInit(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeSensInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_FUNPTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeSensInit1(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeSensInit1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_FUNPTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeSensReInit(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSensReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSensSStolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSensSStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING 
type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSensSVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSensSVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSensEEtolerances(farg1) & bind(C, name="_wrap_FCVodeSensEEtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FCVodeSetSensDQMethod(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetSensDQMethod") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetSensErrCon(farg1, farg2) & bind(C, name="_wrap_FCVodeSetSensErrCon") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetSensMaxNonlinIters(farg1, farg2) & bind(C, name="_wrap_FCVodeSetSensMaxNonlinIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetSensParams(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSetSensParams") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeSetNonlinearSolverSensSim(farg1, farg2) & bind(C, name="_wrap_FCVodeSetNonlinearSolverSensSim") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function 
function swigc_FCVodeSetNonlinearSolverSensStg(farg1, farg2) & bind(C, name="_wrap_FCVodeSetNonlinearSolverSensStg") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetNonlinearSolverSensStg1(farg1, farg2) & bind(C, name="_wrap_FCVodeSetNonlinearSolverSensStg1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSensToggleOff(farg1) & bind(C, name="_wrap_FCVodeSensToggleOff") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FCVodeGetSens(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeGetSens1(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetSens1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensDky(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetSensDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensDky1(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeGetSensDky1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function 
swigc_FCVodeGetSensNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetSensNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumRhsEvalsSens(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumRhsEvalsSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensNumErrTestFails(farg1, farg2) & bind(C, name="_wrap_FCVodeGetSensNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensNumLinSolvSetups(farg1, farg2) & bind(C, name="_wrap_FCVodeGetSensNumLinSolvSetups") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensErrWeights(farg1, farg2) & bind(C, name="_wrap_FCVodeGetSensErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensStats(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeGetSensStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensNumNonlinSolvIters(farg1, farg2) & bind(C, name="_wrap_FCVodeGetSensNumNonlinSolvIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensNumNonlinSolvConvFails(farg1, farg2) & bind(C, name="_wrap_FCVodeGetSensNumNonlinSolvConvFails") & result(fresult) use, 
intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetSensNonlinSolvStats(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetSensNonlinSolvStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeGetStgrSensNumNonlinSolvIters(farg1, farg2) & bind(C, name="_wrap_FCVodeGetStgrSensNumNonlinSolvIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetStgrSensNumNonlinSolvConvFails(farg1, farg2) & bind(C, name="_wrap_FCVodeGetStgrSensNumNonlinSolvConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetStgrSensNonlinSolvStats(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetStgrSensNonlinSolvStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine swigc_FCVodeSensFree(farg1) & bind(C, name="_wrap_FCVodeSensFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FCVodeQuadSensInit(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeQuadSensInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSensReInit(farg1, farg2) & bind(C, name="_wrap_FCVodeQuadSensReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSensSStolerances(farg1, farg2, farg3) & bind(C, 
name="_wrap_FCVodeQuadSensSStolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSensSVtolerances(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeQuadSensSVtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSensEEtolerances(farg1) & bind(C, name="_wrap_FCVodeQuadSensEEtolerances") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FCVodeSetQuadSensErrCon(farg1, farg2) & bind(C, name="_wrap_FCVodeSetQuadSensErrCon") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSens(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetQuadSens") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSens1(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetQuadSens1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSensDky(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetQuadSensDky") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSensDky1(farg1, farg2, farg3, farg4, farg5) & bind(C, 
name="_wrap_FCVodeGetQuadSensDky1") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT), intent(in) :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSensNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetQuadSensNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSensNumErrTestFails(farg1, farg2) & bind(C, name="_wrap_FCVodeGetQuadSensNumErrTestFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSensErrWeights(farg1, farg2) & bind(C, name="_wrap_FCVodeGetQuadSensErrWeights") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadSensStats(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetQuadSensStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine swigc_FCVodeQuadSensFree(farg1) & bind(C, name="_wrap_FCVodeQuadSensFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FCVodeAdjInit(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeAdjInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeAdjReInit(farg1) & bind(C, name="_wrap_FCVodeAdjReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function subroutine 
swigc_FCVodeAdjFree(farg1) & bind(C, name="_wrap_FCVodeAdjFree") use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 end subroutine function swigc_FCVodeCreateB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeCreateB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeInitB(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeInitBS(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeInitBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 real(C_DOUBLE), intent(in) :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeReInitB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeReInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeSStolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSStolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 real(C_DOUBLE), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeSVtolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSVtolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: 
farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeQuadInitB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeQuadInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeQuadInitBS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeQuadInitBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeQuadReInitB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeQuadReInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSStolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeQuadSStolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 real(C_DOUBLE), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeQuadSVtolerancesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeQuadSVtolerancesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeF(farg1, farg2, farg3, farg4, farg5, farg6) & bind(C, name="_wrap_FCVodeF") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT), intent(in) :: farg5 type(C_PTR), value :: farg6 integer(C_INT) :: 
fresult end function function swigc_FCVodeB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetAdjNoSensi(farg1) & bind(C, name="_wrap_FCVodeSetAdjNoSensi") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FCVodeSetUserDataB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetUserDataB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxOrdB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetMaxOrdB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxNumStepsB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetMaxNumStepsB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_LONG), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetStabLimDetB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetStabLimDetB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetInitStepB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetInitStepB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetMinStepB(farg1, farg2, farg3) & bind(C, 
name="_wrap_FCVodeSetMinStepB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetMaxStepB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetMaxStepB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetConstraintsB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetConstraintsB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetQuadErrConB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetQuadErrConB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetNonlinearSolverB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetNonlinearSolverB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeGetB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeGetQuadB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeGetQuadB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function 
swigc_FCVodeGetAdjCVodeBmem(farg1, farg2) & bind(C, name="_wrap_FCVodeGetAdjCVodeBmem") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR) :: fresult end function function swigc_FCVodeGetAdjY(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetAdjY") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function subroutine swigc_CVadjCheckPointRec_my_addr_set(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_my_addr_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_CVadjCheckPointRec_my_addr_get(farg1) & bind(C, name="_wrap_CVadjCheckPointRec_my_addr_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR) :: fresult end function subroutine swigc_CVadjCheckPointRec_next_addr_set(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_next_addr_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR), value :: farg2 end subroutine function swigc_CVadjCheckPointRec_next_addr_get(farg1) & bind(C, name="_wrap_CVadjCheckPointRec_next_addr_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 type(C_PTR) :: fresult end function subroutine swigc_CVadjCheckPointRec_t0_set(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_t0_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE), intent(in) :: farg2 end subroutine function swigc_CVadjCheckPointRec_t0_get(farg1) & bind(C, name="_wrap_CVadjCheckPointRec_t0_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: 
fresult end function subroutine swigc_CVadjCheckPointRec_t1_set(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_t1_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE), intent(in) :: farg2 end subroutine function swigc_CVadjCheckPointRec_t1_get(farg1) & bind(C, name="_wrap_CVadjCheckPointRec_t1_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE) :: fresult end function subroutine swigc_CVadjCheckPointRec_nstep_set(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_nstep_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_LONG), intent(in) :: farg2 end subroutine function swigc_CVadjCheckPointRec_nstep_get(farg1) & bind(C, name="_wrap_CVadjCheckPointRec_nstep_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_LONG) :: fresult end function subroutine swigc_CVadjCheckPointRec_order_set(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_order_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_INT), intent(in) :: farg2 end subroutine function swigc_CVadjCheckPointRec_order_get(farg1) & bind(C, name="_wrap_CVadjCheckPointRec_order_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 integer(C_INT) :: fresult end function subroutine swigc_CVadjCheckPointRec_step_set(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_step_set") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 real(C_DOUBLE), intent(in) :: farg2 end subroutine function swigc_CVadjCheckPointRec_step_get(farg1) & bind(C, name="_wrap_CVadjCheckPointRec_step_get") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: farg1 
real(C_DOUBLE) :: fresult end function function swigc_new_CVadjCheckPointRec() & bind(C, name="_wrap_new_CVadjCheckPointRec") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper) :: fresult end function subroutine swigc_delete_CVadjCheckPointRec(farg1) & bind(C, name="_wrap_delete_CVadjCheckPointRec") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper), intent(inout) :: farg1 end subroutine subroutine swigc_CVadjCheckPointRec_op_assign__(farg1, farg2) & bind(C, name="_wrap_CVadjCheckPointRec_op_assign__") use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(SwigClassWrapper), intent(inout) :: farg1 type(SwigClassWrapper) :: farg2 end subroutine function swigc_FCVodeGetAdjCheckPointsInfo(farg1, farg2) & bind(C, name="_wrap_FCVodeGetAdjCheckPointsInfo") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigclasswrapper type(C_PTR), value :: farg1 type(SwigClassWrapper) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacTimesRhsFnB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetJacTimesRhsFnB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeGetAdjDataPointHermite(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeGetAdjDataPointHermite") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeGetAdjDataPointPolynomial(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVodeGetAdjDataPointPolynomial") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 
type(C_PTR), value :: farg5 integer(C_INT) :: fresult end function function swigc_FCVodeGetAdjCurrentCheckPoint(farg1, farg2) & bind(C, name="_wrap_FCVodeGetAdjCurrentCheckPoint") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVBandPrecInit(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVBandPrecInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FCVBandPrecGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FCVBandPrecGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVBandPrecGetNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FCVBandPrecGetNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVBandPrecInitB(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVBandPrecInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 integer(C_INT64_T), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FCVBBDPrecInit(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) & bind(C, name="_wrap_FCVBBDPrecInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 integer(C_INT64_T), intent(in) :: farg5 integer(C_INT64_T), intent(in) :: farg6 real(C_DOUBLE), 
intent(in) :: farg7 type(C_FUNPTR), value :: farg8 type(C_FUNPTR), value :: farg9 integer(C_INT) :: fresult end function function swigc_FCVBBDPrecReInit(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVBBDPrecReInit") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT64_T), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 real(C_DOUBLE), intent(in) :: farg4 integer(C_INT) :: fresult end function function swigc_FCVBBDPrecGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FCVBBDPrecGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVBBDPrecGetNumGfnEvals(farg1, farg2) & bind(C, name="_wrap_FCVBBDPrecGetNumGfnEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVBBDPrecInitB(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10) & bind(C, name="_wrap_FCVBBDPrecInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 integer(C_INT64_T), intent(in) :: farg5 integer(C_INT64_T), intent(in) :: farg6 integer(C_INT64_T), intent(in) :: farg7 real(C_DOUBLE), intent(in) :: farg8 type(C_FUNPTR), value :: farg9 type(C_FUNPTR), value :: farg10 integer(C_INT) :: fresult end function function swigc_FCVBBDPrecReInitB(farg1, farg2, farg3, farg4, farg5) & bind(C, name="_wrap_FCVBBDPrecReInitB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT64_T), intent(in) :: farg3 integer(C_INT64_T), intent(in) :: farg4 real(C_DOUBLE), intent(in) :: farg5 integer(C_INT) :: fresult end function function swigc_FCVDiag(farg1) & bind(C, 
name="_wrap_FCVDiag") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT) :: fresult end function function swigc_FCVDiagGetWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FCVDiagGetWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVDiagGetNumRhsEvals(farg1, farg2) & bind(C, name="_wrap_FCVDiagGetNumRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVDiagGetLastFlag(farg1, farg2) & bind(C, name="_wrap_FCVDiagGetLastFlag") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVDiagGetReturnFlagName(farg1) & bind(C, name="_wrap_FCVDiagGetReturnFlagName") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper integer(C_LONG), intent(in) :: farg1 type(SwigArrayWrapper) :: fresult end function function swigc_FCVDiagB(farg1, farg2) & bind(C, name="_wrap_FCVDiagB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetLinearSolver(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetLinearSolver") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacFn(farg1, farg2) & bind(C, name="_wrap_FCVodeSetJacFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacEvalFrequency(farg1, farg2) & bind(C, name="_wrap_FCVodeSetJacEvalFrequency") & 
result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_LONG), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetLinearSolutionScaling(farg1, farg2) & bind(C, name="_wrap_FCVodeSetLinearSolutionScaling") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetEpsLin(farg1, farg2) & bind(C, name="_wrap_FCVodeSetEpsLin") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetLSNormFactor(farg1, farg2) & bind(C, name="_wrap_FCVodeSetLSNormFactor") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 real(C_DOUBLE), intent(in) :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeSetPreconditioner(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetPreconditioner") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacTimes(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetJacTimes") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetLinSysFn(farg1, farg2) & bind(C, name="_wrap_FCVodeSetLinSysFn") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_FUNPTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetLinWorkSpace(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeGetLinWorkSpace") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 integer(C_INT) :: fresult end function function 
swigc_FCVodeGetNumJacEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumJacEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumPrecEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumPrecEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumPrecSolves(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumPrecSolves") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumLinIters(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumLinIters") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumLinConvFails(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumLinConvFails") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumJTSetupEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumJTSetupEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumJtimesEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumJtimesEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetNumLinRhsEvals(farg1, farg2) & bind(C, name="_wrap_FCVodeGetNumLinRhsEvals") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetLinSolveStats(farg1, farg2, 
farg3, farg4, farg5, farg6, farg7, farg8, farg9) & bind(C, name="_wrap_FCVodeGetLinSolveStats") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 type(C_PTR), value :: farg5 type(C_PTR), value :: farg6 type(C_PTR), value :: farg7 type(C_PTR), value :: farg8 type(C_PTR), value :: farg9 integer(C_INT) :: fresult end function function swigc_FCVodeGetLastLinFlag(farg1, farg2) & bind(C, name="_wrap_FCVodeGetLastLinFlag") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 type(C_PTR), value :: farg2 integer(C_INT) :: fresult end function function swigc_FCVodeGetLinReturnFlagName(farg1) & bind(C, name="_wrap_FCVodeGetLinReturnFlagName") & result(fresult) use, intrinsic :: ISO_C_BINDING import :: swigarraywrapper integer(C_LONG), intent(in) :: farg1 type(SwigArrayWrapper) :: fresult end function function swigc_FCVodeSetLinearSolverB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSetLinearSolverB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_PTR), value :: farg3 type(C_PTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacFnB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetJacFnB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacFnBS(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetJacFnBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetEpsLinB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetEpsLinB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 
integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetLSNormFactorB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetLSNormFactorB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 real(C_DOUBLE), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetLinearSolutionScalingB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetLinearSolutionScalingB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 integer(C_INT), intent(in) :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetPreconditionerB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSetPreconditionerB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeSetPreconditionerBS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSetPreconditionerBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacTimesB(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSetJacTimesB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 integer(C_INT) :: fresult end function function swigc_FCVodeSetJacTimesBS(farg1, farg2, farg3, farg4) & bind(C, name="_wrap_FCVodeSetJacTimesBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 type(C_FUNPTR), value :: farg4 
integer(C_INT) :: fresult end function function swigc_FCVodeSetLinSysFnB(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetLinSysFnB") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function function swigc_FCVodeSetLinSysFnBS(farg1, farg2, farg3) & bind(C, name="_wrap_FCVodeSetLinSysFnBS") & result(fresult) use, intrinsic :: ISO_C_BINDING type(C_PTR), value :: farg1 integer(C_INT), intent(in) :: farg2 type(C_FUNPTR), value :: farg3 integer(C_INT) :: fresult end function end interface contains ! MODULE SUBPROGRAMS function FCVodeCreate(lmm, sunctx) & result(swig_result) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: swig_result integer(C_INT), intent(in) :: lmm type(C_PTR) :: sunctx type(C_PTR) :: fresult integer(C_INT) :: farg1 type(C_PTR) :: farg2 farg1 = lmm farg2 = sunctx fresult = swigc_FCVodeCreate(farg1, farg2) swig_result = fresult end function function FCVodeInit(cvode_mem, f, t0, y0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: f real(C_DOUBLE), intent(in) :: t0 type(N_Vector), target, intent(inout) :: y0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 real(C_DOUBLE) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = f farg3 = t0 farg4 = c_loc(y0) fresult = swigc_FCVodeInit(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeReInit(cvode_mem, t0, y0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: t0 type(N_Vector), target, intent(inout) :: y0 integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = t0 farg3 = c_loc(y0) fresult = swigc_FCVodeReInit(farg1, farg2, farg3) swig_result = fresult end function function 
FCVodeSStolerances(cvode_mem, reltol, abstol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltol real(C_DOUBLE), intent(in) :: abstol integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 real(C_DOUBLE) :: farg3 farg1 = cvode_mem farg2 = reltol farg3 = abstol fresult = swigc_FCVodeSStolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSVtolerances(cvode_mem, reltol, abstol) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltol type(N_Vector), target, intent(inout) :: abstol integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = reltol farg3 = c_loc(abstol) fresult = swigc_FCVodeSVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeWFtolerances(cvode_mem, efun) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: efun integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = cvode_mem farg2 = efun fresult = swigc_FCVodeWFtolerances(farg1, farg2) swig_result = fresult end function function FCVodeSetConstraints(cvode_mem, constraints) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(N_Vector), target, intent(inout) :: constraints integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(constraints) fresult = swigc_FCVodeSetConstraints(farg1, farg2) swig_result = fresult end function function FCVodeSetErrFile(cvode_mem, errfp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR) :: errfp integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = 
cvode_mem farg2 = errfp fresult = swigc_FCVodeSetErrFile(farg1, farg2) swig_result = fresult end function function FCVodeSetErrHandlerFn(cvode_mem, ehfun, eh_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: ehfun type(C_PTR) :: eh_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = ehfun farg3 = eh_data fresult = swigc_FCVodeSetErrHandlerFn(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetInitStep(cvode_mem, hin) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: hin integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = cvode_mem farg2 = hin fresult = swigc_FCVodeSetInitStep(farg1, farg2) swig_result = fresult end function function FCVodeSetLSetupFrequency(cvode_mem, msbp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), intent(in) :: msbp integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = cvode_mem farg2 = msbp fresult = swigc_FCVodeSetLSetupFrequency(farg1, farg2) swig_result = fresult end function function FCVodeSetMaxConvFails(cvode_mem, maxncf) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: maxncf integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = maxncf fresult = swigc_FCVodeSetMaxConvFails(farg1, farg2) swig_result = fresult end function function FCVodeSetMaxErrTestFails(cvode_mem, maxnef) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: maxnef integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = 
cvode_mem farg2 = maxnef fresult = swigc_FCVodeSetMaxErrTestFails(farg1, farg2) swig_result = fresult end function function FCVodeSetMaxHnilWarns(cvode_mem, mxhnil) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: mxhnil integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = mxhnil fresult = swigc_FCVodeSetMaxHnilWarns(farg1, farg2) swig_result = fresult end function function FCVodeSetMaxNonlinIters(cvode_mem, maxcor) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: maxcor integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = maxcor fresult = swigc_FCVodeSetMaxNonlinIters(farg1, farg2) swig_result = fresult end function function FCVodeSetMaxNumSteps(cvode_mem, mxsteps) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), intent(in) :: mxsteps integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = cvode_mem farg2 = mxsteps fresult = swigc_FCVodeSetMaxNumSteps(farg1, farg2) swig_result = fresult end function function FCVodeSetMaxOrd(cvode_mem, maxord) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: maxord integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = maxord fresult = swigc_FCVodeSetMaxOrd(farg1, farg2) swig_result = fresult end function function FCVodeSetMaxStep(cvode_mem, hmax) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: hmax integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = cvode_mem farg2 = hmax fresult = swigc_FCVodeSetMaxStep(farg1, farg2) 
swig_result = fresult end function function FCVodeSetMinStep(cvode_mem, hmin) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: hmin integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = cvode_mem farg2 = hmin fresult = swigc_FCVodeSetMinStep(farg1, farg2) swig_result = fresult end function function FCVodeSetMonitorFn(cvode_mem, fn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: fn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = cvode_mem farg2 = fn fresult = swigc_FCVodeSetMonitorFn(farg1, farg2) swig_result = fresult end function function FCVodeSetMonitorFrequency(cvode_mem, nst) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), intent(in) :: nst integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 farg1 = cvode_mem farg2 = nst fresult = swigc_FCVodeSetMonitorFrequency(farg1, farg2) swig_result = fresult end function function FCVodeSetNlsRhsFn(cvode_mem, f) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: f integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = cvode_mem farg2 = f fresult = swigc_FCVodeSetNlsRhsFn(farg1, farg2) swig_result = fresult end function function FCVodeSetNonlinConvCoef(cvode_mem, nlscoef) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: nlscoef integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = cvode_mem farg2 = nlscoef fresult = swigc_FCVodeSetNonlinConvCoef(farg1, farg2) swig_result = fresult end function function FCVodeSetNonlinearSolver(cvode_mem, nls) & 
result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nls) fresult = swigc_FCVodeSetNonlinearSolver(farg1, farg2) swig_result = fresult end function function FCVodeSetStabLimDet(cvode_mem, stldet) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: stldet integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = stldet fresult = swigc_FCVodeSetStabLimDet(farg1, farg2) swig_result = fresult end function function FCVodeSetStopTime(cvode_mem, tstop) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: tstop integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 farg1 = cvode_mem farg2 = tstop fresult = swigc_FCVodeSetStopTime(farg1, farg2) swig_result = fresult end function function FCVodeSetUserData(cvode_mem, user_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR) :: user_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = user_data fresult = swigc_FCVodeSetUserData(farg1, farg2) swig_result = fresult end function function FCVodeRootInit(cvode_mem, nrtfn, g) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: nrtfn type(C_FUNPTR), intent(in), value :: g integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = cvode_mem farg2 = nrtfn farg3 = g fresult = swigc_FCVodeRootInit(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetRootDirection(cvode_mem, rootdir) & 
result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), dimension(*), target, intent(inout) :: rootdir integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(rootdir(1)) fresult = swigc_FCVodeSetRootDirection(farg1, farg2) swig_result = fresult end function function FCVodeSetNoInactiveRootWarn(cvode_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = cvode_mem fresult = swigc_FCVodeSetNoInactiveRootWarn(farg1) swig_result = fresult end function function FCVode(cvode_mem, tout, yout, tret, itask) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: tout type(N_Vector), target, intent(inout) :: yout real(C_DOUBLE), dimension(*), target, intent(inout) :: tret integer(C_INT), intent(in) :: itask integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 integer(C_INT) :: farg5 farg1 = cvode_mem farg2 = tout farg3 = c_loc(yout) farg4 = c_loc(tret(1)) farg5 = itask fresult = swigc_FCVode(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FCVodeComputeState(cvode_mem, ycor, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(N_Vector), target, intent(inout) :: ycor type(N_Vector), target, intent(inout) :: y integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(ycor) farg3 = c_loc(y) fresult = swigc_FCVodeComputeState(farg1, farg2, farg3) swig_result = fresult end function function FCVodeComputeStateSens(cvode_mem, yscor, ys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR) :: yscor 
type(C_PTR) :: ys integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = yscor farg3 = ys fresult = swigc_FCVodeComputeStateSens(farg1, farg2, farg3) swig_result = fresult end function function FCVodeComputeStateSens1(cvode_mem, idx, yscor1, ys1) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: idx type(N_Vector), target, intent(inout) :: yscor1 type(N_Vector), target, intent(inout) :: ys1 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = idx farg3 = c_loc(yscor1) farg4 = c_loc(ys1) fresult = swigc_FCVodeComputeStateSens1(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeGetDky(cvode_mem, t, k, dky) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(N_Vector), target, intent(inout) :: dky integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = t farg3 = k farg4 = c_loc(dky) fresult = swigc_FCVodeGetDky(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeGetWorkSpace(cvode_mem, lenrw, leniw) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: lenrw integer(C_LONG), dimension(*), target, intent(inout) :: leniw integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(lenrw(1)) farg3 = c_loc(leniw(1)) fresult = swigc_FCVodeGetWorkSpace(farg1, farg2, farg3) swig_result = fresult end function function FCVodeGetNumSteps(cvode_mem, nsteps) & result(swig_result) use, intrinsic :: ISO_C_BINDING 
integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsteps integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nsteps(1)) fresult = swigc_FCVodeGetNumSteps(farg1, farg2) swig_result = fresult end function function FCVodeGetNumRhsEvals(cvode_mem, nfevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nfevals(1)) fresult = swigc_FCVodeGetNumRhsEvals(farg1, farg2) swig_result = fresult end function function FCVodeGetNumLinSolvSetups(cvode_mem, nlinsetups) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetups integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nlinsetups(1)) fresult = swigc_FCVodeGetNumLinSolvSetups(farg1, farg2) swig_result = fresult end function function FCVodeGetNumErrTestFails(cvode_mem, netfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: netfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(netfails(1)) fresult = swigc_FCVodeGetNumErrTestFails(farg1, farg2) swig_result = fresult end function function FCVodeGetLastOrder(cvode_mem, qlast) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), dimension(*), target, intent(inout) :: qlast integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(qlast(1)) fresult = swigc_FCVodeGetLastOrder(farg1, farg2) swig_result = 
fresult end function function FCVodeGetCurrentOrder(cvode_mem, qcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), dimension(*), target, intent(inout) :: qcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(qcur(1)) fresult = swigc_FCVodeGetCurrentOrder(farg1, farg2) swig_result = fresult end function function FCVodeGetCurrentGamma(cvode_mem, gamma) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: gamma integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(gamma(1)) fresult = swigc_FCVodeGetCurrentGamma(farg1, farg2) swig_result = fresult end function function FCVodeGetNumStabLimOrderReds(cvode_mem, nslred) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nslred integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nslred(1)) fresult = swigc_FCVodeGetNumStabLimOrderReds(farg1, farg2) swig_result = fresult end function function FCVodeGetActualInitStep(cvode_mem, hinused) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: hinused integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(hinused(1)) fresult = swigc_FCVodeGetActualInitStep(farg1, farg2) swig_result = fresult end function function FCVodeGetLastStep(cvode_mem, hlast) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: hlast integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 
= cvode_mem farg2 = c_loc(hlast(1)) fresult = swigc_FCVodeGetLastStep(farg1, farg2) swig_result = fresult end function function FCVodeGetCurrentStep(cvode_mem, hcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: hcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(hcur(1)) fresult = swigc_FCVodeGetCurrentStep(farg1, farg2) swig_result = fresult end function function FCVodeGetCurrentState(cvode_mem, y) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR) :: y integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = y fresult = swigc_FCVodeGetCurrentState(farg1, farg2) swig_result = fresult end function function FCVodeGetCurrentStateSens(cvode_mem, ys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR), target, intent(inout) :: ys integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(ys) fresult = swigc_FCVodeGetCurrentStateSens(farg1, farg2) swig_result = fresult end function function FCVodeGetCurrentSensSolveIndex(cvode_mem, index) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), dimension(*), target, intent(inout) :: index integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(index(1)) fresult = swigc_FCVodeGetCurrentSensSolveIndex(farg1, farg2) swig_result = fresult end function function FCVodeGetCurrentTime(cvode_mem, tcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 
farg1 = cvode_mem farg2 = c_loc(tcur(1)) fresult = swigc_FCVodeGetCurrentTime(farg1, farg2) swig_result = fresult end function function FCVodeGetTolScaleFactor(cvode_mem, tolsfac) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tolsfac integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(tolsfac(1)) fresult = swigc_FCVodeGetTolScaleFactor(farg1, farg2) swig_result = fresult end function function FCVodeGetErrWeights(cvode_mem, eweight) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(N_Vector), target, intent(inout) :: eweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(eweight) fresult = swigc_FCVodeGetErrWeights(farg1, farg2) swig_result = fresult end function function FCVodeGetEstLocalErrors(cvode_mem, ele) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(N_Vector), target, intent(inout) :: ele integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(ele) fresult = swigc_FCVodeGetEstLocalErrors(farg1, farg2) swig_result = fresult end function function FCVodeGetNumGEvals(cvode_mem, ngevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: ngevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(ngevals(1)) fresult = swigc_FCVodeGetNumGEvals(farg1, farg2) swig_result = fresult end function function FCVodeGetRootInfo(cvode_mem, rootsfound) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), dimension(*), target, intent(inout) :: rootsfound 
integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(rootsfound(1)) fresult = swigc_FCVodeGetRootInfo(farg1, farg2) swig_result = fresult end function function FCVodeGetIntegratorStats(cvode_mem, nsteps, nfevals, nlinsetups, netfails, qlast, qcur, hinused, hlast, hcur, tcur) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsteps integer(C_LONG), dimension(*), target, intent(inout) :: nfevals integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetups integer(C_LONG), dimension(*), target, intent(inout) :: netfails integer(C_INT), dimension(*), target, intent(inout) :: qlast integer(C_INT), dimension(*), target, intent(inout) :: qcur real(C_DOUBLE), dimension(*), target, intent(inout) :: hinused real(C_DOUBLE), dimension(*), target, intent(inout) :: hlast real(C_DOUBLE), dimension(*), target, intent(inout) :: hcur real(C_DOUBLE), dimension(*), target, intent(inout) :: tcur integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 type(C_PTR) :: farg8 type(C_PTR) :: farg9 type(C_PTR) :: farg10 type(C_PTR) :: farg11 farg1 = cvode_mem farg2 = c_loc(nsteps(1)) farg3 = c_loc(nfevals(1)) farg4 = c_loc(nlinsetups(1)) farg5 = c_loc(netfails(1)) farg6 = c_loc(qlast(1)) farg7 = c_loc(qcur(1)) farg8 = c_loc(hinused(1)) farg9 = c_loc(hlast(1)) farg10 = c_loc(hcur(1)) farg11 = c_loc(tcur(1)) fresult = swigc_FCVodeGetIntegratorStats(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10, farg11) swig_result = fresult end function function FCVodeGetNonlinearSystemData(cvode_mem, tcur, ypred, yn, fn, gamma, rl1, zn1, user_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) 
:: tcur type(C_PTR) :: ypred type(C_PTR) :: yn type(C_PTR) :: fn real(C_DOUBLE), dimension(*), target, intent(inout) :: gamma real(C_DOUBLE), dimension(*), target, intent(inout) :: rl1 type(C_PTR) :: zn1 type(C_PTR), target, intent(inout) :: user_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 type(C_PTR) :: farg8 type(C_PTR) :: farg9 farg1 = cvode_mem farg2 = c_loc(tcur(1)) farg3 = ypred farg4 = yn farg5 = fn farg6 = c_loc(gamma(1)) farg7 = c_loc(rl1(1)) farg8 = zn1 farg9 = c_loc(user_data) fresult = swigc_FCVodeGetNonlinearSystemData(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9) swig_result = fresult end function function FCVodeGetNonlinearSystemDataSens(cvode_mem, tcur, yspred, ysn, gamma, rl1, zn1, user_data) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tcur type(C_PTR), target, intent(inout) :: yspred type(C_PTR), target, intent(inout) :: ysn real(C_DOUBLE), dimension(*), target, intent(inout) :: gamma real(C_DOUBLE), dimension(*), target, intent(inout) :: rl1 type(C_PTR), target, intent(inout) :: zn1 type(C_PTR), target, intent(inout) :: user_data integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 type(C_PTR) :: farg6 type(C_PTR) :: farg7 type(C_PTR) :: farg8 farg1 = cvode_mem farg2 = c_loc(tcur(1)) farg3 = c_loc(yspred) farg4 = c_loc(ysn) farg5 = c_loc(gamma(1)) farg6 = c_loc(rl1(1)) farg7 = c_loc(zn1) farg8 = c_loc(user_data) fresult = swigc_FCVodeGetNonlinearSystemDataSens(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8) swig_result = fresult end function function FCVodeGetNumNonlinSolvIters(cvode_mem, nniters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: 
cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nniters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nniters(1)) fresult = swigc_FCVodeGetNumNonlinSolvIters(farg1, farg2) swig_result = fresult end function function FCVodeGetNumNonlinSolvConvFails(cvode_mem, nncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nncfails(1)) fresult = swigc_FCVodeGetNumNonlinSolvConvFails(farg1, farg2) swig_result = fresult end function function FCVodeGetNonlinSolvStats(cvode_mem, nniters, nncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nniters integer(C_LONG), dimension(*), target, intent(inout) :: nncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(nniters(1)) farg3 = c_loc(nncfails(1)) fresult = swigc_FCVodeGetNonlinSolvStats(farg1, farg2, farg3) swig_result = fresult end function subroutine SWIG_chararray_to_string(wrap, string) use, intrinsic :: ISO_C_BINDING type(SwigArrayWrapper), intent(IN) :: wrap character(kind=C_CHAR, len=:), allocatable, intent(OUT) :: string character(kind=C_CHAR), dimension(:), pointer :: chars integer(kind=C_SIZE_T) :: i call c_f_pointer(wrap%data, chars, [wrap%size]) allocate(character(kind=C_CHAR, len=wrap%size) :: string) do i=1, wrap%size string(i:i) = chars(i) end do end subroutine function FCVodeGetReturnFlagName(flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=:), allocatable :: swig_result integer(C_LONG), intent(in) :: flag type(SwigArrayWrapper) :: fresult integer(C_LONG) :: farg1 farg1 = flag fresult = 
swigc_FCVodeGetReturnFlagName(farg1) call SWIG_chararray_to_string(fresult, swig_result) if (.false.) call SWIG_free(fresult%data) end function subroutine FCVodeFree(cvode_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR), target, intent(inout) :: cvode_mem type(C_PTR) :: farg1 farg1 = c_loc(cvode_mem) call swigc_FCVodeFree(farg1) end subroutine function FCVodeSetJacTimesRhsFn(cvode_mem, jtimesrhsfn) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: jtimesrhsfn integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 farg1 = cvode_mem farg2 = jtimesrhsfn fresult = swigc_FCVodeSetJacTimesRhsFn(farg1, farg2) swig_result = fresult end function function FCVodeQuadInit(cvode_mem, fq, yq0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: fq type(N_Vector), target, intent(inout) :: yq0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = fq farg3 = c_loc(yq0) fresult = swigc_FCVodeQuadInit(farg1, farg2, farg3) swig_result = fresult end function function FCVodeQuadReInit(cvode_mem, yq0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(N_Vector), target, intent(inout) :: yq0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(yq0) fresult = swigc_FCVodeQuadReInit(farg1, farg2) swig_result = fresult end function function FCVodeQuadSStolerances(cvode_mem, reltolq, abstolq) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltolq real(C_DOUBLE), intent(in) :: abstolq integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 real(C_DOUBLE) :: farg3 farg1 = cvode_mem farg2 = reltolq farg3 = abstolq 
fresult = swigc_FCVodeQuadSStolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeQuadSVtolerances(cvode_mem, reltolq, abstolq) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltolq type(N_Vector), target, intent(inout) :: abstolq integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = reltolq farg3 = c_loc(abstolq) fresult = swigc_FCVodeQuadSVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetQuadErrCon(cvode_mem, errconq) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: errconq integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = errconq fresult = swigc_FCVodeSetQuadErrCon(farg1, farg2) swig_result = fresult end function function FCVodeGetQuad(cvode_mem, tret, yqout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(N_Vector), target, intent(inout) :: yqout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(tret(1)) farg3 = c_loc(yqout) fresult = swigc_FCVodeGetQuad(farg1, farg2, farg3) swig_result = fresult end function function FCVodeGetQuadDky(cvode_mem, t, k, dky) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(N_Vector), target, intent(inout) :: dky integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = t farg3 = k farg4 = c_loc(dky) fresult = swigc_FCVodeGetQuadDky(farg1, farg2, farg3, farg4) swig_result = 
fresult end function function FCVodeGetQuadNumRhsEvals(cvode_mem, nfqevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfqevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nfqevals(1)) fresult = swigc_FCVodeGetQuadNumRhsEvals(farg1, farg2) swig_result = fresult end function function FCVodeGetQuadNumErrTestFails(cvode_mem, nqetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nqetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nqetfails(1)) fresult = swigc_FCVodeGetQuadNumErrTestFails(farg1, farg2) swig_result = fresult end function function FCVodeGetQuadErrWeights(cvode_mem, eqweight) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(N_Vector), target, intent(inout) :: eqweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(eqweight) fresult = swigc_FCVodeGetQuadErrWeights(farg1, farg2) swig_result = fresult end function function FCVodeGetQuadStats(cvode_mem, nfqevals, nqetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfqevals integer(C_LONG), dimension(*), target, intent(inout) :: nqetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(nfqevals(1)) farg3 = c_loc(nqetfails(1)) fresult = swigc_FCVodeGetQuadStats(farg1, farg2, farg3) swig_result = fresult end function subroutine FCVodeQuadFree(cvode_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: cvode_mem type(C_PTR) :: farg1 farg1 = cvode_mem call 
swigc_FCVodeQuadFree(farg1) end subroutine function FCVodeSensInit(cvode_mem, ns, ism, fs, ys0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: ns integer(C_INT), intent(in) :: ism type(C_FUNPTR), intent(in), value :: fs type(C_PTR) :: ys0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_FUNPTR) :: farg4 type(C_PTR) :: farg5 farg1 = cvode_mem farg2 = ns farg3 = ism farg4 = fs farg5 = ys0 fresult = swigc_FCVodeSensInit(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FCVodeSensInit1(cvode_mem, ns, ism, fs1, ys0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: ns integer(C_INT), intent(in) :: ism type(C_FUNPTR), intent(in), value :: fs1 type(C_PTR) :: ys0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 type(C_FUNPTR) :: farg4 type(C_PTR) :: farg5 farg1 = cvode_mem farg2 = ns farg3 = ism farg4 = fs1 farg5 = ys0 fresult = swigc_FCVodeSensInit1(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FCVodeSensReInit(cvode_mem, ism, ys0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: ism type(C_PTR) :: ys0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = ism farg3 = ys0 fresult = swigc_FCVodeSensReInit(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSensSStolerances(cvode_mem, reltols, abstols) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltols real(C_DOUBLE), dimension(*), target, intent(inout) :: abstols integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) 
:: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = reltols farg3 = c_loc(abstols(1)) fresult = swigc_FCVodeSensSStolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSensSVtolerances(cvode_mem, reltols, abstols) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltols type(C_PTR) :: abstols integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = reltols farg3 = abstols fresult = swigc_FCVodeSensSVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSensEEtolerances(cvode_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = cvode_mem fresult = swigc_FCVodeSensEEtolerances(farg1) swig_result = fresult end function function FCVodeSetSensDQMethod(cvode_mem, dqtype, dqrhomax) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: dqtype real(C_DOUBLE), intent(in) :: dqrhomax integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = cvode_mem farg2 = dqtype farg3 = dqrhomax fresult = swigc_FCVodeSetSensDQMethod(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetSensErrCon(cvode_mem, errcons) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: errcons integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = errcons fresult = swigc_FCVodeSetSensErrCon(farg1, farg2) swig_result = fresult end function function FCVodeSetSensMaxNonlinIters(cvode_mem, maxcors) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem 
integer(C_INT), intent(in) :: maxcors integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = maxcors fresult = swigc_FCVodeSetSensMaxNonlinIters(farg1, farg2) swig_result = fresult end function function FCVodeSetSensParams(cvode_mem, p, pbar, plist) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: p real(C_DOUBLE), dimension(*), target, intent(inout) :: pbar integer(C_INT), dimension(*), target, intent(inout) :: plist integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = c_loc(p(1)) farg3 = c_loc(pbar(1)) farg4 = c_loc(plist(1)) fresult = swigc_FCVodeSetSensParams(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeSetNonlinearSolverSensSim(cvode_mem, nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nls) fresult = swigc_FCVodeSetNonlinearSolverSensSim(farg1, farg2) swig_result = fresult end function function FCVodeSetNonlinearSolverSensStg(cvode_mem, nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nls) fresult = swigc_FCVodeSetNonlinearSolverSensStg(farg1, farg2) swig_result = fresult end function function FCVodeSetNonlinearSolverSensStg1(cvode_mem, nls) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(SUNNonlinearSolver), target, intent(inout) :: nls integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) 
:: farg2 farg1 = cvode_mem farg2 = c_loc(nls) fresult = swigc_FCVodeSetNonlinearSolverSensStg1(farg1, farg2) swig_result = fresult end function function FCVodeSensToggleOff(cvode_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = cvode_mem fresult = swigc_FCVodeSensToggleOff(farg1) swig_result = fresult end function function FCVodeGetSens(cvode_mem, tret, ysout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(C_PTR) :: ysout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(tret(1)) farg3 = ysout fresult = swigc_FCVodeGetSens(farg1, farg2, farg3) swig_result = fresult end function function FCVodeGetSens1(cvode_mem, tret, is, ysout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: ysout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = c_loc(tret(1)) farg3 = is farg4 = c_loc(ysout) fresult = swigc_FCVodeGetSens1(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeGetSensDky(cvode_mem, t, k, dkya) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(C_PTR) :: dkya integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = t farg3 = k farg4 = dkya fresult = swigc_FCVodeGetSensDky(farg1, farg2, farg3, farg4) swig_result = fresult end function function 
FCVodeGetSensDky1(cvode_mem, t, k, is, dky) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: dky integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 farg1 = cvode_mem farg2 = t farg3 = k farg4 = is farg5 = c_loc(dky) fresult = swigc_FCVodeGetSensDky1(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FCVodeGetSensNumRhsEvals(cvode_mem, nfsevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfsevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nfsevals(1)) fresult = swigc_FCVodeGetSensNumRhsEvals(farg1, farg2) swig_result = fresult end function function FCVodeGetNumRhsEvalsSens(cvode_mem, nfevalss) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfevalss integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nfevalss(1)) fresult = swigc_FCVodeGetNumRhsEvalsSens(farg1, farg2) swig_result = fresult end function function FCVodeGetSensNumErrTestFails(cvode_mem, nsetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nsetfails(1)) fresult = swigc_FCVodeGetSensNumErrTestFails(farg1, farg2) swig_result = fresult end function function FCVodeGetSensNumLinSolvSetups(cvode_mem, nlinsetupss) & result(swig_result) 
use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetupss integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nlinsetupss(1)) fresult = swigc_FCVodeGetSensNumLinSolvSetups(farg1, farg2) swig_result = fresult end function function FCVodeGetSensErrWeights(cvode_mem, esweight) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR) :: esweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = esweight fresult = swigc_FCVodeGetSensErrWeights(farg1, farg2) swig_result = fresult end function function FCVodeGetSensStats(cvode_mem, nfsevals, nfevalss, nsetfails, nlinsetupss) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfsevals integer(C_LONG), dimension(*), target, intent(inout) :: nfevalss integer(C_LONG), dimension(*), target, intent(inout) :: nsetfails integer(C_LONG), dimension(*), target, intent(inout) :: nlinsetupss integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 type(C_PTR) :: farg5 farg1 = cvode_mem farg2 = c_loc(nfsevals(1)) farg3 = c_loc(nfevalss(1)) farg4 = c_loc(nsetfails(1)) farg5 = c_loc(nlinsetupss(1)) fresult = swigc_FCVodeGetSensStats(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FCVodeGetSensNumNonlinSolvIters(cvode_mem, nsniters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsniters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nsniters(1)) fresult = swigc_FCVodeGetSensNumNonlinSolvIters(farg1, farg2) swig_result = fresult 
end function function FCVodeGetSensNumNonlinSolvConvFails(cvode_mem, nsncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nsncfails(1)) fresult = swigc_FCVodeGetSensNumNonlinSolvConvFails(farg1, farg2) swig_result = fresult end function function FCVodeGetSensNonlinSolvStats(cvode_mem, nsniters, nsncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nsniters integer(C_LONG), dimension(*), target, intent(inout) :: nsncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(nsniters(1)) farg3 = c_loc(nsncfails(1)) fresult = swigc_FCVodeGetSensNonlinSolvStats(farg1, farg2, farg3) swig_result = fresult end function function FCVodeGetStgrSensNumNonlinSolvIters(cvode_mem, nstgr1niters) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nstgr1niters integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nstgr1niters(1)) fresult = swigc_FCVodeGetStgrSensNumNonlinSolvIters(farg1, farg2) swig_result = fresult end function function FCVodeGetStgrSensNumNonlinSolvConvFails(cvode_mem, nstgr1ncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nstgr1ncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nstgr1ncfails(1)) fresult = swigc_FCVodeGetStgrSensNumNonlinSolvConvFails(farg1, farg2) swig_result = fresult end function function 
FCVodeGetStgrSensNonlinSolvStats(cvode_mem, nstgr1niters, nstgr1ncfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nstgr1niters integer(C_LONG), dimension(*), target, intent(inout) :: nstgr1ncfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(nstgr1niters(1)) farg3 = c_loc(nstgr1ncfails(1)) fresult = swigc_FCVodeGetStgrSensNonlinSolvStats(farg1, farg2, farg3) swig_result = fresult end function subroutine FCVodeSensFree(cvode_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: cvode_mem type(C_PTR) :: farg1 farg1 = cvode_mem call swigc_FCVodeSensFree(farg1) end subroutine function FCVodeQuadSensInit(cvode_mem, fqs, yqs0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_FUNPTR), intent(in), value :: fqs type(C_PTR) :: yqs0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_FUNPTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = fqs farg3 = yqs0 fresult = swigc_FCVodeQuadSensInit(farg1, farg2, farg3) swig_result = fresult end function function FCVodeQuadSensReInit(cvode_mem, yqs0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR) :: yqs0 integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = yqs0 fresult = swigc_FCVodeQuadSensReInit(farg1, farg2) swig_result = fresult end function function FCVodeQuadSensSStolerances(cvode_mem, reltolqs, abstolqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltolqs real(C_DOUBLE), dimension(*), target, intent(inout) :: abstolqs integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = reltolqs farg3 = 
c_loc(abstolqs(1)) fresult = swigc_FCVodeQuadSensSStolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeQuadSensSVtolerances(cvode_mem, reltolqs, abstolqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: reltolqs type(C_PTR) :: abstolqs integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = reltolqs farg3 = abstolqs fresult = swigc_FCVodeQuadSensSVtolerances(farg1, farg2, farg3) swig_result = fresult end function function FCVodeQuadSensEEtolerances(cvode_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = cvode_mem fresult = swigc_FCVodeQuadSensEEtolerances(farg1) swig_result = fresult end function function FCVodeSetQuadSensErrCon(cvode_mem, errconqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: errconqs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 farg1 = cvode_mem farg2 = errconqs fresult = swigc_FCVodeSetQuadSensErrCon(farg1, farg2) swig_result = fresult end function function FCVodeGetQuadSens(cvode_mem, tret, yqsout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) :: tret type(C_PTR) :: yqsout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(tret(1)) farg3 = yqsout fresult = swigc_FCVodeGetQuadSens(farg1, farg2, farg3) swig_result = fresult end function function FCVodeGetQuadSens1(cvode_mem, tret, is, yqsout) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), dimension(*), target, intent(inout) 
:: tret integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: yqsout integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = c_loc(tret(1)) farg3 = is farg4 = c_loc(yqsout) fresult = swigc_FCVodeGetQuadSens1(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeGetQuadSensDky(cvode_mem, t, k, dkyqs_all) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k type(C_PTR) :: dkyqs_all integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = t farg3 = k farg4 = dkyqs_all fresult = swigc_FCVodeGetQuadSensDky(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeGetQuadSensDky1(cvode_mem, t, k, is, dkyqs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem real(C_DOUBLE), intent(in) :: t integer(C_INT), intent(in) :: k integer(C_INT), intent(in) :: is type(N_Vector), target, intent(inout) :: dkyqs integer(C_INT) :: fresult type(C_PTR) :: farg1 real(C_DOUBLE) :: farg2 integer(C_INT) :: farg3 integer(C_INT) :: farg4 type(C_PTR) :: farg5 farg1 = cvode_mem farg2 = t farg3 = k farg4 = is farg5 = c_loc(dkyqs) fresult = swigc_FCVodeGetQuadSensDky1(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FCVodeGetQuadSensNumRhsEvals(cvode_mem, nfqsevals) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfqsevals integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nfqsevals(1)) fresult = swigc_FCVodeGetQuadSensNumRhsEvals(farg1, farg2) swig_result = fresult end function function 
FCVodeGetQuadSensNumErrTestFails(cvode_mem, nqsetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nqsetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = c_loc(nqsetfails(1)) fresult = swigc_FCVodeGetQuadSensNumErrTestFails(farg1, farg2) swig_result = fresult end function function FCVodeGetQuadSensErrWeights(cvode_mem, eqsweight) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem type(C_PTR) :: eqsweight integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 farg1 = cvode_mem farg2 = eqsweight fresult = swigc_FCVodeGetQuadSensErrWeights(farg1, farg2) swig_result = fresult end function function FCVodeGetQuadSensStats(cvode_mem, nfqsevals, nqsetfails) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), dimension(*), target, intent(inout) :: nfqsevals integer(C_LONG), dimension(*), target, intent(inout) :: nqsetfails integer(C_INT) :: fresult type(C_PTR) :: farg1 type(C_PTR) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = c_loc(nfqsevals(1)) farg3 = c_loc(nqsetfails(1)) fresult = swigc_FCVodeGetQuadSensStats(farg1, farg2, farg3) swig_result = fresult end function subroutine FCVodeQuadSensFree(cvode_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: cvode_mem type(C_PTR) :: farg1 farg1 = cvode_mem call swigc_FCVodeQuadSensFree(farg1) end subroutine function FCVodeAdjInit(cvode_mem, steps, interp) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_LONG), intent(in) :: steps integer(C_INT), intent(in) :: interp integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_LONG) :: farg2 integer(C_INT) :: farg3 farg1 = cvode_mem farg2 = steps farg3 = interp fresult = 
swigc_FCVodeAdjInit(farg1, farg2, farg3) swig_result = fresult end function function FCVodeAdjReInit(cvode_mem) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT) :: fresult type(C_PTR) :: farg1 farg1 = cvode_mem fresult = swigc_FCVodeAdjReInit(farg1) swig_result = fresult end function subroutine FCVodeAdjFree(cvode_mem) use, intrinsic :: ISO_C_BINDING type(C_PTR) :: cvode_mem type(C_PTR) :: farg1 farg1 = cvode_mem call swigc_FCVodeAdjFree(farg1) end subroutine function FCVodeCreateB(cvode_mem, lmmb, which) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: lmmb integer(C_INT), dimension(*), target, intent(inout) :: which integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 farg1 = cvode_mem farg2 = lmmb farg3 = c_loc(which(1)) fresult = swigc_FCVodeCreateB(farg1, farg2, farg3) swig_result = fresult end function function FCVodeInitB(cvode_mem, which, fb, tb0, yb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: fb real(C_DOUBLE), intent(in) :: tb0 type(N_Vector), target, intent(inout) :: yb0 integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 real(C_DOUBLE) :: farg4 type(C_PTR) :: farg5 farg1 = cvode_mem farg2 = which farg3 = fb farg4 = tb0 farg5 = c_loc(yb0) fresult = swigc_FCVodeInitB(farg1, farg2, farg3, farg4, farg5) swig_result = fresult end function function FCVodeInitBS(cvode_mem, which, fbs, tb0, yb0) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: fbs real(C_DOUBLE), intent(in) :: tb0 type(N_Vector), target, intent(inout) :: yb0 integer(C_INT) :: fresult 
! ---------------------------------------------------------------------------
! SWIG-generated Fortran bindings for the CVODES adjoint (backward) C API.
! Pattern used by every wrapper below: copy each dummy argument into a
! C-interoperable local (fargN), call the bound C routine (swigc_*), and
! return its integer status code.  N_Vector arguments are passed by address
! via c_loc().
! NOTE(review): machine-generated wrapper code - hand edits will be lost if
! the bindings are regenerated; only comments are added here.
! ---------------------------------------------------------------------------

! Tail of FCVodeInitBS (its header lies before this chunk): marshal the
! argument list and forward to the C entry point.
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_FUNPTR) :: farg3
real(C_DOUBLE) :: farg4
type(C_PTR) :: farg5

farg1 = cvode_mem
farg2 = which
farg3 = fbs
farg4 = tb0
farg5 = c_loc(yb0)
fresult = swigc_FCVodeInitBS(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Wrapper for CVodeReInitB: forwards (which, tb0, yb0) to swigc_FCVodeReInitB.
function FCVodeReInitB(cvode_mem, which, tb0, yb0) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: tb0
type(N_Vector), target, intent(inout) :: yb0
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3
type(C_PTR) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = tb0
farg4 = c_loc(yb0)
fresult = swigc_FCVodeReInitB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeSStolerancesB: scalar relative/absolute tolerances.
function FCVodeSStolerancesB(cvode_mem, which, reltolb, abstolb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: reltolb
real(C_DOUBLE), intent(in) :: abstolb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3
real(C_DOUBLE) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = reltolb
farg4 = abstolb
fresult = swigc_FCVodeSStolerancesB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeSVtolerancesB: scalar reltol, vector (N_Vector) abstol.
function FCVodeSVtolerancesB(cvode_mem, which, reltolb, abstolb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: reltolb
type(N_Vector), target, intent(inout) :: abstolb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3
type(C_PTR) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = reltolb
farg4 = c_loc(abstolb)
fresult = swigc_FCVodeSVtolerancesB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeQuadInitB: fqb is the C function pointer for the
! backward quadrature right-hand side.
function FCVodeQuadInitB(cvode_mem, which, fqb, yqb0) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(C_FUNPTR), intent(in), value :: fqb
type(N_Vector), target, intent(inout) :: yqb0
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_FUNPTR) :: farg3
type(C_PTR) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = fqb
farg4 = c_loc(yqb0)
fresult = swigc_FCVodeQuadInitB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeQuadInitBS (sensitivity-dependent variant of the above).
function FCVodeQuadInitBS(cvode_mem, which, fqbs, yqb0) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(C_FUNPTR), intent(in), value :: fqbs
type(N_Vector), target, intent(inout) :: yqb0
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_FUNPTR) :: farg3
type(C_PTR) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = fqbs
farg4 = c_loc(yqb0)
fresult = swigc_FCVodeQuadInitBS(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeQuadReInitB.
function FCVodeQuadReInitB(cvode_mem, which, yqb0) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(N_Vector), target, intent(inout) :: yqb0
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = c_loc(yqb0)
fresult = swigc_FCVodeQuadReInitB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeQuadSStolerancesB: scalar quadrature tolerances.
function FCVodeQuadSStolerancesB(cvode_mem, which, reltolqb, abstolqb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: reltolqb
real(C_DOUBLE), intent(in) :: abstolqb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3
real(C_DOUBLE) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = reltolqb
farg4 = abstolqb
fresult = swigc_FCVodeQuadSStolerancesB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeQuadSVtolerancesB: vector quadrature abstol.
function FCVodeQuadSVtolerancesB(cvode_mem, which, reltolqb, abstolqb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: reltolqb
type(N_Vector), target, intent(inout) :: abstolqb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3
type(C_PTR) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = reltolqb
farg4 = c_loc(abstolqb)
fresult = swigc_FCVodeQuadSVtolerancesB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeF (forward integration with checkpointing).  tret and
! ncheckptr are assumed-size buffers; only their first element's address is
! passed to C.
function FCVodeF(cvode_mem, tout, yout, tret, itask, ncheckptr) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
real(C_DOUBLE), intent(in) :: tout
type(N_Vector), target, intent(inout) :: yout
real(C_DOUBLE), dimension(*), target, intent(inout) :: tret
integer(C_INT), intent(in) :: itask
integer(C_INT), dimension(*), target, intent(inout) :: ncheckptr
integer(C_INT) :: fresult
type(C_PTR) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
integer(C_INT) :: farg5
type(C_PTR) :: farg6

farg1 = cvode_mem
farg2 = tout
farg3 = c_loc(yout)
farg4 = c_loc(tret(1))
farg5 = itask
farg6 = c_loc(ncheckptr(1))
fresult = swigc_FCVodeF(farg1, farg2, farg3, farg4, farg5, farg6)
swig_result = fresult
end function

! Wrapper for CVodeB (backward integration).  The closing "end function"
! falls on the next source line of this chunk.
function FCVodeB(cvode_mem, tbout, itaskb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
real(C_DOUBLE), intent(in) :: tbout
integer(C_INT), intent(in) :: itaskb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
real(C_DOUBLE) :: farg2
integer(C_INT) :: farg3

farg1 = cvode_mem
farg2 = tbout
farg3 = itaskb
fresult = swigc_FCVodeB(farg1, farg2, farg3)
swig_result = fresult
! Closes FCVodeB, whose body begins on the previous source line.
end function

! Wrapper for CVodeSetAdjNoSensi (no arguments besides the solver handle).
function FCVodeSetAdjNoSensi(cvode_mem) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT) :: fresult
type(C_PTR) :: farg1

farg1 = cvode_mem
fresult = swigc_FCVodeSetAdjNoSensi(farg1)
swig_result = fresult
end function

! Wrapper for CVodeSetUserDataB: attaches an opaque user-data pointer to
! backward problem `which`.
function FCVodeSetUserDataB(cvode_mem, which, user_datab) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(C_PTR) :: user_datab
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = user_datab
fresult = swigc_FCVodeSetUserDataB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetMaxOrdB.
function FCVodeSetMaxOrdB(cvode_mem, which, maxordb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_INT), intent(in) :: maxordb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
integer(C_INT) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = maxordb
fresult = swigc_FCVodeSetMaxOrdB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetMaxNumStepsB (C_LONG step count).
function FCVodeSetMaxNumStepsB(cvode_mem, which, mxstepsb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_LONG), intent(in) :: mxstepsb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
integer(C_LONG) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = mxstepsb
fresult = swigc_FCVodeSetMaxNumStepsB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetStabLimDetB.
function FCVodeSetStabLimDetB(cvode_mem, which, stldetb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_INT), intent(in) :: stldetb
integer(C_INT) :: fresult
type(C_PTR) :: &
farg1
integer(C_INT) :: farg2
integer(C_INT) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = stldetb
fresult = swigc_FCVodeSetStabLimDetB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetInitStepB.
function FCVodeSetInitStepB(cvode_mem, which, hinb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: hinb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = hinb
fresult = swigc_FCVodeSetInitStepB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetMinStepB.
function FCVodeSetMinStepB(cvode_mem, which, hminb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: hminb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = hminb
fresult = swigc_FCVodeSetMinStepB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetMaxStepB.
function FCVodeSetMaxStepB(cvode_mem, which, hmaxb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), intent(in) :: hmaxb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
real(C_DOUBLE) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = hmaxb
fresult = swigc_FCVodeSetMaxStepB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetConstraintsB (constraints vector passed via c_loc).
function FCVodeSetConstraintsB(cvode_mem, which, constraintsb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(N_Vector), target, intent(inout) :: constraintsb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = c_loc(constraintsb)
fresult = swigc_FCVodeSetConstraintsB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetQuadErrConB.
function FCVodeSetQuadErrConB(cvode_mem, which, errconqb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_INT), intent(in) :: errconqb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
integer(C_INT) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = errconqb
fresult = swigc_FCVodeSetQuadErrConB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetNonlinearSolverB (SUNNonlinearSolver via c_loc).
function FCVodeSetNonlinearSolverB(cvode_mem, which, nls) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(SUNNonlinearSolver), target, intent(inout) :: nls
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = c_loc(nls)
fresult = swigc_FCVodeSetNonlinearSolverB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeGetB: returns backward solution yb and time tbret(1).
function FCVodeGetB(cvode_mem, which, tbret, yb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), dimension(*), target, intent(inout) :: tbret
type(N_Vector), target, intent(inout) :: yb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = c_loc(tbret(1))
farg4 = c_loc(yb)
fresult = swigc_FCVodeGetB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeGetQuadB: backward quadrature value qb at tbret(1).
function FCVodeGetQuadB(cvode_mem, which, tbret, qb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), dimension(*), target, intent(inout) :: tbret
type(N_Vector), target, intent(inout) :: qb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4

farg1 = cvode_mem
farg2 = which
farg3 = c_loc(tbret(1))
farg4 = c_loc(qb)
fresult = swigc_FCVodeGetQuadB(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVodeGetAdjCVodeBmem: returns the raw C pointer to the
! backward solver memory (no status code).
function FCVodeGetAdjCVodeBmem(cvode_mem, which) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(C_PTR) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(C_PTR) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = cvode_mem
farg2 = which
fresult = swigc_FCVodeGetAdjCVodeBmem(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetAdjY: interpolated forward solution y at time t.
function FCVodeGetAdjY(cvode_mem, t, y) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
real(C_DOUBLE), intent(in) :: t
type(N_Vector), target, intent(inout) :: y
integer(C_INT) :: fresult
type(C_PTR) :: farg1
real(C_DOUBLE) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = t
farg3 = c_loc(y)
fresult = swigc_FCVodeGetAdjY(farg1, farg2, farg3)
swig_result = fresult
end function

! ---------------------------------------------------------------------------
! CVadjCheckPointRec proxy-class accessors: each setter/getter forwards the
! object's SwigClassWrapper handle (self%swigdata) plus the field value to
! the corresponding swigc_* accessor of the underlying C struct.
! ---------------------------------------------------------------------------

! Set the my_addr field.
subroutine swigf_CVadjCheckPointRec_my_addr_set(self, my_addr)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(in) :: self
type(C_PTR) :: my_addr
type(SwigClassWrapper) :: farg1
type(C_PTR) :: farg2

farg1 = self%swigdata
farg2 = my_addr
call swigc_CVadjCheckPointRec_my_addr_set(farg1, farg2)
end subroutine

! Get the my_addr field.
function swigf_CVadjCheckPointRec_my_addr_get(self) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(C_PTR) :: swig_result
class(CVadjCheckPointRec), intent(in) :: self
type(C_PTR) :: fresult
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
fresult = swigc_CVadjCheckPointRec_my_addr_get(farg1)
swig_result = fresult
end function

! Set the next_addr field (the call statement completes on the next
! source line of this chunk).
subroutine swigf_CVadjCheckPointRec_next_addr_set(self, next_addr)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(in) :: self
type(C_PTR) :: next_addr
type(SwigClassWrapper) :: farg1
type(C_PTR) :: farg2

farg1 = self%swigdata
farg2 = next_addr
call &
swigc_CVadjCheckPointRec_next_addr_set(farg1, farg2)
end subroutine

! Get the next_addr field of the CVadjCheckPointRec proxy.
function swigf_CVadjCheckPointRec_next_addr_get(self) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
type(C_PTR) :: swig_result
class(CVadjCheckPointRec), intent(in) :: self
type(C_PTR) :: fresult
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
fresult = swigc_CVadjCheckPointRec_next_addr_get(farg1)
swig_result = fresult
end function

! Set the t0 field.
subroutine swigf_CVadjCheckPointRec_t0_set(self, t0)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(in) :: self
real(C_DOUBLE), intent(in) :: t0
type(SwigClassWrapper) :: farg1
real(C_DOUBLE) :: farg2

farg1 = self%swigdata
farg2 = t0
call swigc_CVadjCheckPointRec_t0_set(farg1, farg2)
end subroutine

! Get the t0 field.
function swigf_CVadjCheckPointRec_t0_get(self) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
class(CVadjCheckPointRec), intent(in) :: self
real(C_DOUBLE) :: fresult
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
fresult = swigc_CVadjCheckPointRec_t0_get(farg1)
swig_result = fresult
end function

! Set the t1 field.
subroutine swigf_CVadjCheckPointRec_t1_set(self, t1)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(in) :: self
real(C_DOUBLE), intent(in) :: t1
type(SwigClassWrapper) :: farg1
real(C_DOUBLE) :: farg2

farg1 = self%swigdata
farg2 = t1
call swigc_CVadjCheckPointRec_t1_set(farg1, farg2)
end subroutine

! Get the t1 field.
function swigf_CVadjCheckPointRec_t1_get(self) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
class(CVadjCheckPointRec), intent(in) :: self
real(C_DOUBLE) :: fresult
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
fresult = swigc_CVadjCheckPointRec_t1_get(farg1)
swig_result = fresult
end function

! Set the nstep field (C_LONG).
subroutine swigf_CVadjCheckPointRec_nstep_set(self, nstep)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(in) :: self
integer(C_LONG), intent(in) :: nstep
type(SwigClassWrapper) :: farg1
integer(C_LONG) :: farg2

farg1 = self%swigdata
farg2 = &
nstep
call swigc_CVadjCheckPointRec_nstep_set(farg1, farg2)
end subroutine

! Get the nstep field.
function swigf_CVadjCheckPointRec_nstep_get(self) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_LONG) :: swig_result
class(CVadjCheckPointRec), intent(in) :: self
integer(C_LONG) :: fresult
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
fresult = swigc_CVadjCheckPointRec_nstep_get(farg1)
swig_result = fresult
end function

! Set the order field.
subroutine swigf_CVadjCheckPointRec_order_set(self, order)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(in) :: self
integer(C_INT), intent(in) :: order
type(SwigClassWrapper) :: farg1
integer(C_INT) :: farg2

farg1 = self%swigdata
farg2 = order
call swigc_CVadjCheckPointRec_order_set(farg1, farg2)
end subroutine

! Get the order field.
function swigf_CVadjCheckPointRec_order_get(self) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
class(CVadjCheckPointRec), intent(in) :: self
integer(C_INT) :: fresult
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
fresult = swigc_CVadjCheckPointRec_order_get(farg1)
swig_result = fresult
end function

! Set the step field.
subroutine swigf_CVadjCheckPointRec_step_set(self, step)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(in) :: self
real(C_DOUBLE), intent(in) :: step
type(SwigClassWrapper) :: farg1
real(C_DOUBLE) :: farg2

farg1 = self%swigdata
farg2 = step
call swigc_CVadjCheckPointRec_step_set(farg1, farg2)
end subroutine

! Get the step field.
function swigf_CVadjCheckPointRec_step_get(self) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
real(C_DOUBLE) :: swig_result
class(CVadjCheckPointRec), intent(in) :: self
real(C_DOUBLE) :: fresult
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
fresult = swigc_CVadjCheckPointRec_step_get(farg1)
swig_result = fresult
end function

! Constructor proxy: allocates a new C CVadjCheckPointRec and stores its
! wrapper handle (assignment completes on the next source line).
function swigf_create_CVadjCheckPointRec() &
result(self)
use, intrinsic :: ISO_C_BINDING
type(CVadjCheckPointRec) :: self
type(SwigClassWrapper) :: fresult

fresult = swigc_new_CVadjCheckPointRec()
self%swigdata = &
fresult
end function

! Destructor proxy: deletes the C object only if this wrapper owns it
! (ownership bit in cmemflags), then clears the handle.
subroutine swigf_release_CVadjCheckPointRec(self)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(inout) :: self
type(SwigClassWrapper) :: farg1

farg1 = self%swigdata
if (btest(farg1%cmemflags, swig_cmem_own_bit)) then
call swigc_delete_CVadjCheckPointRec(farg1)
endif
farg1%cptr = C_NULL_PTR
farg1%cmemflags = 0
self%swigdata = farg1
end subroutine

! Assignment-operator proxy for CVadjCheckPointRec.
subroutine swigf_CVadjCheckPointRec_op_assign__(self, other)
use, intrinsic :: ISO_C_BINDING
class(CVadjCheckPointRec), intent(inout) :: self
type(CVadjCheckPointRec), intent(in) :: other
type(SwigClassWrapper) :: farg1
type(SwigClassWrapper) :: farg2

farg1 = self%swigdata
farg2 = other%swigdata
call swigc_CVadjCheckPointRec_op_assign__(farg1, farg2)
self%swigdata = farg1
end subroutine

! Wrapper for CVodeGetAdjCheckPointsInfo: fills the caller's checkpoint
! record via its SwigClassWrapper handle.
function FCVodeGetAdjCheckPointsInfo(cvode_mem, ckpnt) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
class(CVadjCheckPointRec), intent(in) :: ckpnt
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(SwigClassWrapper) :: farg2

farg1 = cvode_mem
farg2 = ckpnt%swigdata
fresult = swigc_FCVodeGetAdjCheckPointsInfo(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeSetJacTimesRhsFnB.
function FCVodeSetJacTimesRhsFnB(cvode_mem, which, jtimesrhsfn) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
type(C_FUNPTR), intent(in), value :: jtimesrhsfn
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_FUNPTR) :: farg3

farg1 = cvode_mem
farg2 = which
farg3 = jtimesrhsfn
fresult = swigc_FCVodeSetJacTimesRhsFnB(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeGetAdjDataPointHermite: returns data point (t(1), y, yd).
function FCVodeGetAdjDataPointHermite(cvode_mem, which, t, y, yd) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), dimension(*), target, intent(inout) :: t
type(N_Vector), target, &
intent(inout) :: y
type(N_Vector), target, intent(inout) :: yd
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5

farg1 = cvode_mem
farg2 = which
farg3 = c_loc(t(1))
farg4 = c_loc(y)
farg5 = c_loc(yd)
fresult = swigc_FCVodeGetAdjDataPointHermite(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Wrapper for CVodeGetAdjDataPointPolynomial: returns (t(1), order(1), y).
function FCVodeGetAdjDataPointPolynomial(cvode_mem, which, t, order, y) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
real(C_DOUBLE), dimension(*), target, intent(inout) :: t
integer(C_INT), dimension(*), target, intent(inout) :: order
type(N_Vector), target, intent(inout) :: y
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5

farg1 = cvode_mem
farg2 = which
farg3 = c_loc(t(1))
farg4 = c_loc(order(1))
farg5 = c_loc(y)
fresult = swigc_FCVodeGetAdjDataPointPolynomial(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Wrapper for CVodeGetAdjCurrentCheckPoint: writes the checkpoint address
! into `addr`.
function FCVodeGetAdjCurrentCheckPoint(cvode_mem, addr) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
type(C_PTR), target, intent(inout) :: addr
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(addr)
fresult = swigc_FCVodeGetAdjCurrentCheckPoint(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVBandPrecInit (banded preconditioner, C_INT64_T dimensions;
! the call to the C routine completes on the next source line).
function FCVBandPrecInit(cvode_mem, n, mu, ml) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT64_T), intent(in) :: n
integer(C_INT64_T), intent(in) :: mu
integer(C_INT64_T), intent(in) :: ml
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT64_T) :: farg2
integer(C_INT64_T) :: farg3
integer(C_INT64_T) :: farg4

farg1 = cvode_mem
farg2 = n
farg3 = mu
farg4 = ml
fresult = &
swigc_FCVBandPrecInit(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVBandPrecGetWorkSpace: real/integer workspace sizes are
! written into lenrwls(1) / leniwls(1).
function FCVBandPrecGetWorkSpace(cvode_mem, lenrwls, leniwls) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls
integer(C_LONG), dimension(*), target, intent(inout) :: leniwls
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = c_loc(lenrwls(1))
farg3 = c_loc(leniwls(1))
fresult = swigc_FCVBandPrecGetWorkSpace(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVBandPrecGetNumRhsEvals: counter returned in nfevalsbp(1).
function FCVBandPrecGetNumRhsEvals(cvode_mem, nfevalsbp) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: nfevalsbp
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(nfevalsbp(1))
fresult = swigc_FCVBandPrecGetNumRhsEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVBandPrecInitB (backward-problem banded preconditioner).
function FCVBandPrecInitB(cvode_mem, which, nb, mub, mlb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_INT64_T), intent(in) :: nb
integer(C_INT64_T), intent(in) :: mub
integer(C_INT64_T), intent(in) :: mlb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
integer(C_INT64_T) :: farg3
integer(C_INT64_T) :: farg4
integer(C_INT64_T) :: farg5

farg1 = cvode_mem
farg2 = which
farg3 = nb
farg4 = mub
farg5 = mlb
fresult = swigc_FCVBandPrecInitB(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Wrapper for CVBBDPrecInit (band-block-diagonal preconditioner); gloc and
! cfn are C function pointers for the local RHS and communication callbacks.
function FCVBBDPrecInit(cvode_mem, nlocal, mudq, mldq, mukeep, mlkeep, dqrely, gloc, cfn) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT64_T), intent(in) :: nlocal
integer(C_INT64_T), intent(in) :: mudq
integer(C_INT64_T), intent(in) :: mldq
integer(C_INT64_T), intent(in) :: mukeep
integer(C_INT64_T), intent(in) :: mlkeep
real(C_DOUBLE), intent(in) :: dqrely
type(C_FUNPTR), intent(in), value :: gloc
type(C_FUNPTR), intent(in), value :: cfn
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT64_T) :: farg2
integer(C_INT64_T) :: farg3
integer(C_INT64_T) :: farg4
integer(C_INT64_T) :: farg5
integer(C_INT64_T) :: farg6
real(C_DOUBLE) :: farg7
type(C_FUNPTR) :: farg8
type(C_FUNPTR) :: farg9

farg1 = cvode_mem
farg2 = nlocal
farg3 = mudq
farg4 = mldq
farg5 = mukeep
farg6 = mlkeep
farg7 = dqrely
farg8 = gloc
farg9 = cfn
fresult = swigc_FCVBBDPrecInit(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9)
swig_result = fresult
end function

! Wrapper for CVBBDPrecReInit.
function FCVBBDPrecReInit(cvode_mem, mudq, mldq, dqrely) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT64_T), intent(in) :: mudq
integer(C_INT64_T), intent(in) :: mldq
real(C_DOUBLE), intent(in) :: dqrely
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT64_T) :: farg2
integer(C_INT64_T) :: farg3
real(C_DOUBLE) :: farg4

farg1 = cvode_mem
farg2 = mudq
farg3 = mldq
farg4 = dqrely
fresult = swigc_FCVBBDPrecReInit(farg1, farg2, farg3, farg4)
swig_result = fresult
end function

! Wrapper for CVBBDPrecGetWorkSpace: sizes returned via first elements.
function FCVBBDPrecGetWorkSpace(cvode_mem, lenrwbbdp, leniwbbdp) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: lenrwbbdp
integer(C_LONG), dimension(*), target, intent(inout) :: leniwbbdp
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = c_loc(lenrwbbdp(1))
farg3 = c_loc(leniwbbdp(1))
fresult = swigc_FCVBBDPrecGetWorkSpace(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVBBDPrecGetNumGfnEvals (declarations continue on the next
! source line of this chunk).
function FCVBBDPrecGetNumGfnEvals(cvode_mem, ngevalsbbdp) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: ngevalsbbdp
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(ngevalsbbdp(1))
fresult = swigc_FCVBBDPrecGetNumGfnEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVBBDPrecInitB (backward-problem BBD preconditioner).
function FCVBBDPrecInitB(cvode_mem, which, nlocalb, mudqb, mldqb, mukeepb, mlkeepb, dqrelyb, glocb, cfnb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_INT64_T), intent(in) :: nlocalb
integer(C_INT64_T), intent(in) :: mudqb
integer(C_INT64_T), intent(in) :: mldqb
integer(C_INT64_T), intent(in) :: mukeepb
integer(C_INT64_T), intent(in) :: mlkeepb
real(C_DOUBLE), intent(in) :: dqrelyb
type(C_FUNPTR), intent(in), value :: glocb
type(C_FUNPTR), intent(in), value :: cfnb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
integer(C_INT64_T) :: farg3
integer(C_INT64_T) :: farg4
integer(C_INT64_T) :: farg5
integer(C_INT64_T) :: farg6
integer(C_INT64_T) :: farg7
real(C_DOUBLE) :: farg8
type(C_FUNPTR) :: farg9
type(C_FUNPTR) :: farg10

farg1 = cvode_mem
farg2 = which
farg3 = nlocalb
farg4 = mudqb
farg5 = mldqb
farg6 = mukeepb
farg7 = mlkeepb
farg8 = dqrelyb
farg9 = glocb
farg10 = cfnb
fresult = swigc_FCVBBDPrecInitB(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9, farg10)
swig_result = fresult
end function

! Wrapper for CVBBDPrecReInitB (remaining assignments continue on the next
! source line of this chunk).
function FCVBBDPrecReInitB(cvode_mem, which, mudqb, mldqb, dqrelyb) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_INT64_T), intent(in) :: mudqb
integer(C_INT64_T), intent(in) :: mldqb
real(C_DOUBLE), intent(in) :: dqrelyb
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2
integer(C_INT64_T) :: farg3
integer(C_INT64_T) :: farg4
real(C_DOUBLE) :: farg5

farg1 = cvode_mem
farg2 = which
farg3 = mudqb
farg4 = mldqb
farg5 = dqrelyb
fresult = swigc_FCVBBDPrecReInitB(farg1, farg2, farg3, farg4, farg5)
swig_result = fresult
end function

! Wrapper for CVDiag (diagonal linear solver attachment).
function FCVDiag(cvode_mem) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT) :: fresult
type(C_PTR) :: farg1

farg1 = cvode_mem
fresult = swigc_FCVDiag(farg1)
swig_result = fresult
end function

! Wrapper for CVDiagGetWorkSpace.
function FCVDiagGetWorkSpace(cvode_mem, lenrwls, leniwls) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls
integer(C_LONG), dimension(*), target, intent(inout) :: leniwls
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = c_loc(lenrwls(1))
farg3 = c_loc(leniwls(1))
fresult = swigc_FCVDiagGetWorkSpace(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVDiagGetNumRhsEvals.
function FCVDiagGetNumRhsEvals(cvode_mem, nfevalsls) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: nfevalsls
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(nfevalsls(1))
fresult = swigc_FCVDiagGetNumRhsEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVDiagGetLastFlag.
function FCVDiagGetLastFlag(cvode_mem, flag) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: flag
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(flag(1))
fresult = swigc_FCVDiagGetLastFlag(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVDiagGetReturnFlagName: converts the C string result to an
! allocatable Fortran character value (body continues on the next source
! line of this chunk).
function FCVDiagGetReturnFlagName(flag) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
character(kind=C_CHAR, len=:), allocatable :: swig_result
integer(C_LONG), intent(in) :: flag
! Continuation of FCVDiagGetReturnFlagName (header on the previous source
! line): call the C routine and copy its SwigArrayWrapper string result.
type(SwigArrayWrapper) :: fresult
integer(C_LONG) :: farg1

farg1 = flag
fresult = swigc_FCVDiagGetReturnFlagName(farg1)
call SWIG_chararray_to_string(fresult, swig_result)
! Dead guard emitted by SWIG: the C string is static, so it is never freed.
if (.false.) call SWIG_free(fresult%data)
end function

! Wrapper for CVDiagB (diagonal solver for backward problem `which`).
function FCVDiagB(cvode_mem, which) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_INT), intent(in) :: which
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = cvode_mem
farg2 = which
fresult = swigc_FCVDiagB(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeSetLinearSolver: attaches SUNLinearSolver ls and
! SUNMatrix a (both passed by address via c_loc).
function FCVodeSetLinearSolver(cvode_mem, ls, a) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
type(SUNLinearSolver), target, intent(inout) :: ls
type(SUNMatrix), target, intent(inout) :: a
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = c_loc(ls)
farg3 = c_loc(a)
fresult = swigc_FCVodeSetLinearSolver(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetJacFn.
function FCVodeSetJacFn(cvode_mem, jac) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
type(C_FUNPTR), intent(in), value :: jac
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_FUNPTR) :: farg2

farg1 = cvode_mem
farg2 = jac
fresult = swigc_FCVodeSetJacFn(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeSetJacEvalFrequency.
function FCVodeSetJacEvalFrequency(cvode_mem, msbj) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), intent(in) :: msbj
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_LONG) :: farg2

farg1 = cvode_mem
farg2 = msbj
fresult = swigc_FCVodeSetJacEvalFrequency(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeSetLinearSolutionScaling.
function FCVodeSetLinearSolutionScaling(cvode_mem, onoff) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: &
cvode_mem
integer(C_INT), intent(in) :: onoff
integer(C_INT) :: fresult
type(C_PTR) :: farg1
integer(C_INT) :: farg2

farg1 = cvode_mem
farg2 = onoff
fresult = swigc_FCVodeSetLinearSolutionScaling(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeSetEpsLin.
function FCVodeSetEpsLin(cvode_mem, eplifac) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
real(C_DOUBLE), intent(in) :: eplifac
integer(C_INT) :: fresult
type(C_PTR) :: farg1
real(C_DOUBLE) :: farg2

farg1 = cvode_mem
farg2 = eplifac
fresult = swigc_FCVodeSetEpsLin(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeSetLSNormFactor.
! NOTE(review): the dummy is named `arkode_mem` in the generated interface,
! presumably a copy/paste artifact of the generator's template - it is still
! the CVODE solver handle; left untouched because it is part of the
! generated interface.
function FCVodeSetLSNormFactor(arkode_mem, nrmfac) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: arkode_mem
real(C_DOUBLE), intent(in) :: nrmfac
integer(C_INT) :: fresult
type(C_PTR) :: farg1
real(C_DOUBLE) :: farg2

farg1 = arkode_mem
farg2 = nrmfac
fresult = swigc_FCVodeSetLSNormFactor(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeSetPreconditioner: setup/solve callback pair.
function FCVodeSetPreconditioner(cvode_mem, pset, psolve) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
type(C_FUNPTR), intent(in), value :: pset
type(C_FUNPTR), intent(in), value :: psolve
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_FUNPTR) :: farg2
type(C_FUNPTR) :: farg3

farg1 = cvode_mem
farg2 = pset
farg3 = psolve
fresult = swigc_FCVodeSetPreconditioner(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetJacTimes: Jacobian-times-vector setup/apply pair.
function FCVodeSetJacTimes(cvode_mem, jtsetup, jtimes) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
type(C_FUNPTR), intent(in), value :: jtsetup
type(C_FUNPTR), intent(in), value :: jtimes
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_FUNPTR) :: farg2
type(C_FUNPTR) :: farg3

farg1 = cvode_mem
farg2 = jtsetup
farg3 = jtimes
fresult = swigc_FCVodeSetJacTimes(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeSetLinSysFn.
function &
FCVodeSetLinSysFn(cvode_mem, linsys) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
type(C_FUNPTR), intent(in), value :: linsys
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_FUNPTR) :: farg2

farg1 = cvode_mem
farg2 = linsys
fresult = swigc_FCVodeSetLinSysFn(farg1, farg2)
swig_result = fresult
end function

! ---------------------------------------------------------------------------
! Linear-solver statistics getters: each writes one C_LONG counter into the
! first element of the caller's buffer (passed as c_loc(arg(1))).
! ---------------------------------------------------------------------------

! Wrapper for CVodeGetLinWorkSpace.
function FCVodeGetLinWorkSpace(cvode_mem, lenrwls, leniwls) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: lenrwls
integer(C_LONG), dimension(*), target, intent(inout) :: leniwls
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3

farg1 = cvode_mem
farg2 = c_loc(lenrwls(1))
farg3 = c_loc(leniwls(1))
fresult = swigc_FCVodeGetLinWorkSpace(farg1, farg2, farg3)
swig_result = fresult
end function

! Wrapper for CVodeGetNumJacEvals.
function FCVodeGetNumJacEvals(cvode_mem, njevals) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: njevals
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(njevals(1))
fresult = swigc_FCVodeGetNumJacEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetNumPrecEvals.
function FCVodeGetNumPrecEvals(cvode_mem, npevals) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: npevals
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(npevals(1))
fresult = swigc_FCVodeGetNumPrecEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetNumPrecSolves.
function FCVodeGetNumPrecSolves(cvode_mem, npsolves) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: npsolves
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(npsolves(1))
fresult = swigc_FCVodeGetNumPrecSolves(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetNumLinIters.
function FCVodeGetNumLinIters(cvode_mem, nliters) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: nliters
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(nliters(1))
fresult = swigc_FCVodeGetNumLinIters(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetNumLinConvFails.
function FCVodeGetNumLinConvFails(cvode_mem, nlcfails) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: nlcfails
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(nlcfails(1))
fresult = swigc_FCVodeGetNumLinConvFails(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetNumJTSetupEvals.
function FCVodeGetNumJTSetupEvals(cvode_mem, njtsetups) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: njtsetups
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(njtsetups(1))
fresult = swigc_FCVodeGetNumJTSetupEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetNumJtimesEvals.
function FCVodeGetNumJtimesEvals(cvode_mem, njvevals) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: njvevals
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(njvevals(1))
fresult = swigc_FCVodeGetNumJtimesEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetNumLinRhsEvals.
function FCVodeGetNumLinRhsEvals(cvode_mem, nfevalsls) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: nfevalsls
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = c_loc(nfevalsls(1))
fresult = swigc_FCVodeGetNumLinRhsEvals(farg1, farg2)
swig_result = fresult
end function

! Wrapper for CVodeGetLinSolveStats: fetches all eight linear-solver
! counters in one call; each is written into arg(1) of its buffer.
function FCVodeGetLinSolveStats(cvode_mem, njevals, nfevalsls, nliters, nlcfails, npevals, npsolves, njtsetups, njtimes) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: njevals
integer(C_LONG), dimension(*), target, intent(inout) :: nfevalsls
integer(C_LONG), dimension(*), target, intent(inout) :: nliters
integer(C_LONG), dimension(*), target, intent(inout) :: nlcfails
integer(C_LONG), dimension(*), target, intent(inout) :: npevals
integer(C_LONG), dimension(*), target, intent(inout) :: npsolves
integer(C_LONG), dimension(*), target, intent(inout) :: njtsetups
integer(C_LONG), dimension(*), target, intent(inout) :: njtimes
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2
type(C_PTR) :: farg3
type(C_PTR) :: farg4
type(C_PTR) :: farg5
type(C_PTR) :: farg6
type(C_PTR) :: farg7
type(C_PTR) :: farg8
type(C_PTR) :: farg9

farg1 = cvode_mem
farg2 = c_loc(njevals(1))
farg3 = c_loc(nfevalsls(1))
farg4 = c_loc(nliters(1))
farg5 = c_loc(nlcfails(1))
farg6 = c_loc(npevals(1))
farg7 = c_loc(npsolves(1))
farg8 = c_loc(njtsetups(1))
farg9 = c_loc(njtimes(1))
fresult = swigc_FCVodeGetLinSolveStats(farg1, farg2, farg3, farg4, farg5, farg6, farg7, farg8, farg9)
swig_result = fresult
end function

! Wrapper for CVodeGetLastLinFlag (this chunk ends mid-statement; the body
! continues on the next source line of the file).
function FCVodeGetLastLinFlag(cvode_mem, flag) &
result(swig_result)
use, intrinsic :: ISO_C_BINDING
integer(C_INT) :: swig_result
type(C_PTR) :: cvode_mem
integer(C_LONG), dimension(*), target, intent(inout) :: flag
integer(C_INT) :: fresult
type(C_PTR) :: farg1
type(C_PTR) :: farg2

farg1 = cvode_mem
farg2 = &
c_loc(flag(1)) fresult = swigc_FCVodeGetLastLinFlag(farg1, farg2) swig_result = fresult end function function FCVodeGetLinReturnFlagName(flag) & result(swig_result) use, intrinsic :: ISO_C_BINDING character(kind=C_CHAR, len=:), allocatable :: swig_result integer(C_LONG), intent(in) :: flag type(SwigArrayWrapper) :: fresult integer(C_LONG) :: farg1 farg1 = flag fresult = swigc_FCVodeGetLinReturnFlagName(farg1) call SWIG_chararray_to_string(fresult, swig_result) if (.false.) call SWIG_free(fresult%data) end function function FCVodeSetLinearSolverB(cvode_mem, which, ls, a) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(SUNLinearSolver), target, intent(inout) :: ls type(SUNMatrix), target, intent(inout) :: a integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_PTR) :: farg3 type(C_PTR) :: farg4 farg1 = cvode_mem farg2 = which farg3 = c_loc(ls) farg4 = c_loc(a) fresult = swigc_FCVodeSetLinearSolverB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeSetJacFnB(cvode_mem, which, jacb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jacb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = cvode_mem farg2 = which farg3 = jacb fresult = swigc_FCVodeSetJacFnB(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetJacFnBS(cvode_mem, which, jacbs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jacbs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = cvode_mem farg2 = which farg3 = jacbs fresult = swigc_FCVodeSetJacFnBS(farg1, farg2, farg3) 
swig_result = fresult end function function FCVodeSetEpsLinB(cvode_mem, which, eplifacb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: eplifacb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = cvode_mem farg2 = which farg3 = eplifacb fresult = swigc_FCVodeSetEpsLinB(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetLSNormFactorB(arkode_mem, which, nrmfacb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: arkode_mem integer(C_INT), intent(in) :: which real(C_DOUBLE), intent(in) :: nrmfacb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 real(C_DOUBLE) :: farg3 farg1 = arkode_mem farg2 = which farg3 = nrmfacb fresult = swigc_FCVodeSetLSNormFactorB(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetLinearSolutionScalingB(cvode_mem, which, onoffb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which integer(C_INT), intent(in) :: onoffb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 integer(C_INT) :: farg3 farg1 = cvode_mem farg2 = which farg3 = onoffb fresult = swigc_FCVodeSetLinearSolutionScalingB(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetPreconditionerB(cvode_mem, which, psetb, psolveb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: psetb type(C_FUNPTR), intent(in), value :: psolveb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = cvode_mem farg2 = which farg3 = psetb farg4 = psolveb fresult = 
swigc_FCVodeSetPreconditionerB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeSetPreconditionerBS(cvode_mem, which, psetbs, psolvebs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: psetbs type(C_FUNPTR), intent(in), value :: psolvebs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = cvode_mem farg2 = which farg3 = psetbs farg4 = psolvebs fresult = swigc_FCVodeSetPreconditionerBS(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeSetJacTimesB(cvode_mem, which, jtsetupb, jtimesb) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jtsetupb type(C_FUNPTR), intent(in), value :: jtimesb integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = cvode_mem farg2 = which farg3 = jtsetupb farg4 = jtimesb fresult = swigc_FCVodeSetJacTimesB(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeSetJacTimesBS(cvode_mem, which, jtsetupbs, jtimesbs) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: jtsetupbs type(C_FUNPTR), intent(in), value :: jtimesbs integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 type(C_FUNPTR) :: farg4 farg1 = cvode_mem farg2 = which farg3 = jtsetupbs farg4 = jtimesbs fresult = swigc_FCVodeSetJacTimesBS(farg1, farg2, farg3, farg4) swig_result = fresult end function function FCVodeSetLinSysFnB(cvode_mem, which, linsys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result 
type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: linsys integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = cvode_mem farg2 = which farg3 = linsys fresult = swigc_FCVodeSetLinSysFnB(farg1, farg2, farg3) swig_result = fresult end function function FCVodeSetLinSysFnBS(cvode_mem, which, linsys) & result(swig_result) use, intrinsic :: ISO_C_BINDING integer(C_INT) :: swig_result type(C_PTR) :: cvode_mem integer(C_INT), intent(in) :: which type(C_FUNPTR), intent(in), value :: linsys integer(C_INT) :: fresult type(C_PTR) :: farg1 integer(C_INT) :: farg2 type(C_FUNPTR) :: farg3 farg1 = cvode_mem farg2 = which farg3 = linsys fresult = swigc_FCVodeSetLinSysFnBS(farg1, farg2, farg3) swig_result = fresult end function end module StanHeaders/src/cvodes/README.md0000644000176200001440000000506214645137106016016 0ustar liggesusers# CVODES ### Version 6.1.1 (Feb 2022) **Alan C. Hindmarsh, Radu Serban, Cody J. Balos, David J. Gardner, and Carol S. Woodward, Center for Applied Scientific Computing, LLNL** **Daniel R. Reynolds, Department of Mathematics, Southern Methodist University** CVODES is a package for the solution of stiff and nonstiff ordinary differential equation (ODE) systems (initial value problem) given in explicit form ``` dy/dt = f(t,y,p), y(t0) = y0(p) ``` with sensitivity analysis capabilities (both forward and adjoint modes). CVODES provides a choice of two variable-order, variable-coefficient multistep methods, Adams-Moulton methods for non-stiff problems or BDF (Backward Differentiation Formula) methods in fixed-leading-coefficient form for stiff problems. CVODES is part of the SUNDIALS Suite of Nonlinear and Differential/Algebraic equation Solvers which consists of ARKode, CVODE, CVODES, IDA, IDAS and KINSOL. 
It is written in ANSI standard C and can be used in a variety of computing environments including serial, shared memory, distributed memory, and accelerator-based (e.g., GPU) systems. This flexibility is obtained from a modular design that leverages the shared vector, matrix, linear solver, and nonlinear solver APIs used across SUNDIALS packages. ## Documentation See the [CVODES User Guide](/doc/cvodes/cvs_guide.pdf) and [CVODES Examples](/doc/cvodes/cvs_examples.pdf) document for more information about CVODES usage and the provided example programs respectively. ## Installation For installation instructions see the [INSTALL_GUIDE](/INSTALL_GUIDE.pdf) or the "Installation Procedure" chapter in the CVODES User Guide. ## Release History Information on recent changes to CVODES can be found in the "Introduction" chapter of the CVODES User Guide and a complete release history is available in the "SUNDIALS Release History" appendix of the CVODES User Guide. ## References * A. C. Hindmarsh, R. Serban, C. J. Balos, D. J. Gardner, D. R. Reynolds and C. S. Woodward, "User Documentation for CVODES v6.1.1," LLNL technical report UCRL-SM-208111, Feb 2022. * A. C. Hindmarsh and R. Serban, "Example Programs for CVODES v6.1.1," LLNL technical report UCRL-SM-208115, Feb 2022. * R. Serban and A. C. Hindmarsh, "CVODES: the Sensitivity-Enabled ODE solver in SUNDIALS," Proceedings of IDETC/CIE 2005, Sept. 2005, Long Beach, CA. * A. C. Hindmarsh, P. N. Brown, K. E. Grant, S. L. Lee, R. Serban, D. E. Shumaker, and C. S. Woodward, "SUNDIALS, Suite of Nonlinear and Differential/Algebraic Equation Solvers," ACM Trans. Math. Softw., 31(3), pp. 363-396, 2005. StanHeaders/src/cvodes/cvodes_bbdpre.c0000644000176200001440000007122214645137106017505 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. 
Reynolds @ SMU * Radu Serban and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This file contains implementations of routines for a * band-block-diagonal preconditioner, i.e. a block-diagonal * matrix with banded blocks, for use with CVODE, the CVSLS linear * solver interface, and the MPI-parallel implementation of NVECTOR. * ----------------------------------------------------------------- */ #include #include #include "cvodes_impl.h" #include "cvodes_bbdpre_impl.h" #include "cvodes_ls_impl.h" #include #include #define MIN_INC_MULT RCONST(1000.0) #define ZERO RCONST(0.0) #define ONE RCONST(1.0) #define TWO RCONST(2.0) /* Prototypes of functions cvBBDPrecSetup and cvBBDPrecSolve */ static int cvBBDPrecSetup(realtype t, N_Vector y, N_Vector fy, booleantype jok, booleantype *jcurPtr, realtype gamma, void *bbd_data); static int cvBBDPrecSolve(realtype t, N_Vector y, N_Vector fy, N_Vector r, N_Vector z, realtype gamma, realtype delta, int lr, void *bbd_data); /* Prototype for cvBBDPrecFree */ static int cvBBDPrecFree(CVodeMem cv_mem); /* Wrapper functions for adjoint code */ static int cvGlocWrapper(sunindextype NlocalB, realtype t, N_Vector yB, N_Vector gB, void *cvadj_mem); static int cvCfnWrapper(sunindextype NlocalB, realtype t, N_Vector yB, void *cvadj_mem); /* Prototype for difference quotient Jacobian calculation routine */ static int cvBBDDQJac(CVBBDPrecData pdata, realtype t, N_Vector y, N_Vector gy, N_Vector ytemp, N_Vector gtemp); /* Prototype for the backward pfree routine */ static int CVBBDPrecFreeB(CVodeBMem cvB_mem); 
/*================================================================ PART I - forward problems ================================================================*/ /*----------------------------------------------------------------- User-Callable Functions: initialization, reinit and free -----------------------------------------------------------------*/ int CVBBDPrecInit(void *cvode_mem, sunindextype Nlocal, sunindextype mudq, sunindextype mldq, sunindextype mukeep, sunindextype mlkeep, realtype dqrely, CVLocalFn gloc, CVCommFn cfn) { CVodeMem cv_mem; CVLsMem cvls_mem; CVBBDPrecData pdata; sunindextype muk, mlk, storage_mu, lrw1, liw1; long int lrw, liw; int flag; if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Test if the CVSLS linear solver interface has been created */ if (cv_mem->cv_lmem == NULL) { cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_LMEM_NULL); return(CVLS_LMEM_NULL); } cvls_mem = (CVLsMem) cv_mem->cv_lmem; /* Test compatibility of NVECTOR package with the BBD preconditioner */ if(cv_mem->cv_tempv->ops->nvgetarraypointer == NULL) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_BAD_NVECTOR); return(CVLS_ILL_INPUT); } /* Allocate data memory */ pdata = NULL; pdata = (CVBBDPrecData) malloc(sizeof *pdata); if (pdata == NULL) { cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } /* Set pointers to gloc and cfn; load half-bandwidths */ pdata->cvode_mem = cvode_mem; pdata->gloc = gloc; pdata->cfn = cfn; pdata->mudq = SUNMIN(Nlocal-1, SUNMAX(0,mudq)); pdata->mldq = SUNMIN(Nlocal-1, SUNMAX(0,mldq)); muk = SUNMIN(Nlocal-1, SUNMAX(0,mukeep)); mlk = SUNMIN(Nlocal-1, SUNMAX(0,mlkeep)); pdata->mukeep = muk; pdata->mlkeep = mlk; /* Allocate memory for saved Jacobian */ pdata->savedJ = SUNBandMatrixStorage(Nlocal, muk, mlk, muk, 
cv_mem->cv_sunctx); if (pdata->savedJ == NULL) { free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } /* Allocate memory for preconditioner matrix */ storage_mu = SUNMIN(Nlocal-1, muk + mlk); pdata->savedP = NULL; pdata->savedP = SUNBandMatrixStorage(Nlocal, muk, mlk, storage_mu, cv_mem->cv_sunctx); if (pdata->savedP == NULL) { SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } /* Allocate memory for temporary N_Vectors */ pdata->zlocal = NULL; pdata->zlocal = N_VNewEmpty_Serial(Nlocal, cv_mem->cv_sunctx); if (pdata->zlocal == NULL) { SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } pdata->rlocal = NULL; pdata->rlocal = N_VNewEmpty_Serial(Nlocal, cv_mem->cv_sunctx); if (pdata->rlocal == NULL) { N_VDestroy(pdata->zlocal); SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } pdata->tmp1 = NULL; pdata->tmp1 = N_VClone(cv_mem->cv_tempv); if (pdata->tmp1 == NULL) { N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } pdata->tmp2 = NULL; pdata->tmp2 = N_VClone(cv_mem->cv_tempv); if (pdata->tmp2 == NULL) { N_VDestroy(pdata->tmp1); N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } pdata->tmp3 
= NULL; pdata->tmp3 = N_VClone(cv_mem->cv_tempv); if (pdata->tmp3 == NULL) { N_VDestroy(pdata->tmp1); N_VDestroy(pdata->tmp2); N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } /* Allocate memory for banded linear solver */ pdata->LS = NULL; pdata->LS = SUNLinSol_Band(pdata->rlocal, pdata->savedP, cv_mem->cv_sunctx); if (pdata->LS == NULL) { N_VDestroy(pdata->tmp1); N_VDestroy(pdata->tmp2); N_VDestroy(pdata->tmp3); N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } /* initialize band linear solver object */ flag = SUNLinSolInitialize(pdata->LS); if (flag != SUNLS_SUCCESS) { N_VDestroy(pdata->tmp1); N_VDestroy(pdata->tmp2); N_VDestroy(pdata->tmp3); N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); SUNLinSolFree(pdata->LS); free(pdata); pdata = NULL; cvProcessError(cv_mem, CVLS_SUNLS_FAIL, "CVSBBDPRE", "CVBBDPrecInit", MSGBBD_SUNLS_FAIL); return(CVLS_SUNLS_FAIL); } /* Set pdata->dqrely based on input dqrely (0 implies default). */ pdata->dqrely = (dqrely > ZERO) ? 
dqrely : SUNRsqrt(cv_mem->cv_uround); /* Store Nlocal to be used in CVBBDPrecSetup */ pdata->n_local = Nlocal; /* Set work space sizes and initialize nge */ pdata->rpwsize = 0; pdata->ipwsize = 0; if (cv_mem->cv_tempv->ops->nvspace) { N_VSpace(cv_mem->cv_tempv, &lrw1, &liw1); pdata->rpwsize += 3*lrw1; pdata->ipwsize += 3*liw1; } if (pdata->rlocal->ops->nvspace) { N_VSpace(pdata->rlocal, &lrw1, &liw1); pdata->rpwsize += 2*lrw1; pdata->ipwsize += 2*liw1; } if (pdata->savedJ->ops->space) { flag = SUNMatSpace(pdata->savedJ, &lrw, &liw); pdata->rpwsize += lrw; pdata->ipwsize += liw; } if (pdata->savedP->ops->space) { flag = SUNMatSpace(pdata->savedP, &lrw, &liw); pdata->rpwsize += lrw; pdata->ipwsize += liw; } if (pdata->LS->ops->space) { flag = SUNLinSolSpace(pdata->LS, &lrw, &liw); pdata->rpwsize += lrw; pdata->ipwsize += liw; } pdata->nge = 0; /* make sure s_P_data is free from any previous allocations */ if (cvls_mem->pfree) cvls_mem->pfree(cv_mem); /* Point to the new P_data field in the LS memory */ cvls_mem->P_data = pdata; /* Attach the pfree function */ cvls_mem->pfree = cvBBDPrecFree; /* Attach preconditioner solve and setup functions */ flag = CVodeSetPreconditioner(cvode_mem, cvBBDPrecSetup, cvBBDPrecSolve); return(flag); } int CVBBDPrecReInit(void *cvode_mem, sunindextype mudq, sunindextype mldq, realtype dqrely) { CVodeMem cv_mem; CVLsMem cvls_mem; CVBBDPrecData pdata; sunindextype Nlocal; if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSBBDPRE", "CVBBDPrecReInit", MSGBBD_MEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Test if the LS linear solver interface has been created */ if (cv_mem->cv_lmem == NULL) { cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSBBDPRE", "CVBBDPrecReInit", MSGBBD_LMEM_NULL); return(CVLS_LMEM_NULL); } cvls_mem = (CVLsMem) cv_mem->cv_lmem; /* Test if the preconditioner data is non-NULL */ if (cvls_mem->P_data == NULL) { cvProcessError(cv_mem, CVLS_PMEM_NULL, "CVSBBDPRE", "CVBBDPrecReInit", 
MSGBBD_PMEM_NULL); return(CVLS_PMEM_NULL); } pdata = (CVBBDPrecData) cvls_mem->P_data; /* Load half-bandwidths */ Nlocal = pdata->n_local; pdata->mudq = SUNMIN(Nlocal-1, SUNMAX(0,mudq)); pdata->mldq = SUNMIN(Nlocal-1, SUNMAX(0,mldq)); /* Set pdata->dqrely based on input dqrely (0 implies default). */ pdata->dqrely = (dqrely > ZERO) ? dqrely : SUNRsqrt(cv_mem->cv_uround); /* Re-initialize nge */ pdata->nge = 0; return(CVLS_SUCCESS); } int CVBBDPrecGetWorkSpace(void *cvode_mem, long int *lenrwBBDP, long int *leniwBBDP) { CVodeMem cv_mem; CVLsMem cvls_mem; CVBBDPrecData pdata; if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSBBDPRE", "CVBBDPrecGetWorkSpace", MSGBBD_MEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_lmem == NULL) { cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSBBDPRE", "CVBBDPrecGetWorkSpace", MSGBBD_LMEM_NULL); return(CVLS_LMEM_NULL); } cvls_mem = (CVLsMem) cv_mem->cv_lmem; if (cvls_mem->P_data == NULL) { cvProcessError(cv_mem, CVLS_PMEM_NULL, "CVSBBDPRE", "CVBBDPrecGetWorkSpace", MSGBBD_PMEM_NULL); return(CVLS_PMEM_NULL); } pdata = (CVBBDPrecData) cvls_mem->P_data; *lenrwBBDP = pdata->rpwsize; *leniwBBDP = pdata->ipwsize; return(CVLS_SUCCESS); } int CVBBDPrecGetNumGfnEvals(void *cvode_mem, long int *ngevalsBBDP) { CVodeMem cv_mem; CVLsMem cvls_mem; CVBBDPrecData pdata; if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSBBDPRE", "CVBBDPrecGetNumGfnEvals", MSGBBD_MEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_lmem == NULL) { cvProcessError(cv_mem, CVLS_LMEM_NULL, "CVSBBDPRE", "CVBBDPrecGetNumGfnEvals", MSGBBD_LMEM_NULL); return(CVLS_LMEM_NULL); } cvls_mem = (CVLsMem) cv_mem->cv_lmem; if (cvls_mem->P_data == NULL) { cvProcessError(cv_mem, CVLS_PMEM_NULL, "CVSBBDPRE", "CVBBDPrecGetNumGfnEvals", MSGBBD_PMEM_NULL); return(CVLS_PMEM_NULL); } pdata = (CVBBDPrecData) cvls_mem->P_data; *ngevalsBBDP = pdata->nge; return(CVLS_SUCCESS); } 
/*----------------------------------------------------------------- Function : cvBBDPrecSetup ----------------------------------------------------------------- cvBBDPrecSetup generates and factors a banded block of the preconditioner matrix on each processor, via calls to the user-supplied gloc and cfn functions. It uses difference quotient approximations to the Jacobian elements. cvBBDPrecSetup calculates a new J,if necessary, then calculates P = I - gamma*J, and does an LU factorization of P. The parameters of cvBBDPrecSetup used here are as follows: t is the current value of the independent variable. y is the current value of the dependent variable vector, namely the predicted value of y(t). fy is the vector f(t,y). jok is an input flag indicating whether Jacobian-related data needs to be recomputed, as follows: jok == SUNFALSE means recompute Jacobian-related data from scratch. jok == SUNTRUE means that Jacobian data from the previous CVBBDPrecon call can be reused (with the current value of gamma). A cvBBDPrecSetup call with jok == SUNTRUE should only occur after a call with jok == SUNFALSE. jcurPtr is a pointer to an output integer flag which is set by cvBBDPrecSetup as follows: *jcurPtr = SUNTRUE if Jacobian data was recomputed. *jcurPtr = SUNFALSE if Jacobian data was not recomputed, but saved data was reused. gamma is the scalar appearing in the Newton matrix. bbd_data is a pointer to the preconditioner data set by CVBBDPrecInit Return value: The value returned by this cvBBDPrecSetup function is the int 0 if successful, 1 for a recoverable error (step will be retried). 
-----------------------------------------------------------------*/ static int cvBBDPrecSetup(realtype t, N_Vector y, N_Vector fy, booleantype jok, booleantype *jcurPtr, realtype gamma, void *bbd_data) { CVBBDPrecData pdata; CVodeMem cv_mem; int retval; pdata = (CVBBDPrecData) bbd_data; cv_mem = (CVodeMem) pdata->cvode_mem; /* If jok = SUNTRUE, use saved copy of J */ if (jok) { *jcurPtr = SUNFALSE; retval = SUNMatCopy(pdata->savedJ, pdata->savedP); if (retval < 0) { cvProcessError(cv_mem, -1, "CVBBDPRE", "CVBBDPrecSetup", MSGBBD_SUNMAT_FAIL); return(-1); } if (retval > 0) { return(1); } /* Otherwise call cvBBDDQJac for new J value */ } else { *jcurPtr = SUNTRUE; retval = SUNMatZero(pdata->savedJ); if (retval < 0) { cvProcessError(cv_mem, -1, "CVBBDPRE", "CVBBDPrecSetup", MSGBBD_SUNMAT_FAIL); return(-1); } if (retval > 0) { return(1); } retval = cvBBDDQJac(pdata, t, y, pdata->tmp1, pdata->tmp2, pdata->tmp3); if (retval < 0) { cvProcessError(cv_mem, -1, "CVBBDPRE", "CVBBDPrecSetup", MSGBBD_FUNC_FAILED); return(-1); } if (retval > 0) { return(1); } retval = SUNMatCopy(pdata->savedJ, pdata->savedP); if (retval < 0) { cvProcessError(cv_mem, -1, "CVBBDPRE", "CVBBDPrecSetup", MSGBBD_SUNMAT_FAIL); return(-1); } if (retval > 0) { return(1); } } /* Scale and add I to get P = I - gamma*J */ retval = SUNMatScaleAddI(-gamma, pdata->savedP); if (retval) { cvProcessError(cv_mem, -1, "CVBBDPRE", "CVBBDPrecSetup", MSGBBD_SUNMAT_FAIL); return(-1); } /* Do LU factorization of matrix and return error flag */ retval = SUNLinSolSetup_Band(pdata->LS, pdata->savedP); return(retval); } /*----------------------------------------------------------------- Function : cvBBDPrecSolve ----------------------------------------------------------------- cvBBDPrecSolve solves a linear system P z = r, with the band-block-diagonal preconditioner matrix P generated and factored by cvBBDPrecSetup. 
The parameters of cvBBDPrecSolve used here are as follows: r is the right-hand side vector of the linear system. bbd_data is a pointer to the preconditioner data set by CVBBDPrecInit. z is the output vector computed by cvBBDPrecSolve. The value returned by the cvBBDPrecSolve function is always 0, indicating success. -----------------------------------------------------------------*/ static int cvBBDPrecSolve(realtype t, N_Vector y, N_Vector fy, N_Vector r, N_Vector z, realtype gamma, realtype delta, int lr, void *bbd_data) { int retval; CVBBDPrecData pdata; pdata = (CVBBDPrecData) bbd_data; /* Attach local data arrays for r and z to rlocal and zlocal */ N_VSetArrayPointer(N_VGetArrayPointer(r), pdata->rlocal); N_VSetArrayPointer(N_VGetArrayPointer(z), pdata->zlocal); /* Call banded solver object to do the work */ retval = SUNLinSolSolve(pdata->LS, pdata->savedP, pdata->zlocal, pdata->rlocal, ZERO); /* Detach local data arrays from rlocal and zlocal */ N_VSetArrayPointer(NULL, pdata->rlocal); N_VSetArrayPointer(NULL, pdata->zlocal); return(retval); } static int cvBBDPrecFree(CVodeMem cv_mem) { CVLsMem cvls_mem; CVBBDPrecData pdata; if (cv_mem->cv_lmem == NULL) return(0); cvls_mem = (CVLsMem) cv_mem->cv_lmem; if (cvls_mem->P_data == NULL) return(0); pdata = (CVBBDPrecData) cvls_mem->P_data; SUNLinSolFree(pdata->LS); N_VDestroy(pdata->tmp1); N_VDestroy(pdata->tmp2); N_VDestroy(pdata->tmp3); N_VDestroy(pdata->zlocal); N_VDestroy(pdata->rlocal); SUNMatDestroy(pdata->savedP); SUNMatDestroy(pdata->savedJ); free(pdata); pdata = NULL; return(0); } /*----------------------------------------------------------------- Function : cvBBDDQJac ----------------------------------------------------------------- This routine generates a banded difference quotient approximation to the local block of the Jacobian of g(t,y). It assumes that a band SUNMatrix is stored columnwise, and that elements within each column are contiguous. 
All matrix elements are generated as difference quotients, by way of calls to the user routine gloc. By virtue of the band structure, the number of these calls is bandwidth + 1, where bandwidth = mldq + mudq + 1. But the band matrix kept has bandwidth = mlkeep + mukeep + 1. This routine also assumes that the local elements of a vector are stored contiguously. -----------------------------------------------------------------*/ static int cvBBDDQJac(CVBBDPrecData pdata, realtype t, N_Vector y, N_Vector gy, N_Vector ytemp, N_Vector gtemp) { CVodeMem cv_mem; realtype gnorm, minInc, inc, inc_inv, yj, conj; sunindextype group, i, j, width, ngroups, i1, i2; realtype *y_data, *ewt_data, *gy_data, *gtemp_data; realtype *ytemp_data, *col_j, *cns_data; int retval; /* initialize cns_data to avoid compiler warning */ cns_data = NULL; cv_mem = (CVodeMem) pdata->cvode_mem; /* Load ytemp with y = predicted solution vector */ N_VScale(ONE, y, ytemp); /* Call cfn and gloc to get base value of g(t,y) */ if (pdata->cfn != NULL) { retval = pdata->cfn(pdata->n_local, t, y, cv_mem->cv_user_data); if (retval != 0) return(retval); } retval = pdata->gloc(pdata->n_local, t, ytemp, gy, cv_mem->cv_user_data); pdata->nge++; if (retval != 0) return(retval); /* Obtain pointers to the data for various vectors */ y_data = N_VGetArrayPointer(y); gy_data = N_VGetArrayPointer(gy); ewt_data = N_VGetArrayPointer(cv_mem->cv_ewt); ytemp_data = N_VGetArrayPointer(ytemp); gtemp_data = N_VGetArrayPointer(gtemp); if (cv_mem->cv_constraintsSet) cns_data = N_VGetArrayPointer(cv_mem->cv_constraints); /* Set minimum increment based on uround and norm of g */ gnorm = N_VWrmsNorm(gy, cv_mem->cv_ewt); minInc = (gnorm != ZERO) ? 
(MIN_INC_MULT * SUNRabs(cv_mem->cv_h) * cv_mem->cv_uround * pdata->n_local * gnorm) : ONE; /* Set bandwidth and number of column groups for band differencing */ width = pdata->mldq + pdata->mudq + 1; ngroups = SUNMIN(width, pdata->n_local); /* Loop over groups */ for (group=1; group <= ngroups; group++) { /* Increment all y_j in group */ for(j=group-1; j < pdata->n_local; j+=width) { inc = SUNMAX(pdata->dqrely * SUNRabs(y_data[j]), minInc/ewt_data[j]); yj = y_data[j]; /* Adjust sign(inc) again if yj has an inequality constraint. */ if (cv_mem->cv_constraintsSet) { conj = cns_data[j]; if (SUNRabs(conj) == ONE) {if ((yj+inc)*conj < ZERO) inc = -inc;} else if (SUNRabs(conj) == TWO) {if ((yj+inc)*conj <= ZERO) inc = -inc;} } ytemp_data[j] += inc; } /* Evaluate g with incremented y */ retval = pdata->gloc(pdata->n_local, t, ytemp, gtemp, cv_mem->cv_user_data); pdata->nge++; if (retval != 0) return(retval); /* Restore ytemp, then form and load difference quotients */ for (j=group-1; j < pdata->n_local; j+=width) { yj = ytemp_data[j] = y_data[j]; col_j = SUNBandMatrix_Column(pdata->savedJ,j); inc = SUNMAX(pdata->dqrely * SUNRabs(y_data[j]), minInc/ewt_data[j]); /* Adjust sign(inc) as before. 
*/ if (cv_mem->cv_constraintsSet) { conj = cns_data[j]; if (SUNRabs(conj) == ONE) {if ((yj+inc)*conj < ZERO) inc = -inc;} else if (SUNRabs(conj) == TWO) {if ((yj+inc)*conj <= ZERO) inc = -inc;} } inc_inv = ONE/inc; i1 = SUNMAX(0, j-pdata->mukeep); i2 = SUNMIN(j + pdata->mlkeep, pdata->n_local-1); for (i=i1; i <= i2; i++) SM_COLUMN_ELEMENT_B(col_j,i,j) = inc_inv * (gtemp_data[i] - gy_data[i]); } } return(0); } /*================================================================ PART II - Backward Problems ================================================================*/ /*--------------------------------------------------------------- User-Callable Functions: initialization, reinit and free ---------------------------------------------------------------*/ int CVBBDPrecInitB(void *cvode_mem, int which, sunindextype NlocalB, sunindextype mudqB, sunindextype mldqB, sunindextype mukeepB, sunindextype mlkeepB, realtype dqrelyB, CVLocalFnB glocB, CVCommFnB cfnB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVBBDPrecDataB cvbbdB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSBBDPRE", "CVBBDPrecInitB", MSGBBD_MEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CVLS_NO_ADJ, "CVSBBDPRE", "CVBBDPrecInitB", MSGBBD_NO_ADJ); return(CVLS_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSBBDPRE", "CVBBDPrecInitB", MSGBBD_BAD_WHICH); return(CVLS_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; /* advance */ cvB_mem = cvB_mem->cv_next; } /* cv_mem corresponding to 'which' problem. 
*/ cvodeB_mem = (void *) (cvB_mem->cv_mem); /* Initialize the BBD preconditioner for this backward problem. */ flag = CVBBDPrecInit(cvodeB_mem, NlocalB, mudqB, mldqB, mukeepB, mlkeepB, dqrelyB, cvGlocWrapper, cvCfnWrapper); if (flag != CV_SUCCESS) return(flag); /* Allocate memory for CVBBDPrecDataB to store the user-provided functions which will be called from the wrappers */ cvbbdB_mem = NULL; cvbbdB_mem = (CVBBDPrecDataB) malloc(sizeof(* cvbbdB_mem)); if (cvbbdB_mem == NULL) { cvProcessError(cv_mem, CVLS_MEM_FAIL, "CVSBBDPRE", "CVBBDPrecInitB", MSGBBD_MEM_FAIL); return(CVLS_MEM_FAIL); } /* set pointers to user-provided functions */ cvbbdB_mem->glocB = glocB; cvbbdB_mem->cfnB = cfnB; /* Attach pmem and pfree */ cvB_mem->cv_pmem = cvbbdB_mem; cvB_mem->cv_pfree = CVBBDPrecFreeB; return(CVLS_SUCCESS); } int CVBBDPrecReInitB(void *cvode_mem, int which, sunindextype mudqB, sunindextype mldqB, realtype dqrelyB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CVLS_MEM_NULL, "CVSBBDPRE", "CVBBDPrecReInitB", MSGBBD_MEM_NULL); return(CVLS_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CVLS_NO_ADJ, "CVSBBDPRE", "CVBBDPrecReInitB", MSGBBD_NO_ADJ); return(CVLS_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CVLS_ILL_INPUT, "CVSBBDPRE", "CVBBDPrecReInitB", MSGBBD_BAD_WHICH); return(CVLS_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; /* advance */ cvB_mem = cvB_mem->cv_next; } /* cv_mem corresponding to 'which' backward problem. */ cvodeB_mem = (void *) (cvB_mem->cv_mem); /* ReInitialize the BBD preconditioner for this backward problem. 
*/ flag = CVBBDPrecReInit(cvodeB_mem, mudqB, mldqB, dqrelyB); return(flag); } static int CVBBDPrecFreeB(CVodeBMem cvB_mem) { free(cvB_mem->cv_pmem); cvB_mem->cv_pmem = NULL; return(0); } /*---------------------------------------------------------------- Wrapper functions ----------------------------------------------------------------*/ /* cvGlocWrapper interfaces to the CVLocalFnB routine provided by the user */ static int cvGlocWrapper(sunindextype NlocalB, realtype t, N_Vector yB, N_Vector gB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVBBDPrecDataB cvbbdB_mem; int flag; cv_mem = (CVodeMem) cvode_mem; ca_mem = cv_mem->cv_adj_mem; cvB_mem = ca_mem->ca_bckpbCrt; cvbbdB_mem = (CVBBDPrecDataB) (cvB_mem->cv_pmem); /* Get forward solution from interpolation */ flag = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (flag != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSBBDPRE", "cvGlocWrapper", MSGBBD_BAD_TINTERP); return(-1); } /* Call user's adjoint glocB routine */ return cvbbdB_mem->glocB(NlocalB, t, ca_mem->ca_ytmp, yB, gB, cvB_mem->cv_user_data); } /* cvCfnWrapper interfaces to the CVCommFnB routine provided by the user */ static int cvCfnWrapper(sunindextype NlocalB, realtype t, N_Vector yB, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; CVBBDPrecDataB cvbbdB_mem; int flag; cv_mem = (CVodeMem) cvode_mem; ca_mem = cv_mem->cv_adj_mem; cvB_mem = ca_mem->ca_bckpbCrt; cvbbdB_mem = (CVBBDPrecDataB) (cvB_mem->cv_pmem); if (cvbbdB_mem->cfnB == NULL) return(0); /* Get forward solution from interpolation */ flag = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (flag != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVSBBDPRE", "cvCfnWrapper", MSGBBD_BAD_TINTERP); return(-1); } /* Call user's adjoint cfnB routine */ return cvbbdB_mem->cfnB(NlocalB, t, ca_mem->ca_ytmp, yB, cvB_mem->cv_user_data); } StanHeaders/src/cvodes/cvodea.c0000644000176200001440000025343514645137106016155 0ustar liggesusers/* * 
----------------------------------------------------------------- * Programmer(s): Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for the CVODEA adjoint integrator. * ----------------------------------------------------------------- */ /* * ================================================================= * IMPORTED HEADER FILES * ================================================================= */ #include #include #include "cvodes_impl.h" #include #include /* * ================================================================= * CVODEA PRIVATE CONSTANTS * ================================================================= */ #define ZERO RCONST(0.0) /* real 0.0 */ #define ONE RCONST(1.0) /* real 1.0 */ #define TWO RCONST(2.0) /* real 2.0 */ #define HUNDRED RCONST(100.0) /* real 100.0 */ #define FUZZ_FACTOR RCONST(1000000.0) /* fuzz factor for IMget */ /*=================================================================*/ /* Shortcuts */ /*=================================================================*/ #define CV_PROFILER cv_mem->cv_sunctx->profiler /* * ================================================================= * PRIVATE FUNCTION PROTOTYPES * ================================================================= */ static CkpntMem CVAckpntInit(CVodeMem cv_mem); static CkpntMem CVAckpntNew(CVodeMem cv_mem); static void CVAckpntDelete(CkpntMem *ck_memPtr); static void CVAbckpbDelete(CVodeBMem *cvB_memPtr); static int CVAdataStore(CVodeMem cv_mem, CkpntMem ck_mem); static int CVAckpntGet(CVodeMem cv_mem, CkpntMem ck_mem); static int 
CVAfindIndex(CVodeMem cv_mem, realtype t, long int *indx, booleantype *newpoint); static booleantype CVAhermiteMalloc(CVodeMem cv_mem); static void CVAhermiteFree(CVodeMem cv_mem); static int CVAhermiteGetY(CVodeMem cv_mem, realtype t, N_Vector y, N_Vector *yS); static int CVAhermiteStorePnt(CVodeMem cv_mem, DtpntMem d); static booleantype CVApolynomialMalloc(CVodeMem cv_mem); static void CVApolynomialFree(CVodeMem cv_mem); static int CVApolynomialGetY(CVodeMem cv_mem, realtype t, N_Vector y, N_Vector *yS); static int CVApolynomialStorePnt(CVodeMem cv_mem, DtpntMem d); /* Wrappers */ static int CVArhs(realtype t, N_Vector yB, N_Vector yBdot, void *cvode_mem); static int CVArhsQ(realtype t, N_Vector yB, N_Vector qBdot, void *cvode_mem); /* * ================================================================= * EXPORTED FUNCTIONS IMPLEMENTATION * ================================================================= */ /* * CVodeAdjInit * * This routine initializes ASA and allocates space for the adjoint * memory structure. 
*/ int CVodeAdjInit(void *cvode_mem, long int steps, int interp) { CVadjMem ca_mem; CVodeMem cv_mem; long int i, ii; /* --------------- * Check arguments * --------------- */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeAdjInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem)cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); if (steps <= 0) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeAdjInit", MSGCV_BAD_STEPS); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } if ( (interp != CV_HERMITE) && (interp != CV_POLYNOMIAL) ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeAdjInit", MSGCV_BAD_INTERP); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* ---------------------------- * Allocate CVODEA memory block * ---------------------------- */ ca_mem = NULL; ca_mem = (CVadjMem) malloc(sizeof(struct CVadjMemRec)); if (ca_mem == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeAdjInit", MSGCV_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } /* Attach ca_mem to CVodeMem structure */ cv_mem->cv_adj_mem = ca_mem; /* ------------------------------ * Initialization of check points * ------------------------------ */ /* Set Check Points linked list to NULL */ ca_mem->ck_mem = NULL; /* Initialize nckpnts to ZERO */ ca_mem->ca_nckpnts = 0; /* No interpolation data is available */ ca_mem->ca_ckpntData = NULL; /* ------------------------------------ * Initialization of interpolation data * ------------------------------------ */ /* Interpolation type */ ca_mem->ca_IMtype = interp; /* Number of steps between check points */ ca_mem->ca_nsteps = steps; /* Last index used in CVAfindIndex, initailize to invalid value */ ca_mem->ca_ilast = -1; /* Allocate space for the array of Data Point structures */ ca_mem->dt_mem = NULL; ca_mem->dt_mem = (DtpntMem *) malloc((steps+1)*sizeof(struct DtpntMemRec *)); if (ca_mem->dt_mem == NULL) { free(ca_mem); ca_mem = NULL; 
cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeAdjInit", MSGCV_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } for (i=0; i<=steps; i++) { ca_mem->dt_mem[i] = NULL; ca_mem->dt_mem[i] = (DtpntMem) malloc(sizeof(struct DtpntMemRec)); if (ca_mem->dt_mem[i] == NULL) { for(ii=0; iidt_mem[ii]); ca_mem->dt_mem[ii] = NULL;} free(ca_mem->dt_mem); ca_mem->dt_mem = NULL; free(ca_mem); ca_mem = NULL; cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeAdjInit", MSGCV_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } } /* Attach functions for the appropriate interpolation module */ switch(interp) { case CV_HERMITE: ca_mem->ca_IMmalloc = CVAhermiteMalloc; ca_mem->ca_IMfree = CVAhermiteFree; ca_mem->ca_IMget = CVAhermiteGetY; ca_mem->ca_IMstore = CVAhermiteStorePnt; break; case CV_POLYNOMIAL: ca_mem->ca_IMmalloc = CVApolynomialMalloc; ca_mem->ca_IMfree = CVApolynomialFree; ca_mem->ca_IMget = CVApolynomialGetY; ca_mem->ca_IMstore = CVApolynomialStorePnt; break; } /* The interpolation module has not been initialized yet */ ca_mem->ca_IMmallocDone = SUNFALSE; /* By default we will store but not interpolate sensitivities * - IMstoreSensi will be set in CVodeF to SUNFALSE if FSA is not enabled * or if the user can force this through CVodeSetAdjNoSensi * - IMinterpSensi will be set in CVodeB to SUNTRUE if IMstoreSensi is * SUNTRUE and if at least one backward problem requires sensitivities */ ca_mem->ca_IMstoreSensi = SUNTRUE; ca_mem->ca_IMinterpSensi = SUNFALSE; /* ------------------------------------ * Initialize list of backward problems * ------------------------------------ */ ca_mem->cvB_mem = NULL; ca_mem->ca_bckpbCrt = NULL; ca_mem->ca_nbckpbs = 0; /* -------------------------------- * CVodeF and CVodeB not called yet * -------------------------------- */ ca_mem->ca_firstCVodeFcall = SUNTRUE; ca_mem->ca_tstopCVodeFcall = SUNFALSE; ca_mem->ca_firstCVodeBcall = SUNTRUE; ca_mem->ca_rootret = SUNFALSE; /* 
--------------------------------------------- * ASA initialized and allocated * --------------------------------------------- */ cv_mem->cv_adj = SUNTRUE; cv_mem->cv_adjMallocDone = SUNTRUE; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* CVodeAdjReInit * * This routine reinitializes the CVODEA memory structure assuming that the * the number of steps between check points and the type of interpolation * remain unchanged. * The list of check points (and associated memory) is deleted. * The list of backward problems is kept (however, new backward problems can * be added to this list by calling CVodeCreateB). * The CVODES memory for the forward and backward problems can be reinitialized * separately by calling CVodeReInit and CVodeReInitB, respectively. * NOTE: if a completely new list of backward problems is also needed, then * simply free the adjoint memory (by calling CVodeAdjFree) and reinitialize * ASA with CVodeAdjInit. */ int CVodeAdjReInit(void *cvode_mem) { CVadjMem ca_mem; CVodeMem cv_mem; /* Check cvode_mem */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeAdjReInit", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeAdjReInit", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Free current list of Check Points */ while (ca_mem->ck_mem != NULL) CVAckpntDelete(&(ca_mem->ck_mem)); /* Initialization of check points */ ca_mem->ck_mem = NULL; ca_mem->ca_nckpnts = 0; ca_mem->ca_ckpntData = NULL; /* CVodeF and CVodeB not called yet */ ca_mem->ca_firstCVodeFcall = SUNTRUE; ca_mem->ca_tstopCVodeFcall = SUNFALSE; ca_mem->ca_firstCVodeBcall = SUNTRUE; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } /* * CVodeAdjFree * * This routine frees the memory allocated by CVodeAdjInit. */ void CVodeAdjFree(void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; long int i; if (cvode_mem == NULL) return; cv_mem = (CVodeMem) cvode_mem; if (cv_mem->cv_adjMallocDone) { ca_mem = cv_mem->cv_adj_mem; /* Delete check points one by one */ while (ca_mem->ck_mem != NULL) CVAckpntDelete(&(ca_mem->ck_mem)); /* Free vectors at all data points */ if (ca_mem->ca_IMmallocDone) { ca_mem->ca_IMfree(cv_mem); } for(i=0; i<=ca_mem->ca_nsteps; i++) { free(ca_mem->dt_mem[i]); ca_mem->dt_mem[i] = NULL; } free(ca_mem->dt_mem); ca_mem->dt_mem = NULL; /* Delete backward problems one by one */ while (ca_mem->cvB_mem != NULL) CVAbckpbDelete(&(ca_mem->cvB_mem)); /* Free CVODEA memory */ free(ca_mem); cv_mem->cv_adj_mem = NULL; } } /* * CVodeF * * This routine integrates to tout and returns solution into yout. * In the same time, it stores check point data every 'steps' steps. * * CVodeF can be called repeatedly by the user. * * ncheckPtr points to the number of check points stored so far. 
*/ int CVodeF(void *cvode_mem, realtype tout, N_Vector yout, realtype *tret, int itask, int *ncheckPtr) { CVadjMem ca_mem; CVodeMem cv_mem; CkpntMem tmp; DtpntMem *dt_mem; long int nstloc; int flag, i; booleantype allocOK, earlyret; realtype ttest; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeF", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeF", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check for yout != NULL */ if (yout == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeF", MSGCV_YOUT_NULL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Check for tret != NULL */ if (tret == NULL) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeF", MSGCV_TRET_NULL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Check for valid itask */ if ( (itask != CV_NORMAL) && (itask != CV_ONE_STEP) ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeF", MSGCV_BAD_ITASK); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* All error checking done */ dt_mem = ca_mem->dt_mem; /* If tstop is enabled, store some info */ if (cv_mem->cv_tstopset) { ca_mem->ca_tstopCVodeFcall = SUNTRUE; ca_mem->ca_tstopCVodeF = cv_mem->cv_tstop; } /* On the first step: * - set tinitial * - initialize list of check points * - if needed, initialize the interpolation module * - load dt_mem[0] * On subsequent steps, test if taking a new step is necessary. 
*/ if ( ca_mem->ca_firstCVodeFcall ) { ca_mem->ca_tinitial = cv_mem->cv_tn; ca_mem->ck_mem = CVAckpntInit(cv_mem); if (ca_mem->ck_mem == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeF", MSGCV_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } if ( !ca_mem->ca_IMmallocDone ) { /* Do we need to store sensitivities? */ if (!cv_mem->cv_sensi) ca_mem->ca_IMstoreSensi = SUNFALSE; /* Allocate space for interpolation data */ allocOK = ca_mem->ca_IMmalloc(cv_mem); if (!allocOK) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeF", MSGCV_MEM_FAIL); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_MEM_FAIL); } /* Rename zn and, if needed, znS for use in interpolation */ for (i=0;ica_Y[i] = cv_mem->cv_zn[i]; if (ca_mem->ca_IMstoreSensi) { for (i=0;ica_YS[i] = cv_mem->cv_znS[i]; } ca_mem->ca_IMmallocDone = SUNTRUE; } dt_mem[0]->t = ca_mem->ck_mem->ck_t0; ca_mem->ca_IMstore(cv_mem, dt_mem[0]); ca_mem->ca_firstCVodeFcall = SUNFALSE; } else if ( itask == CV_NORMAL ) { /* When in normal mode, check if tout was passed or if a previous root was not reported and return an interpolated solution. No changes to ck_mem or dt_mem are needed. */ /* flag to signal if an early return is needed */ earlyret = SUNFALSE; /* if a root needs to be reported compare tout to troot otherwise compare to the current time tn */ ttest = (ca_mem->ca_rootret) ? 
ca_mem->ca_troot : cv_mem->cv_tn; if ((ttest - tout)*cv_mem->cv_h >= ZERO) { /* ttest is after tout, interpolate to tout */ *tret = tout; flag = CVodeGetDky(cv_mem, tout, 0, yout); earlyret = SUNTRUE; } else if (ca_mem->ca_rootret) { /* tout is after troot, interpolate to troot */ *tret = ca_mem->ca_troot; flag = CVodeGetDky(cv_mem, ca_mem->ca_troot, 0, yout); flag = CV_ROOT_RETURN; ca_mem->ca_rootret = SUNFALSE; earlyret = SUNTRUE; } /* return if necessary */ if (earlyret) { *ncheckPtr = ca_mem->ca_nckpnts; ca_mem->ca_IMnewData = SUNTRUE; ca_mem->ca_ckpntData = ca_mem->ck_mem; ca_mem->ca_np = cv_mem->cv_nst % ca_mem->ca_nsteps + 1; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } } /* Integrate to tout (in CV_ONE_STEP mode) while loading check points */ nstloc = 0; for(;;) { /* Check for too many steps */ if ( (cv_mem->cv_mxstep>0) && (nstloc >= cv_mem->cv_mxstep) ) { cvProcessError(cv_mem, CV_TOO_MUCH_WORK, "CVODEA", "CVodeF", MSGCV_MAX_STEPS, cv_mem->cv_tn); flag = CV_TOO_MUCH_WORK; break; } /* Perform one step of the integration */ flag = CVode(cv_mem, tout, yout, tret, CV_ONE_STEP); if (flag < 0) break; nstloc++; /* Test if a new check point is needed */ if ( cv_mem->cv_nst % ca_mem->ca_nsteps == 0 ) { ca_mem->ck_mem->ck_t1 = cv_mem->cv_tn; /* Create a new check point, load it, and append it to the list */ tmp = CVAckpntNew(cv_mem); if (tmp == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeF", MSGCV_MEM_FAIL); flag = CV_MEM_FAIL; break; } tmp->ck_next = ca_mem->ck_mem; ca_mem->ck_mem = tmp; ca_mem->ca_nckpnts++; cv_mem->cv_forceSetup = SUNTRUE; /* Reset i=0 and load dt_mem[0] */ dt_mem[0]->t = ca_mem->ck_mem->ck_t0; ca_mem->ca_IMstore(cv_mem, dt_mem[0]); } else { /* Load next point in dt_mem */ dt_mem[cv_mem->cv_nst % ca_mem->ca_nsteps]->t = cv_mem->cv_tn; ca_mem->ca_IMstore(cv_mem, dt_mem[cv_mem->cv_nst % ca_mem->ca_nsteps]); } /* Set t1 field of the current ckeck point structure for the case in which there will be no future check points 
*/ ca_mem->ck_mem->ck_t1 = cv_mem->cv_tn; /* tfinal is now set to tn */ ca_mem->ca_tfinal = cv_mem->cv_tn; /* Return if in CV_ONE_STEP mode */ if (itask == CV_ONE_STEP) break; /* CV_NORMAL_STEP returns */ /* Return if tout reached */ if ( (*tret - tout)*cv_mem->cv_h >= ZERO ) { /* If this was a root return, save the root time to return later */ if (flag == CV_ROOT_RETURN) { ca_mem->ca_rootret = SUNTRUE; ca_mem->ca_troot = *tret; } /* Get solution value at tout to return now */ *tret = tout; flag = CVodeGetDky(cv_mem, tout, 0, yout); /* Reset tretlast in cv_mem so that CVodeGetQuad and CVodeGetSens * evaluate quadratures and/or sensitivities at the proper time */ cv_mem->cv_tretlast = tout; break; } /* Return if tstop or a root was found */ if ( (flag == CV_TSTOP_RETURN) || (flag == CV_ROOT_RETURN) ) break; } /* end of for(;;) */ /* Get ncheck from ca_mem */ *ncheckPtr = ca_mem->ca_nckpnts; /* Data is available for the last interval */ ca_mem->ca_IMnewData = SUNTRUE; ca_mem->ca_ckpntData = ca_mem->ck_mem; ca_mem->ca_np = cv_mem->cv_nst % ca_mem->ca_nsteps + 1; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* * ================================================================= * FUNCTIONS FOR BACKWARD PROBLEMS * ================================================================= */ int CVodeCreateB(void *cvode_mem, int lmmB, int *which) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem new_cvB_mem; void *cvodeB_mem; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeCreateB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeCreateB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Allocate space for new CVodeBMem object */ new_cvB_mem = NULL; new_cvB_mem = (CVodeBMem) malloc(sizeof(struct CVodeBMemRec)); if (new_cvB_mem == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeCreateB", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } /* Create and set a new CVODES object for the backward problem */ cvodeB_mem = CVodeCreate(lmmB, cv_mem->cv_sunctx); if (cvodeB_mem == NULL) { cvProcessError(cv_mem, CV_MEM_FAIL, "CVODEA", "CVodeCreateB", MSGCV_MEM_FAIL); return(CV_MEM_FAIL); } CVodeSetUserData(cvodeB_mem, cvode_mem); CVodeSetMaxHnilWarns(cvodeB_mem, -1); CVodeSetErrHandlerFn(cvodeB_mem, cv_mem->cv_ehfun, cv_mem->cv_eh_data); CVodeSetErrFile(cvodeB_mem, cv_mem->cv_errfp); /* Set/initialize fields in the new CVodeBMem object, new_cvB_mem */ new_cvB_mem->cv_index = ca_mem->ca_nbckpbs; new_cvB_mem->cv_mem = (CVodeMem) cvodeB_mem; new_cvB_mem->cv_f = NULL; new_cvB_mem->cv_fs = NULL; new_cvB_mem->cv_fQ = NULL; new_cvB_mem->cv_fQs = NULL; new_cvB_mem->cv_user_data = NULL; new_cvB_mem->cv_lmem = NULL; new_cvB_mem->cv_lfree = NULL; new_cvB_mem->cv_pmem = NULL; new_cvB_mem->cv_pfree = NULL; new_cvB_mem->cv_y = NULL; new_cvB_mem->cv_f_withSensi = SUNFALSE; new_cvB_mem->cv_fQ_withSensi = SUNFALSE; /* Attach the new object to the linked list cvB_mem */ new_cvB_mem->cv_next = ca_mem->cvB_mem; ca_mem->cvB_mem = new_cvB_mem; /* Return the index of the newly created CVodeBMem object. 
* This must be passed to CVodeInitB and to other ***B * functions to set optional inputs for this backward problem */ *which = ca_mem->ca_nbckpbs; ca_mem->ca_nbckpbs++; return(CV_SUCCESS); } int CVodeInitB(void *cvode_mem, int which, CVRhsFnB fB, realtype tB0, N_Vector yB0) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeInitB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeInitB", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeInitB", MSGCV_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); /* Allocate and set the CVODES object */ flag = CVodeInit(cvodeB_mem, CVArhs, tB0, yB0); if (flag != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* Copy fB function in cvB_mem */ cvB_mem->cv_f_withSensi = SUNFALSE; cvB_mem->cv_f = fB; /* Allocate space and initialize the y Nvector in cvB_mem */ cvB_mem->cv_t0 = tB0; cvB_mem->cv_y = N_VClone(yB0); N_VScale(ONE, yB0, cvB_mem->cv_y); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } int CVodeInitBS(void *cvode_mem, int which, CVRhsFnBS fBs, realtype tB0, N_Vector yB0) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, 
CV_MEM_NULL, "CVODEA", "CVodeInitBS", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeInitBS", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeInitBS", MSGCV_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); /* Allocate and set the CVODES object */ flag = CVodeInit(cvodeB_mem, CVArhs, tB0, yB0); if (flag != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* Copy fBs function in cvB_mem */ cvB_mem->cv_f_withSensi = SUNTRUE; cvB_mem->cv_fs = fBs; /* Allocate space and initialize the y Nvector in cvB_mem */ cvB_mem->cv_t0 = tB0; cvB_mem->cv_y = N_VClone(yB0); N_VScale(ONE, yB0, cvB_mem->cv_y); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } int CVodeReInitB(void *cvode_mem, int which, realtype tB0, N_Vector yB0) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeReInitB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeReInitB", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeReInitB", MSGCV_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); /* Reinitialize CVODES object */ flag = CVodeReInit(cvodeB_mem, tB0, yB0); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } int CVodeSStolerancesB(void *cvode_mem, int which, realtype reltolB, realtype abstolB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSStolerancesB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSStolerancesB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSStolerancesB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); /* Set tolerances */ flag = CVodeSStolerances(cvodeB_mem, reltolB, abstolB); return(flag); } int CVodeSVtolerancesB(void *cvode_mem, int which, realtype reltolB, N_Vector abstolB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeSVtolerancesB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeSVtolerancesB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeSVtolerancesB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); /* Set tolerances */ flag = CVodeSVtolerances(cvodeB_mem, reltolB, abstolB); return(flag); } int CVodeQuadInitB(void *cvode_mem, int which, CVQuadRhsFnB fQB, N_Vector yQB0) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeQuadInitB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeQuadInitB", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeQuadInitB", MSGCV_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVodeQuadInit(cvodeB_mem, CVArhsQ, yQB0); if (flag != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } cvB_mem->cv_fQ_withSensi = SUNFALSE; cvB_mem->cv_fQ = fQB; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } int CVodeQuadInitBS(void *cvode_mem, int which, CVQuadRhsFnBS fQBs, N_Vector yQB0) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeQuadInitBS", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeQuadInitBS", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeQuadInitBS", MSGCV_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVodeQuadInit(cvodeB_mem, CVArhsQ, yQB0); if (flag != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } cvB_mem->cv_fQ_withSensi = SUNTRUE; cvB_mem->cv_fQs = fQBs; SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } int CVodeQuadReInitB(void *cvode_mem, int which, N_Vector yQB0) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeQuadReInitB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeQuadReInitB", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeQuadReInitB", MSGCV_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVodeQuadReInit(cvodeB_mem, yQB0); if (flag != CV_SUCCESS) { SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_SUCCESS); } int CVodeQuadSStolerancesB(void *cvode_mem, int which, realtype reltolQB, realtype abstolQB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeQuadSStolerancesB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeQuadSStolerancesB", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeQuadSStolerancesB", MSGCV_BAD_WHICH); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVodeQuadSStolerances(cvodeB_mem, reltolQB, abstolQB); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } int CVodeQuadSVtolerancesB(void *cvode_mem, int which, realtype reltolQB, N_Vector abstolQB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeQuadSStolerancesB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeQuadSStolerancesB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeQuadSStolerancesB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); flag = CVodeQuadSVtolerances(cvodeB_mem, reltolQB, abstolQB); return(flag); } /* * CVodeB * * This routine performs the backward integration towards tBout * of all backward problems that were defined. 
* When necessary, it performs a forward integration between two * consecutive check points to update interpolation data. * * On a successful return, CVodeB returns CV_SUCCESS. * * NOTE that CVodeB DOES NOT return the solution for the backward * problem(s). Use CVodeGetB to extract the solution at tBret * for any given backward problem. * * If there are multiple backward problems and multiple check points, * CVodeB may not succeed in getting all problems to take one step * when called in ONE_STEP mode. */ int CVodeB(void *cvode_mem, realtype tBout, int itaskB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem, tmp_cvB_mem; CkpntMem ck_mem; int sign, flag=0; realtype tfuzz, tBret, tBn; booleantype gotCheckpoint, isActive, reachedTBout; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; SUNDIALS_MARK_FUNCTION_BEGIN(CV_PROFILER); /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeB", MSGCV_NO_ADJ); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check if any backward problem has been defined */ if ( ca_mem->ca_nbckpbs == 0 ) { cvProcessError(cv_mem, CV_NO_BCK, "CVODEA", "CVodeB", MSGCV_NO_BCK); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_BCK); } cvB_mem = ca_mem->cvB_mem; /* Check whether CVodeF has been called */ if ( ca_mem->ca_firstCVodeFcall ) { cvProcessError(cv_mem, CV_NO_FWD, "CVODEA", "CVodeB", MSGCV_NO_FWD); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_NO_FWD); } sign = (ca_mem->ca_tfinal - ca_mem->ca_tinitial > ZERO) ? 
1 : -1; /* If this is the first call, loop over all backward problems and * - check that tB0 is valid * - check that tBout is ahead of tB0 in the backward direction * - check whether we need to interpolate forward sensitivities */ if ( ca_mem->ca_firstCVodeBcall ) { tmp_cvB_mem = cvB_mem; while(tmp_cvB_mem != NULL) { tBn = tmp_cvB_mem->cv_mem->cv_tn; if ( (sign*(tBn-ca_mem->ca_tinitial) < ZERO) || (sign*(ca_mem->ca_tfinal-tBn) < ZERO) ) { cvProcessError(cv_mem, CV_BAD_TB0, "CVODEA", "CVodeB", MSGCV_BAD_TB0, tmp_cvB_mem->cv_index); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_BAD_TB0); } if (sign*(tBn-tBout) <= ZERO) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeB", MSGCV_BAD_TBOUT, tmp_cvB_mem->cv_index); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } if ( tmp_cvB_mem->cv_f_withSensi || tmp_cvB_mem->cv_fQ_withSensi ) ca_mem->ca_IMinterpSensi = SUNTRUE; tmp_cvB_mem = tmp_cvB_mem->cv_next; } if ( ca_mem->ca_IMinterpSensi && !ca_mem->ca_IMstoreSensi) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeB", MSGCV_BAD_SENSI); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } ca_mem->ca_firstCVodeBcall = SUNFALSE; } /* Check if itaskB is legal */ if ( (itaskB != CV_NORMAL) && (itaskB != CV_ONE_STEP) ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeB", MSGCV_BAD_ITASKB); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } /* Check if tBout is legal */ if ( (sign*(tBout-ca_mem->ca_tinitial) < ZERO) || (sign*(ca_mem->ca_tfinal-tBout) < ZERO) ) { tfuzz = HUNDRED*cv_mem->cv_uround*(SUNRabs(ca_mem->ca_tinitial) + SUNRabs(ca_mem->ca_tfinal)); if ( (sign*(tBout-ca_mem->ca_tinitial) < ZERO) && (SUNRabs(tBout-ca_mem->ca_tinitial) < tfuzz) ) { tBout = ca_mem->ca_tinitial; } else { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeB", MSGCV_BAD_TBOUT); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(CV_ILL_INPUT); } } /* Loop through the check points and stop as soon as a backward * problem has its tn value 
behind the current check point's t0_ * value (in the backward direction) */ ck_mem = ca_mem->ck_mem; gotCheckpoint = SUNFALSE; for(;;) { tmp_cvB_mem = cvB_mem; while(tmp_cvB_mem != NULL) { tBn = tmp_cvB_mem->cv_mem->cv_tn; if ( sign*(tBn-ck_mem->ck_t0) > ZERO ) { gotCheckpoint = SUNTRUE; break; } if ( (itaskB==CV_NORMAL) && (tBn == ck_mem->ck_t0) && (sign*(tBout-ck_mem->ck_t0) >= ZERO) ) { gotCheckpoint = SUNTRUE; break; } tmp_cvB_mem = tmp_cvB_mem->cv_next; } if (gotCheckpoint) break; if (ck_mem->ck_next == NULL) break; ck_mem = ck_mem->ck_next; } /* Starting with the current check point from above, loop over check points while propagating backward problems */ for(;;) { /* Store interpolation data if not available. This is the 2nd forward integration pass */ if (ck_mem != ca_mem->ca_ckpntData) { flag = CVAdataStore(cv_mem, ck_mem); if (flag != CV_SUCCESS) break; } /* Loop through all backward problems and, if needed, * propagate their solution towards tBout */ tmp_cvB_mem = cvB_mem; while (tmp_cvB_mem != NULL) { /* Decide if current backward problem is "active" in this check point */ isActive = SUNTRUE; tBn = tmp_cvB_mem->cv_mem->cv_tn; if ( (tBn == ck_mem->ck_t0) && (sign*(tBout-ck_mem->ck_t0) < ZERO ) ) isActive = SUNFALSE; if ( (tBn == ck_mem->ck_t0) && (itaskB==CV_ONE_STEP) ) isActive = SUNFALSE; if ( sign * (tBn - ck_mem->ck_t0) < ZERO ) isActive = SUNFALSE; if ( isActive ) { /* Store the address of current backward problem memory * in ca_mem to be used in the wrapper functions */ ca_mem->ca_bckpbCrt = tmp_cvB_mem; /* Integrate current backward problem */ CVodeSetStopTime(tmp_cvB_mem->cv_mem, ck_mem->ck_t0); flag = CVode(tmp_cvB_mem->cv_mem, tBout, tmp_cvB_mem->cv_y, &tBret, itaskB); /* Set the time at which we will report solution and/or quadratures */ tmp_cvB_mem->cv_tout = tBret; /* If an error occurred, exit while loop */ if (flag < 0) break; } else { flag = CV_SUCCESS; tmp_cvB_mem->cv_tout = tBn; } /* Move to next backward problem */ tmp_cvB_mem = 
tmp_cvB_mem->cv_next; } /* If an error occurred, return now */ if (flag <0) { cvProcessError(cv_mem, flag, "CVODEA", "CVodeB", MSGCV_BACK_ERROR, tmp_cvB_mem->cv_index); SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } /* If in CV_ONE_STEP mode, return now (flag = CV_SUCCESS) */ if (itaskB == CV_ONE_STEP) break; /* If all backward problems have succesfully reached tBout, return now */ reachedTBout = SUNTRUE; tmp_cvB_mem = cvB_mem; while(tmp_cvB_mem != NULL) { if ( sign*(tmp_cvB_mem->cv_tout - tBout) > ZERO ) { reachedTBout = SUNFALSE; break; } tmp_cvB_mem = tmp_cvB_mem->cv_next; } if ( reachedTBout ) break; /* Move check point in linked list to next one */ ck_mem = ck_mem->ck_next; } SUNDIALS_MARK_FUNCTION_END(CV_PROFILER); return(flag); } int CVodeGetB(void *cvode_mem, int which, realtype *tret, N_Vector yB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeGetB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? 
*/ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeGetB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeGetB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } N_VScale(ONE, cvB_mem->cv_y, yB); *tret = cvB_mem->cv_tout; return(CV_SUCCESS); } /* * CVodeGetQuadB */ int CVodeGetQuadB(void *cvode_mem, int which, realtype *tret, N_Vector qB) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; void *cvodeB_mem; long int nstB; int flag; /* Check if cvode_mem exists */ if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeGetQuadB", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; /* Was ASA initialized? */ if (cv_mem->cv_adjMallocDone == SUNFALSE) { cvProcessError(cv_mem, CV_NO_ADJ, "CVODEA", "CVodeGetQuadB", MSGCV_NO_ADJ); return(CV_NO_ADJ); } ca_mem = cv_mem->cv_adj_mem; /* Check the value of which */ if ( which >= ca_mem->ca_nbckpbs ) { cvProcessError(cv_mem, CV_ILL_INPUT, "CVODEA", "CVodeGetQuadB", MSGCV_BAD_WHICH); return(CV_ILL_INPUT); } /* Find the CVodeBMem entry in the linked list corresponding to which */ cvB_mem = ca_mem->cvB_mem; while (cvB_mem != NULL) { if ( which == cvB_mem->cv_index ) break; cvB_mem = cvB_mem->cv_next; } cvodeB_mem = (void *) (cvB_mem->cv_mem); /* If the integration for this backward problem has not started yet, * simply return the current value of qB (i.e. 
the final conditions) */ flag = CVodeGetNumSteps(cvodeB_mem, &nstB); if (nstB == 0) { N_VScale(ONE, cvB_mem->cv_mem->cv_znQ[0], qB); *tret = cvB_mem->cv_tout; } else { flag = CVodeGetQuad(cvodeB_mem, tret, qB); } return(flag); } /* * ================================================================= * PRIVATE FUNCTIONS FOR CHECK POINTS * ================================================================= */ /* * CVAckpntInit * * This routine initializes the check point linked list with * information from the initial time. */ static CkpntMem CVAckpntInit(CVodeMem cv_mem) { CkpntMem ck_mem; int is; /* Allocate space for ckdata */ ck_mem = NULL; ck_mem = (CkpntMem) malloc(sizeof(struct CkpntMemRec)); if (ck_mem == NULL) return(NULL); ck_mem->ck_zn[0] = N_VClone(cv_mem->cv_tempv); if (ck_mem->ck_zn[0] == NULL) { free(ck_mem); ck_mem = NULL; return(NULL); } ck_mem->ck_zn[1] = N_VClone(cv_mem->cv_tempv); if (ck_mem->ck_zn[1] == NULL) { N_VDestroy(ck_mem->ck_zn[0]); free(ck_mem); ck_mem = NULL; return(NULL); } /* ck_mem->ck_zn[qmax] was not allocated */ ck_mem->ck_zqm = 0; /* Load ckdata from cv_mem */ N_VScale(ONE, cv_mem->cv_zn[0], ck_mem->ck_zn[0]); ck_mem->ck_t0 = cv_mem->cv_tn; ck_mem->ck_nst = 0; ck_mem->ck_q = 1; ck_mem->ck_h = 0.0; /* Do we need to carry quadratures */ ck_mem->ck_quadr = cv_mem->cv_quadr && cv_mem->cv_errconQ; if (ck_mem->ck_quadr) { ck_mem->ck_znQ[0] = N_VClone(cv_mem->cv_tempvQ); if (ck_mem->ck_znQ[0] == NULL) { N_VDestroy(ck_mem->ck_zn[0]); N_VDestroy(ck_mem->ck_zn[1]); free(ck_mem); ck_mem = NULL; return(NULL); } N_VScale(ONE, cv_mem->cv_znQ[0], ck_mem->ck_znQ[0]); } /* Do we need to carry sensitivities? 
*/ ck_mem->ck_sensi = cv_mem->cv_sensi; if (ck_mem->ck_sensi) { ck_mem->ck_Ns = cv_mem->cv_Ns; ck_mem->ck_znS[0] = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (ck_mem->ck_znS[0] == NULL) { N_VDestroy(ck_mem->ck_zn[0]); N_VDestroy(ck_mem->ck_zn[1]); if (ck_mem->ck_quadr) N_VDestroy(ck_mem->ck_znQ[0]); free(ck_mem); ck_mem = NULL; return(NULL); } for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znS[0], ck_mem->ck_znS[0]); } /* Do we need to carry quadrature sensitivities? */ ck_mem->ck_quadr_sensi = cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS; if (ck_mem->ck_quadr_sensi) { ck_mem->ck_znQS[0] = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempvQ); if (ck_mem->ck_znQS[0] == NULL) { N_VDestroy(ck_mem->ck_zn[0]); N_VDestroy(ck_mem->ck_zn[1]); if (ck_mem->ck_quadr) N_VDestroy(ck_mem->ck_znQ[0]); N_VDestroyVectorArray(ck_mem->ck_znS[0], cv_mem->cv_Ns); free(ck_mem); ck_mem = NULL; return(NULL); } for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znQS[0], ck_mem->ck_znQS[0]); } /* Next in list */ ck_mem->ck_next = NULL; return(ck_mem); } /* * CVAckpntNew * * This routine allocates space for a new check point and sets * its data from current values in cv_mem. */ static CkpntMem CVAckpntNew(CVodeMem cv_mem) { CkpntMem ck_mem; int j, jj, is, qmax; /* Allocate space for ckdata */ ck_mem = NULL; ck_mem = (CkpntMem) malloc(sizeof(struct CkpntMemRec)); if (ck_mem == NULL) return(NULL); /* Set cv_next to NULL */ ck_mem->ck_next = NULL; /* Test if we need to allocate space for the last zn. * NOTE: zn(qmax) may be needed for a hot restart, if an order * increase is deemed necessary at the first step after a check point */ qmax = cv_mem->cv_qmax; ck_mem->ck_zqm = (cv_mem->cv_q < qmax) ? 
qmax : 0; for (j=0; j<=cv_mem->cv_q; j++) { ck_mem->ck_zn[j] = N_VClone(cv_mem->cv_tempv); if (ck_mem->ck_zn[j] == NULL) { for (jj=0; jjck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } if (cv_mem->cv_q < qmax) { ck_mem->ck_zn[qmax] = N_VClone(cv_mem->cv_tempv); if (ck_mem->ck_zn[qmax] == NULL) { for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } /* Test if we need to carry quadratures */ ck_mem->ck_quadr = cv_mem->cv_quadr && cv_mem->cv_errconQ; if (ck_mem->ck_quadr) { for (j=0; j<=cv_mem->cv_q; j++) { ck_mem->ck_znQ[j] = N_VClone(cv_mem->cv_tempvQ); if(ck_mem->ck_znQ[j] == NULL) { for (jj=0; jjck_znQ[jj]); if (cv_mem->cv_q < qmax) N_VDestroy(ck_mem->ck_zn[qmax]); for (jj=0; jj<=cv_mem->cv_q; j++) N_VDestroy(ck_mem->ck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } if (cv_mem->cv_q < qmax) { ck_mem->ck_znQ[qmax] = N_VClone(cv_mem->cv_tempvQ); if (ck_mem->ck_znQ[qmax] == NULL) { for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_znQ[jj]); N_VDestroy(ck_mem->ck_zn[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } } /* Test if we need to carry sensitivities */ ck_mem->ck_sensi = cv_mem->cv_sensi; if (ck_mem->ck_sensi) { ck_mem->ck_Ns = cv_mem->cv_Ns; for (j=0; j<=cv_mem->cv_q; j++) { ck_mem->ck_znS[j] = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (ck_mem->ck_znS[j] == NULL) { for (jj=0; jjck_znS[jj], cv_mem->cv_Ns); if (ck_mem->ck_quadr) { if (cv_mem->cv_q < qmax) N_VDestroy(ck_mem->ck_znQ[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_znQ[jj]); } if (cv_mem->cv_q < qmax) N_VDestroy(ck_mem->ck_zn[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } if ( cv_mem->cv_q < qmax) { ck_mem->ck_znS[qmax] = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (ck_mem->ck_znS[qmax] == NULL) { for (jj=0; jj<=cv_mem->cv_q; 
jj++) N_VDestroyVectorArray(ck_mem->ck_znS[jj], cv_mem->cv_Ns); if (ck_mem->ck_quadr) { N_VDestroy(ck_mem->ck_znQ[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_znQ[jj]); } N_VDestroy(ck_mem->ck_zn[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } } /* Test if we need to carry quadrature sensitivities */ ck_mem->ck_quadr_sensi = cv_mem->cv_quadr_sensi && cv_mem->cv_errconQS; if (ck_mem->ck_quadr_sensi) { for (j=0; j<=cv_mem->cv_q; j++) { ck_mem->ck_znQS[j] = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempvQ); if (ck_mem->ck_znQS[j] == NULL) { for (jj=0; jjck_znQS[jj], cv_mem->cv_Ns); if (cv_mem->cv_q < qmax) N_VDestroyVectorArray(ck_mem->ck_znS[qmax], cv_mem->cv_Ns); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroyVectorArray(ck_mem->ck_znS[jj], cv_mem->cv_Ns); if (ck_mem->ck_quadr) { if (cv_mem->cv_q < qmax) N_VDestroy(ck_mem->ck_znQ[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_znQ[jj]); } if (cv_mem->cv_q < qmax) N_VDestroy(ck_mem->ck_zn[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } if ( cv_mem->cv_q < qmax) { ck_mem->ck_znQS[qmax] = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempvQ); if (ck_mem->ck_znQS[qmax] == NULL) { for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroyVectorArray(ck_mem->ck_znQS[jj], cv_mem->cv_Ns); N_VDestroyVectorArray(ck_mem->ck_znS[qmax], cv_mem->cv_Ns); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroyVectorArray(ck_mem->ck_znS[jj], cv_mem->cv_Ns); if (ck_mem->ck_quadr) { N_VDestroy(ck_mem->ck_znQ[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_zn[jj]); } N_VDestroy(ck_mem->ck_zn[qmax]); for (jj=0; jj<=cv_mem->cv_q; jj++) N_VDestroy(ck_mem->ck_zn[jj]); free(ck_mem); ck_mem = NULL; return(NULL); } } } /* Load check point data from cv_mem */ for (j=0; j<=cv_mem->cv_q; j++) cv_mem->cv_cvals[j] = ONE; (void) N_VScaleVectorArray(cv_mem->cv_q+1, 
cv_mem->cv_cvals, cv_mem->cv_zn, ck_mem->ck_zn); if ( cv_mem->cv_q < qmax ) N_VScale(ONE, cv_mem->cv_zn[qmax], ck_mem->ck_zn[qmax]); if (ck_mem->ck_quadr) { for (j=0; j<=cv_mem->cv_q; j++) cv_mem->cv_cvals[j] = ONE; (void) N_VScaleVectorArray(cv_mem->cv_q+1, cv_mem->cv_cvals, cv_mem->cv_znQ, ck_mem->ck_znQ); if ( cv_mem->cv_q < qmax ) N_VScale(ONE, cv_mem->cv_znQ[qmax], ck_mem->ck_znQ[qmax]); } if (ck_mem->ck_sensi) { for (j=0; j<=cv_mem->cv_q; j++) { for (is=0; iscv_Ns; is++) { cv_mem->cv_cvals[j*cv_mem->cv_Ns+is] = ONE; cv_mem->cv_Xvecs[j*cv_mem->cv_Ns+is] = cv_mem->cv_znS[j][is]; cv_mem->cv_Zvecs[j*cv_mem->cv_Ns+is] = ck_mem->ck_znS[j][is]; } } (void) N_VScaleVectorArray(cv_mem->cv_Ns*(cv_mem->cv_q+1), cv_mem->cv_cvals, cv_mem->cv_Xvecs, cv_mem->cv_Zvecs); if ( cv_mem->cv_q < qmax ) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znS[qmax], ck_mem->ck_znS[qmax]); } } if (ck_mem->ck_quadr_sensi) { for (j=0; j<=cv_mem->cv_q; j++) { for (is=0; iscv_Ns; is++) { cv_mem->cv_cvals[j*cv_mem->cv_Ns+is] = ONE; cv_mem->cv_Xvecs[j*cv_mem->cv_Ns+is] = cv_mem->cv_znQS[j][is]; cv_mem->cv_Zvecs[j*cv_mem->cv_Ns+is] = ck_mem->ck_znQS[j][is]; } } (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_Xvecs, cv_mem->cv_Zvecs); if ( cv_mem->cv_q < qmax ) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; (void) N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znQS[qmax], ck_mem->ck_znQS[qmax]); } } for (j=0; j<=L_MAX; j++) ck_mem->ck_tau[j] = cv_mem->cv_tau[j]; for (j=0; j<=NUM_TESTS; j++) ck_mem->ck_tq[j] = cv_mem->cv_tq[j]; for (j=0; j<=cv_mem->cv_q; j++) ck_mem->ck_l[j] = cv_mem->cv_l[j]; ck_mem->ck_nst = cv_mem->cv_nst; ck_mem->ck_tretlast = cv_mem->cv_tretlast; ck_mem->ck_q = cv_mem->cv_q; ck_mem->ck_qprime = cv_mem->cv_qprime; ck_mem->ck_qwait = cv_mem->cv_qwait; ck_mem->ck_L = cv_mem->cv_L; ck_mem->ck_gammap = cv_mem->cv_gammap; ck_mem->ck_h = cv_mem->cv_h; 
  /* Copy remaining step-size / order bookkeeping into the check point
   * so a hot restart can resume exactly where the forward run was. */
  ck_mem->ck_hprime = cv_mem->cv_hprime;
  ck_mem->ck_hscale = cv_mem->cv_hscale;
  ck_mem->ck_eta = cv_mem->cv_eta;
  ck_mem->ck_etamax = cv_mem->cv_etamax;
  ck_mem->ck_t0 = cv_mem->cv_tn;
  ck_mem->ck_saved_tq5 = cv_mem->cv_saved_tq5;

  return(ck_mem);
}

/*
 * CVAckpntDelete
 *
 * This routine deletes the first check point in list and returns
 * the new list head
 */

static void CVAckpntDelete(CkpntMem *ck_memPtr)
{
  CkpntMem tmp;
  int j;

  if (*ck_memPtr == NULL) return;

  /* store head of list */
  tmp = *ck_memPtr;

  /* move head of list */
  *ck_memPtr = (*ck_memPtr)->ck_next;

  /* free N_Vectors in tmp */
  /* zn[0..ck_q] always exist; zn[ck_zqm] (the qmax slot) was allocated
   * only when ck_zqm != 0, i.e. when the order could still increase. */
  for (j=0;j<=tmp->ck_q;j++) N_VDestroy(tmp->ck_zn[j]);
  if (tmp->ck_zqm != 0) N_VDestroy(tmp->ck_zn[tmp->ck_zqm]);

  /* free N_Vectors for quadratures in tmp
   * Note that at the check point at t_initial, only znQ_[0]
   * was allocated */
  /* The initial check point is the list tail (ck_next == NULL), which is
   * why the tail case frees only index 0. */
  if (tmp->ck_quadr) {
    if (tmp->ck_next != NULL) {
      for (j=0;j<=tmp->ck_q;j++) N_VDestroy(tmp->ck_znQ[j]);
      if (tmp->ck_zqm != 0) N_VDestroy(tmp->ck_znQ[tmp->ck_zqm]);
    } else {
      N_VDestroy(tmp->ck_znQ[0]);
    }
  }

  /* free N_Vectors for sensitivities in tmp
   * Note that at the check point at t_initial, only znS_[0]
   * was allocated */
  if (tmp->ck_sensi) {
    if (tmp->ck_next != NULL) {
      for (j=0;j<=tmp->ck_q;j++) N_VDestroyVectorArray(tmp->ck_znS[j], tmp->ck_Ns);
      if (tmp->ck_zqm != 0) N_VDestroyVectorArray(tmp->ck_znS[tmp->ck_zqm], tmp->ck_Ns);
    } else {
      N_VDestroyVectorArray(tmp->ck_znS[0], tmp->ck_Ns);
    }
  }

  /* free N_Vectors for quadrature sensitivities in tmp
   * Note that at the check point at t_initial, only znQS_[0]
   * was allocated */
  if (tmp->ck_quadr_sensi) {
    if (tmp->ck_next != NULL) {
      for (j=0;j<=tmp->ck_q;j++) N_VDestroyVectorArray(tmp->ck_znQS[j], tmp->ck_Ns);
      if (tmp->ck_zqm != 0) N_VDestroyVectorArray(tmp->ck_znQS[tmp->ck_zqm], tmp->ck_Ns);
    } else {
      N_VDestroyVectorArray(tmp->ck_znQS[0], tmp->ck_Ns);
    }
  }

  free(tmp); tmp = NULL;
}

/*
 * =================================================================
 * PRIVATE FUNCTIONS FOR BACKWARD PROBLEMS
 *
================================================================= */ static void CVAbckpbDelete(CVodeBMem *cvB_memPtr) { CVodeBMem tmp; void *cvode_mem; if (*cvB_memPtr != NULL) { /* Save head of the list */ tmp = *cvB_memPtr; /* Move head of the list */ *cvB_memPtr = (*cvB_memPtr)->cv_next; /* Free CVODES memory in tmp */ cvode_mem = (void *)(tmp->cv_mem); CVodeFree(&cvode_mem); /* Free linear solver memory */ if (tmp->cv_lfree != NULL) tmp->cv_lfree(tmp); /* Free preconditioner memory */ if (tmp->cv_pfree != NULL) tmp->cv_pfree(tmp); /* Free workspace Nvector */ N_VDestroy(tmp->cv_y); free(tmp); tmp = NULL; } } /* * ================================================================= * PRIVATE FUNCTIONS FOR INTERPOLATION * ================================================================= */ /* * CVAdataStore * * This routine integrates the forward model starting at the check * point ck_mem and stores y and yprime at all intermediate steps. * * Return values: * CV_SUCCESS * CV_REIFWD_FAIL * CV_FWD_FAIL */ static int CVAdataStore(CVodeMem cv_mem, CkpntMem ck_mem) { CVadjMem ca_mem; DtpntMem *dt_mem; realtype t; long int i; int flag, sign; ca_mem = cv_mem->cv_adj_mem; dt_mem = ca_mem->dt_mem; /* Initialize cv_mem with data from ck_mem */ flag = CVAckpntGet(cv_mem, ck_mem); if (flag != CV_SUCCESS) return(CV_REIFWD_FAIL); /* Set first structure in dt_mem[0] */ dt_mem[0]->t = ck_mem->ck_t0; ca_mem->ca_IMstore(cv_mem, dt_mem[0]); /* Decide whether TSTOP must be activated */ if (ca_mem->ca_tstopCVodeFcall) { CVodeSetStopTime(cv_mem, ca_mem->ca_tstopCVodeF); } sign = (ca_mem->ca_tfinal - ca_mem->ca_tinitial > ZERO) ? 
1 : -1; /* Run CVode to set following structures in dt_mem[i] */ i = 1; do { flag = CVode(cv_mem, ck_mem->ck_t1, ca_mem->ca_ytmp, &t, CV_ONE_STEP); if (flag < 0) return(CV_FWD_FAIL); dt_mem[i]->t = t; ca_mem->ca_IMstore(cv_mem, dt_mem[i]); i++; } while ( sign*(ck_mem->ck_t1 - t) > ZERO ); ca_mem->ca_IMnewData = SUNTRUE; /* New data is now available */ ca_mem->ca_ckpntData = ck_mem; /* starting at this check point */ ca_mem->ca_np = i; /* and we have this many points */ return(CV_SUCCESS); } /* * CVAckpntGet * * This routine prepares CVODES for a hot restart from * the check point ck_mem */ static int CVAckpntGet(CVodeMem cv_mem, CkpntMem ck_mem) { int flag, j, is, qmax, retval; if (ck_mem->ck_next == NULL) { /* In this case, we just call the reinitialization routine, * but make sure we use the same initial stepsize as on * the first run. */ CVodeSetInitStep(cv_mem, cv_mem->cv_h0u); flag = CVodeReInit(cv_mem, ck_mem->ck_t0, ck_mem->ck_zn[0]); if (flag != CV_SUCCESS) return(flag); if (ck_mem->ck_quadr) { flag = CVodeQuadReInit(cv_mem, ck_mem->ck_znQ[0]); if (flag != CV_SUCCESS) return(flag); } if (ck_mem->ck_sensi) { flag = CVodeSensReInit(cv_mem, cv_mem->cv_ism, ck_mem->ck_znS[0]); if (flag != CV_SUCCESS) return(flag); } if (ck_mem->ck_quadr_sensi) { flag = CVodeQuadSensReInit(cv_mem, ck_mem->ck_znQS[0]); if (flag != CV_SUCCESS) return(flag); } } else { qmax = cv_mem->cv_qmax; /* Copy parameters from check point data structure */ cv_mem->cv_nst = ck_mem->ck_nst; cv_mem->cv_tretlast = ck_mem->ck_tretlast; cv_mem->cv_q = ck_mem->ck_q; cv_mem->cv_qprime = ck_mem->ck_qprime; cv_mem->cv_qwait = ck_mem->ck_qwait; cv_mem->cv_L = ck_mem->ck_L; cv_mem->cv_gammap = ck_mem->ck_gammap; cv_mem->cv_h = ck_mem->ck_h; cv_mem->cv_hprime = ck_mem->ck_hprime; cv_mem->cv_hscale = ck_mem->ck_hscale; cv_mem->cv_eta = ck_mem->ck_eta; cv_mem->cv_etamax = ck_mem->ck_etamax; cv_mem->cv_tn = ck_mem->ck_t0; cv_mem->cv_saved_tq5 = ck_mem->ck_saved_tq5; /* Copy the arrays from check point data 
structure */ for (j=0; j<=cv_mem->cv_q; j++) cv_mem->cv_cvals[j] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_q+1, cv_mem->cv_cvals, ck_mem->ck_zn, cv_mem->cv_zn); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); if ( cv_mem->cv_q < qmax ) N_VScale(ONE, ck_mem->ck_zn[qmax], cv_mem->cv_zn[qmax]); if (ck_mem->ck_quadr) { for (j=0; j<=cv_mem->cv_q; j++) cv_mem->cv_cvals[j] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_q+1, cv_mem->cv_cvals, ck_mem->ck_znQ, cv_mem->cv_znQ); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); if ( cv_mem->cv_q < qmax ) N_VScale(ONE, ck_mem->ck_znQ[qmax], cv_mem->cv_znQ[qmax]); } if (ck_mem->ck_sensi) { for (j=0; j<=cv_mem->cv_q; j++) { for (is=0; iscv_Ns; is++) { cv_mem->cv_cvals[j*cv_mem->cv_Ns+is] = ONE; cv_mem->cv_Xvecs[j*cv_mem->cv_Ns+is] = ck_mem->ck_znS[j][is]; cv_mem->cv_Zvecs[j*cv_mem->cv_Ns+is] = cv_mem->cv_znS[j][is]; } } retval = N_VScaleVectorArray(cv_mem->cv_Ns*(cv_mem->cv_q+1), cv_mem->cv_cvals, cv_mem->cv_Xvecs, cv_mem->cv_Zvecs); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); if ( cv_mem->cv_q < qmax ) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, ck_mem->ck_znS[qmax], cv_mem->cv_znS[qmax]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } } if (ck_mem->ck_quadr_sensi) { for (j=0; j<=cv_mem->cv_q; j++) { for (is=0; iscv_Ns; is++) { cv_mem->cv_cvals[j*cv_mem->cv_Ns+is] = ONE; cv_mem->cv_Xvecs[j*cv_mem->cv_Ns+is] = ck_mem->ck_znQS[j][is]; cv_mem->cv_Zvecs[j*cv_mem->cv_Ns+is] = cv_mem->cv_znQS[j][is]; } } retval = N_VScaleVectorArray(cv_mem->cv_Ns*(cv_mem->cv_q+1), cv_mem->cv_cvals, cv_mem->cv_Xvecs, cv_mem->cv_Zvecs); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); if ( cv_mem->cv_q < qmax ) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, ck_mem->ck_znQS[qmax], cv_mem->cv_znQS[qmax]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } } for (j=0; j<=L_MAX; j++) 
cv_mem->cv_tau[j] = ck_mem->ck_tau[j]; for (j=0; j<=NUM_TESTS; j++) cv_mem->cv_tq[j] = ck_mem->ck_tq[j]; for (j=0; j<=cv_mem->cv_q; j++) cv_mem->cv_l[j] = ck_mem->ck_l[j]; /* Force a call to setup */ cv_mem->cv_forceSetup = SUNTRUE; } return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Functions for interpolation * ----------------------------------------------------------------- */ /* * CVAfindIndex * * Finds the index in the array of data point strctures such that * dt_mem[indx-1].t <= t < dt_mem[indx].t * If indx is changed from the previous invocation, then newpoint = SUNTRUE * * If t is beyond the leftmost limit, but close enough, indx=0. * * Returns CV_SUCCESS if successful and CV_GETY_BADT if unable to * find indx (t is too far beyond limits). */ static int CVAfindIndex(CVodeMem cv_mem, realtype t, long int *indx, booleantype *newpoint) { CVadjMem ca_mem; DtpntMem *dt_mem; int sign; booleantype to_left, to_right; ca_mem = cv_mem->cv_adj_mem; dt_mem = ca_mem->dt_mem; *newpoint = SUNFALSE; /* Find the direction of integration */ sign = (ca_mem->ca_tfinal - ca_mem->ca_tinitial > ZERO) ? 1 : -1; /* If this is the first time we use new data */ if (ca_mem->ca_IMnewData) { ca_mem->ca_ilast = ca_mem->ca_np-1; *newpoint = SUNTRUE; ca_mem->ca_IMnewData = SUNFALSE; } /* Search for indx starting from ilast */ to_left = ( sign*(t - dt_mem[ca_mem->ca_ilast-1]->t) < ZERO); to_right = ( sign*(t - dt_mem[ca_mem->ca_ilast]->t) > ZERO); if ( to_left ) { /* look for a new indx to the left */ *newpoint = SUNTRUE; *indx = ca_mem->ca_ilast; for(;;) { if ( *indx == 0 ) break; if ( sign*(t - dt_mem[*indx-1]->t) <= ZERO ) (*indx)--; else break; } if ( *indx == 0 ) ca_mem->ca_ilast = 1; else ca_mem->ca_ilast = *indx; if ( *indx == 0 ) { /* t is beyond leftmost limit. Is it too far? 
*/ if ( SUNRabs(t - dt_mem[0]->t) > FUZZ_FACTOR * cv_mem->cv_uround ) { return(CV_GETY_BADT); } } } else if ( to_right ) { /* look for a new indx to the right */ *newpoint = SUNTRUE; *indx = ca_mem->ca_ilast; for(;;) { if ( sign*(t - dt_mem[*indx]->t) > ZERO) (*indx)++; else break; } ca_mem->ca_ilast = *indx; } else { /* ilast is still OK */ *indx = ca_mem->ca_ilast; } return(CV_SUCCESS); } /* * CVodeGetAdjY * * This routine returns the interpolated forward solution at time t. * The user must allocate space for y. */ int CVodeGetAdjY(void *cvode_mem, realtype t, N_Vector y) { CVodeMem cv_mem; CVadjMem ca_mem; int flag; if (cvode_mem == NULL) { cvProcessError(NULL, CV_MEM_NULL, "CVODEA", "CVodeGetAdjY", MSGCV_NO_MEM); return(CV_MEM_NULL); } cv_mem = (CVodeMem) cvode_mem; ca_mem = cv_mem->cv_adj_mem; flag = ca_mem->ca_IMget(cv_mem, t, y, NULL); return(flag); } /* * ----------------------------------------------------------------- * Functions specific to cubic Hermite interpolation * ----------------------------------------------------------------- */ /* * CVAhermiteMalloc * * This routine allocates memory for storing information at all * intermediate points between two consecutive check points. * This data is then used to interpolate the forward solution * at any other time. 
*/ static booleantype CVAhermiteMalloc(CVodeMem cv_mem) { CVadjMem ca_mem; DtpntMem *dt_mem; HermiteDataMem content; long int i, ii=0; booleantype allocOK; allocOK = SUNTRUE; ca_mem = cv_mem->cv_adj_mem; /* Allocate space for the vectors ytmp and yStmp */ ca_mem->ca_ytmp = N_VClone(cv_mem->cv_tempv); if (ca_mem->ca_ytmp == NULL) { return(SUNFALSE); } if (ca_mem->ca_IMstoreSensi) { ca_mem->ca_yStmp = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (ca_mem->ca_yStmp == NULL) { N_VDestroy(ca_mem->ca_ytmp); return(SUNFALSE); } } /* Allocate space for the content field of the dt structures */ dt_mem = ca_mem->dt_mem; for (i=0; i<=ca_mem->ca_nsteps; i++) { content = NULL; content = (HermiteDataMem) malloc(sizeof(struct HermiteDataMemRec)); if (content == NULL) { ii = i; allocOK = SUNFALSE; break; } content->y = N_VClone(cv_mem->cv_tempv); if (content->y == NULL) { free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } content->yd = N_VClone(cv_mem->cv_tempv); if (content->yd == NULL) { N_VDestroy(content->y); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } if (ca_mem->ca_IMstoreSensi) { content->yS = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (content->yS == NULL) { N_VDestroy(content->y); N_VDestroy(content->yd); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } content->ySd = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (content->ySd == NULL) { N_VDestroy(content->y); N_VDestroy(content->yd); N_VDestroyVectorArray(content->yS, cv_mem->cv_Ns); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } } dt_mem[i]->content = content; } /* If an error occurred, deallocate and return */ if (!allocOK) { N_VDestroy(ca_mem->ca_ytmp); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(ca_mem->ca_yStmp, cv_mem->cv_Ns); } for (i=0; icontent); N_VDestroy(content->y); N_VDestroy(content->yd); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(content->yS, cv_mem->cv_Ns); 
N_VDestroyVectorArray(content->ySd, cv_mem->cv_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } return(allocOK); } /* * CVAhermiteFree * * This routine frees the memory allocated for data storage. */ static void CVAhermiteFree(CVodeMem cv_mem) { CVadjMem ca_mem; DtpntMem *dt_mem; HermiteDataMem content; long int i; ca_mem = cv_mem->cv_adj_mem; N_VDestroy(ca_mem->ca_ytmp); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(ca_mem->ca_yStmp, cv_mem->cv_Ns); } dt_mem = ca_mem->dt_mem; for (i=0; i<=ca_mem->ca_nsteps; i++) { content = (HermiteDataMem) (dt_mem[i]->content); N_VDestroy(content->y); N_VDestroy(content->yd); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(content->yS, cv_mem->cv_Ns); N_VDestroyVectorArray(content->ySd, cv_mem->cv_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } /* * CVAhermiteStorePnt ( -> IMstore ) * * This routine stores a new point (y,yd) in the structure d for use * in the cubic Hermite interpolation. * Note that the time is already stored. 
*/ static int CVAhermiteStorePnt(CVodeMem cv_mem, DtpntMem d) { CVadjMem ca_mem; HermiteDataMem content; int is, retval; ca_mem = cv_mem->cv_adj_mem; content = (HermiteDataMem) d->content; /* Load solution */ N_VScale(ONE, cv_mem->cv_zn[0], content->y); if (ca_mem->ca_IMstoreSensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znS[0], content->yS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } /* Load derivative */ if (cv_mem->cv_nst == 0) { /* retval = */ cv_mem->cv_f(cv_mem->cv_tn, content->y, content->yd, cv_mem->cv_user_data); if (ca_mem->ca_IMstoreSensi) { /* retval = */ cvSensRhsWrapper(cv_mem, cv_mem->cv_tn, content->y, content->yd, content->yS, content->ySd, cv_mem->cv_tempv, cv_mem->cv_ftemp); } } else { N_VScale(ONE/cv_mem->cv_h, cv_mem->cv_zn[1], content->yd); if (ca_mem->ca_IMstoreSensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE/cv_mem->cv_h; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znS[1], content->ySd); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } } return(0); } /* * CVAhermiteGetY ( -> IMget ) * * This routine uses cubic piece-wise Hermite interpolation for * the forward solution vector. 
* It is typically called by the wrapper routines before calling * user provided routines (fB, djacB, bjacB, jtimesB, psolB) but * can be directly called by the user through CVodeGetAdjY */ static int CVAhermiteGetY(CVodeMem cv_mem, realtype t, N_Vector y, N_Vector *yS) { CVadjMem ca_mem; DtpntMem *dt_mem; HermiteDataMem content0, content1; realtype t0, t1, delta; realtype factor1, factor2, factor3; N_Vector y0, yd0, y1, yd1; N_Vector *yS0=NULL, *ySd0=NULL, *yS1, *ySd1; int flag, is, NS; long int indx; booleantype newpoint; /* local variables for fused vector oerations */ int retval; realtype cvals[4]; N_Vector Xvecs[4]; N_Vector* XXvecs[4]; ca_mem = cv_mem->cv_adj_mem; dt_mem = ca_mem->dt_mem; /* Local value of Ns */ NS = (ca_mem->ca_IMinterpSensi && (yS != NULL)) ? cv_mem->cv_Ns : 0; /* Get the index in dt_mem */ flag = CVAfindIndex(cv_mem, t, &indx, &newpoint); if (flag != CV_SUCCESS) return(flag); /* If we are beyond the left limit but close enough, then return y at the left limit. */ if (indx == 0) { content0 = (HermiteDataMem) (dt_mem[0]->content); N_VScale(ONE, content0->y, y); if (NS > 0) { for (is=0; iscv_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, cv_mem->cv_cvals, content0->yS, yS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } return(CV_SUCCESS); } /* Extract stuff from the appropriate data points */ t0 = dt_mem[indx-1]->t; t1 = dt_mem[indx]->t; delta = t1 - t0; content0 = (HermiteDataMem) (dt_mem[indx-1]->content); y0 = content0->y; yd0 = content0->yd; if (ca_mem->ca_IMinterpSensi) { yS0 = content0->yS; ySd0 = content0->ySd; } if (newpoint) { /* Recompute Y0 and Y1 */ content1 = (HermiteDataMem) (dt_mem[indx]->content); y1 = content1->y; yd1 = content1->yd; /* Y1 = delta (yd1 + yd0) - 2 (y1 - y0) */ cvals[0] = -TWO; Xvecs[0] = y1; cvals[1] = TWO; Xvecs[1] = y0; cvals[2] = delta; Xvecs[2] = yd1; cvals[3] = delta; Xvecs[3] = yd0; retval = N_VLinearCombination(4, cvals, Xvecs, ca_mem->ca_Y[1]); if (retval != CV_SUCCESS) return 
(CV_VECTOROP_ERR); /* Y0 = y1 - y0 - delta * yd0 */ cvals[0] = ONE; Xvecs[0] = y1; cvals[1] = -ONE; Xvecs[1] = y0; cvals[2] = -delta; Xvecs[2] = yd0; retval = N_VLinearCombination(3, cvals, Xvecs, ca_mem->ca_Y[0]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* Recompute YS0 and YS1, if needed */ if (NS > 0) { yS1 = content1->yS; ySd1 = content1->ySd; /* YS1 = delta (ySd1 + ySd0) - 2 (yS1 - yS0) */ cvals[0] = -TWO; XXvecs[0] = yS1; cvals[1] = TWO; XXvecs[1] = yS0; cvals[2] = delta; XXvecs[2] = ySd1; cvals[3] = delta; XXvecs[3] = ySd0; retval = N_VLinearCombinationVectorArray(NS, 4, cvals, XXvecs, ca_mem->ca_YS[1]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* YS0 = yS1 - yS0 - delta * ySd0 */ cvals[0] = ONE; XXvecs[0] = yS1; cvals[1] = -ONE; XXvecs[1] = yS0; cvals[2] = -delta; XXvecs[2] = ySd0; retval = N_VLinearCombinationVectorArray(NS, 3, cvals, XXvecs, ca_mem->ca_YS[0]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } } /* Perform the actual interpolation. */ factor1 = t - t0; factor2 = factor1/delta; factor2 = factor2*factor2; factor3 = factor2*(t-t1)/delta; cvals[0] = ONE; cvals[1] = factor1; cvals[2] = factor2; cvals[3] = factor3; /* y = y0 + factor1 yd0 + factor2 * Y[0] + factor3 Y[1] */ Xvecs[0] = y0; Xvecs[1] = yd0; Xvecs[2] = ca_mem->ca_Y[0]; Xvecs[3] = ca_mem->ca_Y[1]; retval = N_VLinearCombination(4, cvals, Xvecs, y); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); /* yS = yS0 + factor1 ySd0 + factor2 * YS[0] + factor3 YS[1], if needed */ if (NS > 0) { XXvecs[0] = yS0; XXvecs[1] = ySd0; XXvecs[2] = ca_mem->ca_YS[0]; XXvecs[3] = ca_mem->ca_YS[1]; retval = N_VLinearCombinationVectorArray(NS, 4, cvals, XXvecs, yS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } return(CV_SUCCESS); } /* * ----------------------------------------------------------------- * Functions specific to Polynomial interpolation * ----------------------------------------------------------------- */ /* * CVApolynomialMalloc * * This routine allocates 
memory for storing information at all * intermediate points between two consecutive check points. * This data is then used to interpolate the forward solution * at any other time. */ static booleantype CVApolynomialMalloc(CVodeMem cv_mem) { CVadjMem ca_mem; DtpntMem *dt_mem; PolynomialDataMem content; long int i, ii=0; booleantype allocOK; allocOK = SUNTRUE; ca_mem = cv_mem->cv_adj_mem; /* Allocate space for the vectors ytmp and yStmp */ ca_mem->ca_ytmp = N_VClone(cv_mem->cv_tempv); if (ca_mem->ca_ytmp == NULL) { return(SUNFALSE); } if (ca_mem->ca_IMstoreSensi) { ca_mem->ca_yStmp = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (ca_mem->ca_yStmp == NULL) { N_VDestroy(ca_mem->ca_ytmp); return(SUNFALSE); } } /* Allocate space for the content field of the dt structures */ dt_mem = ca_mem->dt_mem; for (i=0; i<=ca_mem->ca_nsteps; i++) { content = NULL; content = (PolynomialDataMem) malloc(sizeof(struct PolynomialDataMemRec)); if (content == NULL) { ii = i; allocOK = SUNFALSE; break; } content->y = N_VClone(cv_mem->cv_tempv); if (content->y == NULL) { free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } if (ca_mem->ca_IMstoreSensi) { content->yS = N_VCloneVectorArray(cv_mem->cv_Ns, cv_mem->cv_tempv); if (content->yS == NULL) { N_VDestroy(content->y); free(content); content = NULL; ii = i; allocOK = SUNFALSE; break; } } dt_mem[i]->content = content; } /* If an error occurred, deallocate and return */ if (!allocOK) { N_VDestroy(ca_mem->ca_ytmp); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(ca_mem->ca_yStmp, cv_mem->cv_Ns); } for (i=0; icontent); N_VDestroy(content->y); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(content->yS, cv_mem->cv_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } return(allocOK); } /* * CVApolynomialFree * * This routine frees the memeory allocated for data storage. 
*/ static void CVApolynomialFree(CVodeMem cv_mem) { CVadjMem ca_mem; DtpntMem *dt_mem; PolynomialDataMem content; long int i; ca_mem = cv_mem->cv_adj_mem; N_VDestroy(ca_mem->ca_ytmp); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(ca_mem->ca_yStmp, cv_mem->cv_Ns); } dt_mem = ca_mem->dt_mem; for (i=0; i<=ca_mem->ca_nsteps; i++) { content = (PolynomialDataMem) (dt_mem[i]->content); N_VDestroy(content->y); if (ca_mem->ca_IMstoreSensi) { N_VDestroyVectorArray(content->yS, cv_mem->cv_Ns); } free(dt_mem[i]->content); dt_mem[i]->content = NULL; } } /* * CVApolynomialStorePnt ( -> IMstore ) * * This routine stores a new point y in the structure d for use * in the Polynomial interpolation. * Note that the time is already stored. */ static int CVApolynomialStorePnt(CVodeMem cv_mem, DtpntMem d) { CVadjMem ca_mem; PolynomialDataMem content; int is, retval; ca_mem = cv_mem->cv_adj_mem; content = (PolynomialDataMem) d->content; N_VScale(ONE, cv_mem->cv_zn[0], content->y); if (ca_mem->ca_IMstoreSensi) { for (is=0; iscv_Ns; is++) cv_mem->cv_cvals[is] = ONE; retval = N_VScaleVectorArray(cv_mem->cv_Ns, cv_mem->cv_cvals, cv_mem->cv_znS[0], content->yS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } content->order = cv_mem->cv_qu; return(0); } /* * CVApolynomialGetY ( -> IMget ) * * This routine uses polynomial interpolation for the forward solution vector. * It is typically called by the wrapper routines before calling * user provided routines (fB, djacB, bjacB, jtimesB, psolB)) but * can be directly called by the user through CVodeGetAdjY. */ static int CVApolynomialGetY(CVodeMem cv_mem, realtype t, N_Vector y, N_Vector *yS) { CVadjMem ca_mem; DtpntMem *dt_mem; PolynomialDataMem content; int flag, dir, order, i, j, is, NS, retval; long int indx, base; booleantype newpoint; realtype dt, factor; ca_mem = cv_mem->cv_adj_mem; dt_mem = ca_mem->dt_mem; /* Local value of Ns */ NS = (ca_mem->ca_IMinterpSensi && (yS != NULL)) ? 
cv_mem->cv_Ns : 0; /* Get the index in dt_mem */ flag = CVAfindIndex(cv_mem, t, &indx, &newpoint); if (flag != CV_SUCCESS) return(flag); /* If we are beyond the left limit but close enough, then return y at the left limit. */ if (indx == 0) { content = (PolynomialDataMem) (dt_mem[0]->content); N_VScale(ONE, content->y, y); if (NS > 0) { for (is=0; iscv_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, cv_mem->cv_cvals, content->yS, yS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } return(CV_SUCCESS); } /* Scaling factor */ dt = SUNRabs(dt_mem[indx]->t - dt_mem[indx-1]->t); /* Find the direction of the forward integration */ dir = (ca_mem->ca_tfinal - ca_mem->ca_tinitial > ZERO) ? 1 : -1; /* Establish the base point depending on the integration direction. Modify the base if there are not enough points for the current order */ if (dir == 1) { base = indx; content = (PolynomialDataMem) (dt_mem[base]->content); order = content->order; if(indx < order) base += order-indx; } else { base = indx-1; content = (PolynomialDataMem) (dt_mem[base]->content); order = content->order; if (ca_mem->ca_np-indx > order) base -= indx+order-ca_mem->ca_np; } /* Recompute Y (divided differences for Newton polynomial) if needed */ if (newpoint) { /* Store 0-th order DD */ if (dir == 1) { for(j=0;j<=order;j++) { ca_mem->ca_T[j] = dt_mem[base-j]->t; content = (PolynomialDataMem) (dt_mem[base-j]->content); N_VScale(ONE, content->y, ca_mem->ca_Y[j]); if (NS > 0) { for (is=0; iscv_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, cv_mem->cv_cvals, content->yS, ca_mem->ca_YS[j]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } } } else { for(j=0;j<=order;j++) { ca_mem->ca_T[j] = dt_mem[base-1+j]->t; content = (PolynomialDataMem) (dt_mem[base-1+j]->content); N_VScale(ONE, content->y, ca_mem->ca_Y[j]); if (NS > 0) { for (is=0; iscv_cvals[is] = ONE; retval = N_VScaleVectorArray(NS, cv_mem->cv_cvals, content->yS, ca_mem->ca_YS[j]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } 
} } /* Compute higher-order DD */ for(i=1;i<=order;i++) { for(j=order;j>=i;j--) { factor = dt/(ca_mem->ca_T[j]-ca_mem->ca_T[j-i]); N_VLinearSum(factor, ca_mem->ca_Y[j], -factor, ca_mem->ca_Y[j-1], ca_mem->ca_Y[j]); if (NS > 0) { retval = N_VLinearSumVectorArray(NS, factor, ca_mem->ca_YS[j], -factor, ca_mem->ca_YS[j-1], ca_mem->ca_YS[j]); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } } } } /* Perform the actual interpolation using nested multiplications */ cv_mem->cv_cvals[0] = ONE; for (i=0; icv_cvals[i+1] = cv_mem->cv_cvals[i] * (t-ca_mem->ca_T[i]) / dt; retval = N_VLinearCombination(order+1, cv_mem->cv_cvals, ca_mem->ca_Y, y); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); if (NS > 0) { retval = N_VLinearCombinationVectorArray(NS, order+1, cv_mem->cv_cvals, ca_mem->ca_YS, yS); if (retval != CV_SUCCESS) return (CV_VECTOROP_ERR); } return(CV_SUCCESS); } /* * ================================================================= * WRAPPERS FOR ADJOINT SYSTEM * ================================================================= */ /* * CVArhs * * This routine interfaces to the CVRhsFnB (or CVRhsFnBS) routine * provided by the user. 
*/ static int CVArhs(realtype t, N_Vector yB, N_Vector yBdot, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; int flag, retval; cv_mem = (CVodeMem) cvode_mem; ca_mem = cv_mem->cv_adj_mem; cvB_mem = ca_mem->ca_bckpbCrt; /* Get forward solution from interpolation */ if (ca_mem->ca_IMinterpSensi) flag = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp); else flag = ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); if (flag != CV_SUCCESS) { cvProcessError(cv_mem, -1, "CVODEA", "CVArhs", MSGCV_BAD_TINTERP, t); return(-1); } /* Call the user's RHS function */ if (cvB_mem->cv_f_withSensi) retval = (cvB_mem->cv_fs)(t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, yBdot, cvB_mem->cv_user_data); else retval = (cvB_mem->cv_f)(t, ca_mem->ca_ytmp, yB, yBdot, cvB_mem->cv_user_data); return(retval); } /* * CVArhsQ * * This routine interfaces to the CVQuadRhsFnB (or CVQuadRhsFnBS) routine * provided by the user. */ static int CVArhsQ(realtype t, N_Vector yB, N_Vector qBdot, void *cvode_mem) { CVodeMem cv_mem; CVadjMem ca_mem; CVodeBMem cvB_mem; /* int flag; */ int retval; cv_mem = (CVodeMem) cvode_mem; ca_mem = cv_mem->cv_adj_mem; cvB_mem = ca_mem->ca_bckpbCrt; /* Get forward solution from interpolation */ if (ca_mem->ca_IMinterpSensi) /* flag = */ ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, ca_mem->ca_yStmp); else /* flag = */ ca_mem->ca_IMget(cv_mem, t, ca_mem->ca_ytmp, NULL); /* Call the user's RHS function */ if (cvB_mem->cv_fQ_withSensi) retval = (cvB_mem->cv_fQs)(t, ca_mem->ca_ytmp, ca_mem->ca_yStmp, yB, qBdot, cvB_mem->cv_user_data); else retval = (cvB_mem->cv_fQ)(t, ca_mem->ca_ytmp, yB, qBdot, cvB_mem->cv_user_data); return(retval); } StanHeaders/src/install.libs.R0000644000176200001440000000117414571443743016002 0ustar liggesusersif (.Platform$OS.type == "unix") { files <- Sys.glob("../lib/libStanHeaders.a") dest <- file.path(R_PACKAGE_DIR, paste0('lib', R_ARCH)) dir.create(dest, recursive = TRUE, showWarnings = FALSE) file.copy(files, 
dest, overwrite = TRUE) if (file.exists("symbols.rds")) file.copy("symbols.rds", dest, overwrite = TRUE) } else { files <- Sys.glob(paste0("*", SHLIB_EXT)) dest <- file.path(R_PACKAGE_DIR, paste0('libs', R_ARCH)) dir.create(dest, recursive = TRUE, showWarnings = FALSE) file.copy(files, dest, overwrite = TRUE) if(file.exists("symbols.rds")) file.copy("symbols.rds", dest, overwrite = TRUE) } StanHeaders/NAMESPACE0000644000176200001440000000022014571443743013700 0ustar liggesusersif(.Platform$OS.type == "windows") useDynLib(StanHeaders, .registration = TRUE) importFrom(RcppParallel, RcppParallelLibs) export(stanFunction) StanHeaders/LICENSE0000644000176200001440000000014514571443743013474 0ustar liggesusersYEAR: 2011--2017 COPYRIGHT HOLDER: Trustees of Columbia University ORGANIZATION: Columbia University StanHeaders/inst/0000755000176200001440000000000014645137104013435 5ustar liggesusersStanHeaders/inst/include/0000755000176200001440000000000014645137111015056 5ustar liggesusersStanHeaders/inst/include/stan_sundials_printf_override.hpp0000644000176200001440000000050214645137106023720 0ustar liggesusers#ifndef STAN_MATH_SUNDIALS_PRINTF_OVERRIDE_HPP #define STAN_MATH_SUNDIALS_PRINTF_OVERRIDE_HPP #ifdef WITH_SUNDIAL_PRINTF #define STAN_SUNDIALS_PRINTF(...) printf(__VA_ARGS__) #define STAN_SUNDIALS_FPRINTF(...) fprintf(__VA_ARGS__) #else #define STAN_SUNDIALS_PRINTF(...) #define STAN_SUNDIALS_FPRINTF(...) #endif #endif StanHeaders/inst/include/nvector/0000755000176200001440000000000014511135055016533 5ustar liggesusersStanHeaders/inst/include/nvector/nvector_parallel.h0000644000176200001440000002700614645137106022254 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. 
Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the main header file for the MPI-enabled implementation * of the NVECTOR module. * * Notes: * * - The definition of the generic N_Vector structure can be * found in the header file sundials_nvector.h. * * - The definition of the type realtype can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type booleantype. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_Parallel(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. 
* -----------------------------------------------------------------*/ #ifndef _NVECTOR_PARALLEL_H #define _NVECTOR_PARALLEL_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * Parallel implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_Parallel { sunindextype local_length; /* local vector length */ sunindextype global_length; /* global vector length */ booleantype own_data; /* ownership of data */ realtype *data; /* local data array */ MPI_Comm comm; /* pointer to MPI communicator */ }; typedef struct _N_VectorContent_Parallel *N_VectorContent_Parallel; /* * ----------------------------------------------------------------- * Macros NV_CONTENT_P, NV_DATA_P, NV_OWN_DATA_P, * NV_LOCLENGTH_P, NV_GLOBLENGTH_P,NV_COMM_P, and NV_Ith_P * ----------------------------------------------------------------- */ #define NV_CONTENT_P(v) ( (N_VectorContent_Parallel)(v->content) ) #define NV_LOCLENGTH_P(v) ( NV_CONTENT_P(v)->local_length ) #define NV_GLOBLENGTH_P(v) ( NV_CONTENT_P(v)->global_length ) #define NV_OWN_DATA_P(v) ( NV_CONTENT_P(v)->own_data ) #define NV_DATA_P(v) ( NV_CONTENT_P(v)->data ) #define NV_COMM_P(v) ( NV_CONTENT_P(v)->comm ) #define NV_Ith_P(v,i) ( NV_DATA_P(v)[i] ) /* * ----------------------------------------------------------------- * Functions exported by nvector_parallel * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNew_Parallel(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewEmpty_Parallel(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Parallel(MPI_Comm comm, sunindextype local_length, sunindextype global_length, realtype *v_data, SUNContext sunctx); 
SUNDIALS_EXPORT sunindextype N_VGetLength_Parallel(N_Vector v); SUNDIALS_EXPORT sunindextype N_VGetLocalLength_Parallel(N_Vector v); SUNDIALS_EXPORT void N_VPrint_Parallel(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Parallel(N_Vector v, FILE *outfile); SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_Parallel(N_Vector v); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Parallel(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Parallel(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Parallel(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Parallel(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT realtype *N_VGetArrayPointer_Parallel(N_Vector v); SUNDIALS_EXPORT void N_VSetArrayPointer_Parallel(realtype *v_data, N_Vector v); SUNDIALS_EXPORT void *N_VGetCommunicator_Parallel(N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Parallel(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Parallel(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Parallel(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Parallel(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Parallel(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Parallel(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Parallel(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Parallel(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Parallel(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Parallel(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Parallel(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Parallel(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Parallel(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Parallel(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Parallel(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Parallel(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype 
N_VInvTest_Parallel(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Parallel(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Parallel(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Parallel(int nvec, realtype* c, N_Vector* V, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_Parallel(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_Parallel(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Parallel(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Parallel(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Parallel(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_Parallel(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_Parallel(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Parallel(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Parallel(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VDotProdLocal_Parallel(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNormLocal_Parallel(N_Vector x); SUNDIALS_EXPORT realtype N_VMinLocal_Parallel(N_Vector x); SUNDIALS_EXPORT realtype N_VL1NormLocal_Parallel(N_Vector x); SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Parallel(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Parallel(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT booleantype N_VInvTestLocal_Parallel(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype 
N_VConstrMaskLocal_Parallel(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotientLocal_Parallel(N_Vector num, N_Vector denom); /* OPTIONAL single buffer reduction operations */ SUNDIALS_EXPORT int N_VDotProdMultiLocal_Parallel(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); SUNDIALS_EXPORT int N_VDotProdMultiAllReduce_Parallel(int nvec_total, N_Vector x, realtype* dotprods); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Parallel(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Parallel(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Parallel(N_Vector x, void *buf); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Parallel(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMultiLocal_Parallel(N_Vector v, booleantype tf); /* * ----------------------------------------------------------------- * Deprecated functions * 
----------------------------------------------------------------- */ /* use N_VCloneVectorArray */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArray_Parallel(int count, N_Vector w); /* use N_VCloneVectorArrayEmpty */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArrayEmpty_Parallel(int count, N_Vector w); /* use N_VDestroyVectorArray */ SUNDIALS_DEPRECATED_EXPORT void N_VDestroyVectorArray_Parallel(N_Vector* vs, int count); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_raja.h0000644000176200001440000002071214645137106021372 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles, Cody J. Balos, Daniel McGreer @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the RAJA implementation of the * NVECTOR module. 
* -----------------------------------------------------------------*/ #ifndef _NVECTOR_RAJA_H #define _NVECTOR_RAJA_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * RAJA implementation of N_Vector * ----------------------------------------------------------------- */ /* RAJA implementation of the N_Vector 'content' structure contains the length of the vector, pointers to host and device arrays of 'realtype' components, a flag indicating ownership of the data, and a private data pointer */ struct _N_VectorContent_Raja { sunindextype length; booleantype own_helper; SUNMemory host_data; SUNMemory device_data; SUNMemoryHelper mem_helper; void* priv; /* 'private' data */ }; typedef struct _N_VectorContent_Raja *N_VectorContent_Raja; /* * ----------------------------------------------------------------- * NVECTOR_RAJA implementation specific functions * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_Raja(SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNew_Raja(sunindextype length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewManaged_Raja(sunindextype length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewWithMemHelp_Raja(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Raja(sunindextype length, realtype *h_vdata, realtype *d_vdata, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMakeManaged_Raja(sunindextype length, realtype *vdata, SUNContext sunctx); SUNDIALS_EXPORT void N_VSetHostArrayPointer_Raja(realtype* h_vdata, N_Vector v); SUNDIALS_EXPORT void N_VSetDeviceArrayPointer_Raja(realtype* d_vdata, N_Vector v); SUNDIALS_EXPORT booleantype N_VIsManagedMemory_Raja(N_Vector x); SUNDIALS_EXPORT void N_VCopyToDevice_Raja(N_Vector v); SUNDIALS_EXPORT void N_VCopyFromDevice_Raja(N_Vector v); 
SUNDIALS_STATIC_INLINE sunindextype N_VGetLength_Raja(N_Vector x) { N_VectorContent_Raja content = (N_VectorContent_Raja)x->content; return content->length; } SUNDIALS_STATIC_INLINE realtype *N_VGetHostArrayPointer_Raja(N_Vector x) { N_VectorContent_Raja content = (N_VectorContent_Raja)x->content; return(content->host_data == NULL ? NULL : (realtype*)content->host_data->ptr); } SUNDIALS_STATIC_INLINE realtype *N_VGetDeviceArrayPointer_Raja(N_Vector x) { N_VectorContent_Raja content = (N_VectorContent_Raja)x->content; return(content->device_data == NULL ? NULL : (realtype*)content->device_data->ptr); } /* * ----------------------------------------------------------------- * NVECTOR API functions * ----------------------------------------------------------------- */ SUNDIALS_STATIC_INLINE N_Vector_ID N_VGetVectorID_Raja(N_Vector v) { return SUNDIALS_NVEC_RAJA; } SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Raja(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Raja(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Raja(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Raja(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT void N_VSetArrayPointer_Raja(realtype *v_data, N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Raja(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Raja(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Raja(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Raja(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Raja(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Raja(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Raja(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Raja(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Raja(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Raja(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Raja(N_Vector x, N_Vector w); SUNDIALS_EXPORT 
realtype N_VWrmsNormMask_Raja(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Raja(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Raja(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Raja(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Raja(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_Raja(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Raja(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Raja(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Raja(int nvec, realtype* c, N_Vector* X, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_Raja(int nvec, realtype* c, N_Vector x, N_Vector* Y, N_Vector* Z); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Raja(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Raja(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Raja(int nvec, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Raja(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Raja(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Raja(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Raja(N_Vector x, N_Vector w, N_Vector id); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Raja(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Raja(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Raja(N_Vector x, void *buf); /* OPTIONAL operations for debugging */ SUNDIALS_EXPORT void N_VPrint_Raja(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Raja(N_Vector v, FILE *outfile); /* * 
----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_Raja(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_Raja(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_Raja(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Raja(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Raja(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Raja(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Raja(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Raja(N_Vector v, booleantype tf); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_openmpdev.h0000644000176200001440000002343314645137106022455 0ustar liggesusers/* ------------------------------------------------------------------- * Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL * ------------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the OpenMP 4.5+ implementation of the * NVECTOR module. * * Notes: * * - The definition of the generic N_Vector structure can be found * in the header file sundials_nvector.h. 
* * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype'. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_OpenMPDEV(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_OPENMPDEV_H #define _NVECTOR_OPENMPDEV_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * OpenMPDEV implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_OpenMPDEV { sunindextype length; /* vector length */ booleantype own_data; /* data ownership flag */ realtype *host_data; /* host data array */ realtype *dev_data; /* device data array */ }; typedef struct _N_VectorContent_OpenMPDEV *N_VectorContent_OpenMPDEV; /* * ----------------------------------------------------------------- * Macros NV_CONTENT_OMPDEV, NV_DATA_HOST_OMPDEV, NV_OWN_DATA_OMPDEV, * NV_LENGTH_OMPDEV, and NV_Ith_OMPDEV * ----------------------------------------------------------------- */ #define NV_CONTENT_OMPDEV(v) ( (N_VectorContent_OpenMPDEV)(v->content) ) #define NV_LENGTH_OMPDEV(v) ( NV_CONTENT_OMPDEV(v)->length ) #define NV_OWN_DATA_OMPDEV(v) ( NV_CONTENT_OMPDEV(v)->own_data ) #define NV_DATA_HOST_OMPDEV(v) ( NV_CONTENT_OMPDEV(v)->host_data ) #define NV_DATA_DEV_OMPDEV(v) ( NV_CONTENT_OMPDEV(v)->dev_data ) /* * ----------------------------------------------------------------- * Functions exported by nvector_openmpdev * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNew_OpenMPDEV(sunindextype 
vec_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewEmpty_OpenMPDEV(sunindextype vec_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_OpenMPDEV(sunindextype vec_length, realtype *h_data, realtype *v_data, SUNContext sunctx); SUNDIALS_EXPORT sunindextype N_VGetLength_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT void N_VPrint_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_OpenMPDEV(N_Vector v, FILE *outfile); SUNDIALS_EXPORT void N_VCopyToDevice_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT void N_VCopyFromDevice_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_OpenMPDEV(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_OpenMPDEV(N_Vector v); SUNDIALS_EXPORT void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_OpenMPDEV(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_OpenMPDEV(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_OpenMPDEV(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype 
N_VMin_OpenMPDEV(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_OpenMPDEV(N_Vector x); SUNDIALS_EXPORT void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* V, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector *Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_OpenMPDEV(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_OpenMPDEV(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvecs, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id); /* * ----------------------------------------------------------------- * Enable / disable fused vector 
operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf); /* * ----------------------------------------------------------------- * Deprecated functions * ----------------------------------------------------------------- */ /* use N_VCloneVectorArray */ SUNDIALS_DEPRECATED_EXPORT N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w); /* use N_VCloneVectorArrayEmpty */ SUNDIALS_DEPRECATED_EXPORT N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w); /* use N_VDestroyVectorArray */ SUNDIALS_DEPRECATED_EXPORT void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_mpiplusx.h0000644000176200001440000000337414645137106022343 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and 
Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the MPI+X implementation of the * NVECTOR module. The MPIPlusX NVECTOR is really just an extension * of the ManyVector. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_MPIPLUSX_H #define _NVECTOR_MPIPLUSX_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif typedef N_VectorContent_MPIManyVector N_VectorContent_MPIPlusX; SUNDIALS_EXPORT N_Vector N_VMake_MPIPlusX(MPI_Comm comm, N_Vector X, SUNContext sunctx); SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_MPIPlusX(N_Vector v); SUNDIALS_EXPORT realtype* N_VGetArrayPointer_MPIPlusX(N_Vector v); SUNDIALS_EXPORT void N_VSetArrayPointer_MPIPlusX(realtype *vdata, N_Vector v); SUNDIALS_EXPORT void N_VPrint_MPIPlusX(N_Vector x); SUNDIALS_EXPORT void N_VPrintFile_MPIPlusX(N_Vector x, FILE *outfile); SUNDIALS_EXPORT N_Vector N_VGetLocalVector_MPIPlusX(N_Vector v); SUNDIALS_EXPORT sunindextype N_VGetLocalLength_MPIPlusX(N_Vector v); SUNDIALS_STATIC_INLINE int N_VEnableFusedOps_MPIPlusX(N_Vector v, booleantype tf) { return N_VEnableFusedOps_MPIManyVector(v, tf); } #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_petsc.h0000644000176200001440000002376014645137106021601 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the main header file for the PETSc vector wrapper * for NVECTOR module. * * Notes: * * - The definition of the generic N_Vector structure can be * found in the header file sundials_nvector.h. * * - The definition of the type realtype can be found in the * header file sundials_types.h, and it may be changed (at the * build configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type booleantype. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_Petsc(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_PETSC_H #define _NVECTOR_PETSC_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * PETSc implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_Petsc { sunindextype local_length; /* copy of local vector length */ sunindextype global_length; /* copy of global vector length */ booleantype own_data; /* ownership of data */ Vec pvec; /* the PETSc Vec object */ MPI_Comm comm; /* copy of MPI communicator */ }; typedef struct _N_VectorContent_Petsc *N_VectorContent_Petsc; /* * ----------------------------------------------------------------- * Functions exported by nvector_petsc * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_Petsc(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Petsc(Vec v, SUNContext sunctx); SUNDIALS_EXPORT realtype 
*N_VGetArrayPointer_Petsc(N_Vector v); SUNDIALS_EXPORT Vec N_VGetVector_Petsc(N_Vector v); SUNDIALS_EXPORT void N_VSetVector_Petsc(N_Vector v, Vec p); SUNDIALS_EXPORT void N_VPrint_Petsc(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Petsc(N_Vector v, const char fname[]); /* nvector API functions */ SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_Petsc(N_Vector v); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Petsc(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Petsc(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Petsc(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Petsc(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT void N_VSetArrayPointer_Petsc(realtype *v_data, N_Vector v); SUNDIALS_EXPORT void *N_VGetCommunicator_Petsc(N_Vector v); SUNDIALS_EXPORT sunindextype N_VGetLength_Petsc(N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Petsc(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Petsc(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Petsc(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Petsc(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Petsc(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Petsc(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Petsc(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Petsc(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Petsc(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Petsc(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Petsc(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Petsc(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Petsc(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Petsc(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Petsc(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Petsc(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_Petsc(N_Vector x, 
N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Petsc(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Petsc(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Petsc(int nvec, realtype* c, N_Vector* X, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_Petsc(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_Petsc(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Petsc(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Petsc(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Petsc(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_Petsc(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_Petsc(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Petsc(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Petsc(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VDotProdLocal_Petsc(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNormLocal_Petsc(N_Vector x); SUNDIALS_EXPORT realtype N_VMinLocal_Petsc(N_Vector x); SUNDIALS_EXPORT realtype N_VL1NormLocal_Petsc(N_Vector x); SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Petsc(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Petsc(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT booleantype N_VInvTestLocal_Petsc(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMaskLocal_Petsc(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype 
N_VMinQuotientLocal_Petsc(N_Vector num, N_Vector denom); /* OPTIONAL single buffer reduction operations */ SUNDIALS_EXPORT int N_VDotProdMultiLocal_Petsc(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); SUNDIALS_EXPORT int N_VDotProdMultiAllReduce_Petsc(int nvec, N_Vector x, realtype* sum); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Petsc(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Petsc(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Petsc(N_Vector x, void *buf); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Petsc(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMultiLocal_Petsc(N_Vector v, booleantype tf); /* * ----------------------------------------------------------------- * Deprecated functions * ----------------------------------------------------------------- */ /* use N_VCloneVectorArray */ SUNDIALS_DEPRECATED_EXPORT N_Vector *N_VCloneVectorArray_Petsc(int count, N_Vector w); /* 
use N_VCloneVectorArrayEmpty */ SUNDIALS_DEPRECATED_EXPORT N_Vector *N_VCloneVectorArrayEmpty_Petsc(int count, N_Vector w); /* use N_VDestroyVectorArray */ SUNDIALS_DEPRECATED_EXPORT void N_VDestroyVectorArray_Petsc(N_Vector *vs, int count); #ifdef __cplusplus } #endif #endif /* _NVECTOR_PETSC_H */ StanHeaders/inst/include/nvector/nvector_hip.h0000644000176200001440000002213614645137106021237 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel McGreer and Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the HIP implementation of the * NVECTOR module. 
* -----------------------------------------------------------------*/ #ifndef _NVECTOR_HIP_H #define _NVECTOR_HIP_H #include #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * HIP implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_Hip { sunindextype length; booleantype own_helper; SUNMemory host_data; SUNMemory device_data; SUNHipExecPolicy* stream_exec_policy; SUNHipExecPolicy* reduce_exec_policy; SUNMemoryHelper mem_helper; void* priv; /* 'private' data */ }; typedef struct _N_VectorContent_Hip *N_VectorContent_Hip; /* * ----------------------------------------------------------------- * NVECTOR_HIP implementation specific functions * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_Hip(SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNew_Hip(sunindextype length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewManaged_Hip(sunindextype length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewWithMemHelp_Hip(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Hip(sunindextype length, realtype *h_vdata, realtype *d_vdata, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMakeManaged_Hip(sunindextype length, realtype *vdata, SUNContext sunctx); SUNDIALS_EXPORT void N_VSetHostArrayPointer_Hip(realtype* h_vdata, N_Vector v); SUNDIALS_EXPORT void N_VSetDeviceArrayPointer_Hip(realtype* d_vdata, N_Vector v); SUNDIALS_EXPORT booleantype N_VIsManagedMemory_Hip(N_Vector x); SUNDIALS_EXPORT int N_VSetKernelExecPolicy_Hip(N_Vector x, SUNHipExecPolicy* stream_exec_policy, SUNHipExecPolicy* reduce_exec_policy); SUNDIALS_EXPORT void N_VCopyToDevice_Hip(N_Vector v); SUNDIALS_EXPORT void N_VCopyFromDevice_Hip(N_Vector v); 
SUNDIALS_STATIC_INLINE sunindextype N_VGetLength_Hip(N_Vector x) { N_VectorContent_Hip content = (N_VectorContent_Hip)x->content; return content->length; } SUNDIALS_STATIC_INLINE realtype *N_VGetHostArrayPointer_Hip(N_Vector x) { N_VectorContent_Hip content = (N_VectorContent_Hip)x->content; return(content->host_data == NULL ? NULL : (realtype*)content->host_data->ptr); } SUNDIALS_STATIC_INLINE realtype *N_VGetDeviceArrayPointer_Hip(N_Vector x) { N_VectorContent_Hip content = (N_VectorContent_Hip)x->content; return(content->device_data == NULL ? NULL : (realtype*)content->device_data->ptr); } /* * ----------------------------------------------------------------- * NVECTOR API functions * ----------------------------------------------------------------- */ SUNDIALS_STATIC_INLINE N_Vector_ID N_VGetVectorID_Hip(N_Vector /*v*/) { return SUNDIALS_NVEC_HIP; } SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Hip(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Hip(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Hip(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Hip(N_Vector v, sunindextype *lrw, sunindextype *liw); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Hip(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Hip(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Hip(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Hip(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Hip(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Hip(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Hip(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Hip(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Hip(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Hip(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Hip(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Hip(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype 
N_VMin_Hip(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Hip(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Hip(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Hip(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_Hip(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Hip(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Hip(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Hip(int nvec, realtype* c, N_Vector* X, N_Vector Z); SUNDIALS_EXPORT int N_VScaleAddMulti_Hip(int nvec, realtype* c, N_Vector X, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_Hip(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Hip(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Hip(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Hip(int nvec, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Hip(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Hip(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_Hip(int nvec, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_Hip(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Hip(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Hip(N_Vector x, N_Vector w, N_Vector id); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Hip(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Hip(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Hip(N_Vector x, void 
*buf); /* OPTIONAL operations for debugging */ SUNDIALS_EXPORT void N_VPrint_Hip(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Hip(N_Vector v, FILE *outfile); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Hip(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Hip(N_Vector v, booleantype tf); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_trilinos.h0000644000176200001440000001331714645137106022323 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the main header file for the Trilinos vector wrapper * for NVECTOR module. 
* * Part I contains declarations specific to the Trilinos vector wrapper * implementation. * * Part II contains the prototype for the constructor * N_VMake_Trilinos as well as Trilinos-specific prototypes * for various useful vector operations. * * Notes: * * - The definition of the generic N_Vector structure can be * found in the header file sundials_nvector.h. * * - The definition of the type realtype can be found in the * header file sundials_types.h, and it may be changed (at the * build configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type booleantype. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_Trilinos(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_TRILINOS_H #define _NVECTOR_TRILINOS_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * PART I: N_Vector interface to Trilinos vector * ----------------------------------------------------------------- */ /* * Dummy _N_VectorContent_Trilinos structure is used for * interfacing C with C++ code */ struct _N_VectorContent_Trilinos {}; typedef struct _N_VectorContent_Trilinos *N_VectorContent_Trilinos; /* * ----------------------------------------------------------------- * PART II: functions exported by nvector_Trilinos * * CONSTRUCTORS: * N_VNewEmpty_Trilinos * ----------------------------------------------------------------- */ /* * ----------------------------------------------------------------- * Function : N_VNewEmpty_Trilinos * ----------------------------------------------------------------- * This function creates a new N_Vector wrapper for a Trilinos * vector. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_Trilinos(SUNContext sunctx); /* * ----------------------------------------------------------------- * Trilinos implementations of the vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_Trilinos(N_Vector v); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Trilinos(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Trilinos(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Trilinos(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Trilinos(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT void *N_VGetCommunicator_Trilinos(N_Vector v); SUNDIALS_EXPORT sunindextype N_VGetLength_Trilinos(N_Vector v); SUNDIALS_EXPORT void N_VLinearSum_Trilinos(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Trilinos(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Trilinos(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Trilinos(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Trilinos(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Trilinos(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Trilinos(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Trilinos(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Trilinos(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Trilinos(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Trilinos(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Trilinos(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Trilinos(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Trilinos(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Trilinos(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Trilinos(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_Trilinos(N_Vector x, N_Vector z); 
SUNDIALS_EXPORT booleantype N_VConstrMask_Trilinos(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Trilinos(N_Vector num, N_Vector denom); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VDotProdLocal_Trilinos(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNormLocal_Trilinos(N_Vector x); SUNDIALS_EXPORT realtype N_VMinLocal_Trilinos(N_Vector x); SUNDIALS_EXPORT realtype N_VL1NormLocal_Trilinos(N_Vector x); SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Trilinos(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Trilinos(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT booleantype N_VInvTestLocal_Trilinos(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMaskLocal_Trilinos(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotientLocal_Trilinos(N_Vector num, N_Vector denom); #ifdef __cplusplus } #endif #endif /* _NVECTOR_TRILINOS_H */ StanHeaders/inst/include/nvector/nvector_openmp.h0000644000176200001440000002316514645137106021760 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the OpenMP implementation of the * NVECTOR module. 
* * Notes: * * - The definition of the generic N_Vector structure can be found * in the header file sundials_nvector.h. * * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype'. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_OpenMP(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_OPENMP_H #define _NVECTOR_OPENMP_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * OpenMP implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_OpenMP { sunindextype length; /* vector length */ booleantype own_data; /* data ownership flag */ realtype *data; /* data array */ int num_threads; /* number of OpenMP threads */ }; typedef struct _N_VectorContent_OpenMP *N_VectorContent_OpenMP; /* * ----------------------------------------------------------------- * Macros NV_CONTENT_OMP, NV_DATA_OMP, NV_OWN_DATA_OMP, * NV_LENGTH_OMP, and NV_Ith_OMP * ----------------------------------------------------------------- */ #define NV_CONTENT_OMP(v) ( (N_VectorContent_OpenMP)(v->content) ) #define NV_LENGTH_OMP(v) ( NV_CONTENT_OMP(v)->length ) #define NV_NUM_THREADS_OMP(v) ( NV_CONTENT_OMP(v)->num_threads ) #define NV_OWN_DATA_OMP(v) ( NV_CONTENT_OMP(v)->own_data ) #define NV_DATA_OMP(v) ( NV_CONTENT_OMP(v)->data ) #define NV_Ith_OMP(v,i) ( NV_DATA_OMP(v)[i] ) /* * ----------------------------------------------------------------- * Functions exported by nvector_openmp * 
----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNew_OpenMP(sunindextype vec_length, int num_threads, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewEmpty_OpenMP(sunindextype vec_length, int num_threads, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_OpenMP(sunindextype vec_length, realtype *v_data, int num_threads, SUNContext sunctx); SUNDIALS_EXPORT sunindextype N_VGetLength_OpenMP(N_Vector v); SUNDIALS_EXPORT void N_VPrint_OpenMP(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_OpenMP(N_Vector v, FILE *outfile); SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_OpenMP(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_OpenMP(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_OpenMP(N_Vector v); SUNDIALS_EXPORT void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT realtype *N_VGetArrayPointer_OpenMP(N_Vector v); SUNDIALS_EXPORT void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_OpenMP(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_OpenMP(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_OpenMP(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_OpenMP(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_OpenMP(N_Vector x); SUNDIALS_EXPORT 
realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_OpenMP(N_Vector x); SUNDIALS_EXPORT void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* V, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_OpenMP(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_OpenMP(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_OpenMP(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_OpenMP(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_OpenMP(int nvecs, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_OpenMP(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_OpenMP(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_OpenMP(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_OpenMP(N_Vector x, void *buf); SUNDIALS_EXPORT int 
N_VBufUnpack_OpenMP(N_Vector x, void *buf); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf); /* * ----------------------------------------------------------------- * Deprecated functions * ----------------------------------------------------------------- */ /* use N_VCloneVectorArray */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w); /* use N_VCloneVectorArrayEmpty */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w); /* use N_VDestroyVectorArray */ SUNDIALS_DEPRECATED_EXPORT void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_mpimanyvector.h0000644000176200001440000002465114645137106023360 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. 
Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the main header file for the "MPIManyVector" implementation * of the NVECTOR module. * * Notes: * * - The definition of the generic N_Vector structure can be found * in the header file sundials_nvector.h. * * - The definitions of the types 'realtype' and 'sunindextype' can * be found in the header file sundials_types.h, and it may be * changed (at the configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype'. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_MPIManyVector(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. 
* -----------------------------------------------------------------*/ #ifndef _NVECTOR_MANY_VECTOR_H #define _NVECTOR_MANY_VECTOR_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------- ManyVector implementation of N_Vector ----------------------------------------------------------------- */ struct _N_VectorContent_MPIManyVector { MPI_Comm comm; /* overall MPI communicator */ sunindextype num_subvectors; /* number of vectors attached */ sunindextype global_length; /* overall manyvector length */ N_Vector* subvec_array; /* pointer to N_Vector array */ booleantype own_data; /* flag indicating data ownership */ }; typedef struct _N_VectorContent_MPIManyVector *N_VectorContent_MPIManyVector; /* ----------------------------------------------------------------- functions exported by ManyVector ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VMake_MPIManyVector(MPI_Comm comm, sunindextype num_subvectors, N_Vector *vec_array, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNew_MPIManyVector(sunindextype num_subvectors, N_Vector *vec_array, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VGetSubvector_MPIManyVector(N_Vector v, sunindextype vec_num); SUNDIALS_EXPORT realtype *N_VGetSubvectorArrayPointer_MPIManyVector(N_Vector v, sunindextype vec_num); SUNDIALS_EXPORT int N_VSetSubvectorArrayPointer_MPIManyVector(realtype *v_data, N_Vector v, sunindextype vec_num); SUNDIALS_EXPORT sunindextype N_VGetNumSubvectors_MPIManyVector(N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_MPIManyVector(N_Vector v); SUNDIALS_EXPORT void N_VPrint_MPIManyVector(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_MPIManyVector(N_Vector v, FILE *outfile); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_MPIManyVector(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_MPIManyVector(N_Vector w); 
SUNDIALS_EXPORT void N_VDestroy_MPIManyVector(N_Vector v); SUNDIALS_EXPORT void N_VSpace_MPIManyVector(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT void *N_VGetCommunicator_MPIManyVector(N_Vector v); SUNDIALS_EXPORT sunindextype N_VGetLength_MPIManyVector(N_Vector v); SUNDIALS_EXPORT void N_VLinearSum_MPIManyVector(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_MPIManyVector(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_MPIManyVector(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_MPIManyVector(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_MPIManyVector(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_MPIManyVector(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_MPIManyVector(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_MPIManyVector(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_MPIManyVector(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_MPIManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_MPIManyVector(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_MPIManyVector(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_MPIManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_MPIManyVector(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_MPIManyVector(N_Vector x); SUNDIALS_EXPORT void N_VCompare_MPIManyVector(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_MPIManyVector(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_MPIManyVector(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_MPIManyVector(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_MPIManyVector(int nvec, realtype* c, N_Vector* V, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_MPIManyVector(int nvec, realtype* a, N_Vector x, N_Vector* 
Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_MPIManyVector(int nvec, N_Vector x, N_Vector *Y, realtype* dotprods); /* single buffer reduction operations */ SUNDIALS_EXPORT int N_VDotProdMultiLocal_MPIManyVector(int nvec, N_Vector x, N_Vector *Y, realtype* dotprods); SUNDIALS_EXPORT int N_VDotProdMultiAllReduce_MPIManyVector(int nvec_total, N_Vector x, realtype* sum); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_MPIManyVector(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_MPIManyVector(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_MPIManyVector(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_MPIManyVector(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_MPIManyVector(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VDotProdLocal_MPIManyVector(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNormLocal_MPIManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VMinLocal_MPIManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VL1NormLocal_MPIManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VWSqrSumLocal_MPIManyVector(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_MPIManyVector(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT booleantype N_VInvTestLocal_MPIManyVector(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMaskLocal_MPIManyVector(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotientLocal_MPIManyVector(N_Vector num, N_Vector denom); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_MPIManyVector(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_MPIManyVector(N_Vector x, void *buf); SUNDIALS_EXPORT int 
N_VBufUnpack_MPIManyVector(N_Vector x, void *buf); /* ----------------------------------------------------------------- Enable / disable fused vector operations ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_MPIManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMultiLocal_MPIManyVector(N_Vector v, booleantype tf); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_serial.h0000644000176200001440000002211414645137106021732 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the serial implementation of the * NVECTOR module. 
* * Notes: * * - The definition of the generic N_Vector structure can be found * in the header file sundials_nvector.h. * * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype'. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_Serial(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_SERIAL_H #define _NVECTOR_SERIAL_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * SERIAL implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_Serial { sunindextype length; /* vector length */ booleantype own_data; /* data ownership flag */ realtype *data; /* data array */ }; typedef struct _N_VectorContent_Serial *N_VectorContent_Serial; /* * ----------------------------------------------------------------- * Macros NV_CONTENT_S, NV_DATA_S, NV_OWN_DATA_S, * NV_LENGTH_S, and NV_Ith_S * ----------------------------------------------------------------- */ #define NV_CONTENT_S(v) ( (N_VectorContent_Serial)(v->content) ) #define NV_LENGTH_S(v) ( NV_CONTENT_S(v)->length ) #define NV_OWN_DATA_S(v) ( NV_CONTENT_S(v)->own_data ) #define NV_DATA_S(v) ( NV_CONTENT_S(v)->data ) #define NV_Ith_S(v,i) ( NV_DATA_S(v)[i] ) /* * ----------------------------------------------------------------- * Functions exported by nvector_serial * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNew_Serial(sunindextype vec_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector 
N_VNewEmpty_Serial(sunindextype vec_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Serial(sunindextype vec_length, realtype *v_data, SUNContext sunctx); SUNDIALS_EXPORT sunindextype N_VGetLength_Serial(N_Vector v); SUNDIALS_EXPORT void N_VPrint_Serial(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Serial(N_Vector v, FILE *outfile); SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_Serial(N_Vector v); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Serial(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Serial(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Serial(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Serial(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT realtype *N_VGetArrayPointer_Serial(N_Vector v); SUNDIALS_EXPORT void N_VSetArrayPointer_Serial(realtype *v_data, N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Serial(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Serial(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Serial(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Serial(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Serial(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Serial(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Serial(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Serial(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Serial(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Serial(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Serial(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Serial(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Serial(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Serial(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Serial(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Serial(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype 
N_VInvTest_Serial(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Serial(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Serial(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Serial(int nvec, realtype* c, N_Vector* V, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_Serial(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_Serial(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Serial(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Serial(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Serial(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_Serial(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_Serial(int nvecs, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Serial(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Serial(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Serial(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Serial(N_Vector x, N_Vector w, N_Vector id); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Serial(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Serial(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Serial(N_Vector x, void *buf); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- 
*/ SUNDIALS_EXPORT int N_VEnableFusedOps_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Serial(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Serial(N_Vector v, booleantype tf); /* * ----------------------------------------------------------------- * Deprecated functions * ----------------------------------------------------------------- */ /* use N_VCloneVectorArray */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArray_Serial(int count, N_Vector w); /* use N_VCloneVectorArrayEmpty */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArrayEmpty_Serial(int count, N_Vector w); /* use N_VDestroyVectorArray */ SUNDIALS_DEPRECATED_EXPORT void N_VDestroyVectorArray_Serial(N_Vector* vs, int count); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/trilinos/0000755000176200001440000000000014511135055020376 5ustar liggesusersStanHeaders/inst/include/nvector/trilinos/SundialsTpetraVectorKernels.hpp0000644000176200001440000005674714645137106026612 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National 
Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_TPETRA_VECTOR_KERNELS_HPP_ #define _SUNDIALS_TPETRA_VECTOR_KERNELS_HPP_ #include #include #include namespace sundials { namespace trilinos { /** * The namespace contains custom Kokkos-based kernels needed by SUNDIALS * * Kernels are inlined in case this file is included in more than one * translation unit. */ namespace nvector_tpetra { using Teuchos::outArg; using Teuchos::REDUCE_SUM; using Teuchos::REDUCE_MIN; using Teuchos::REDUCE_MAX; using Teuchos::reduceAll; typedef sundials::trilinos::nvector_tpetra::TpetraVectorInterface::vector_type vector_type; typedef vector_type::scalar_type scalar_type; typedef vector_type::mag_type mag_type; typedef vector_type::global_ordinal_type global_ordinal_type; typedef vector_type::local_ordinal_type local_ordinal_type; typedef vector_type::node_type::memory_space memory_space; typedef vector_type::execution_space execution_space; static constexpr scalar_type zero = 0; static constexpr scalar_type half = 0.5; static constexpr scalar_type one = 1.0; static constexpr scalar_type onept5 = 1.5; /*---------------------------------------------------------------- * Streaming vector kernels *---------------------------------------------------------------*/ /// Divide: z(i) = x(i)/y(i) forall i inline void elementWiseDivide(const vector_type& x, const vector_type& y, vector_type& z) { const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); if (y.need_sync()) const_cast(y).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto y_2d = y.getLocalView(); auto y_1d = Kokkos::subview (y_2d, Kokkos::ALL(), 0); auto z_2d = z.getLocalView(); auto z_1d = Kokkos::subview (z_2d, 
Kokkos::ALL(), 0); z.modify(); Kokkos::parallel_for ("elementWiseDivide", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i) { z_1d(i) = x_1d(i)/y_1d(i); } ); } /// Add constant to all vector elements: z(i) = x(i) + b inline void addConst(const vector_type& x, scalar_type b, vector_type& z) { const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); if (z.need_sync()) const_cast(z).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto z_2d = z.getLocalView(); auto z_1d = Kokkos::subview (z_2d, Kokkos::ALL(), 0); z.modify(); Kokkos::parallel_for ("addConst", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i) { z_1d(i) = x_1d(i) + b; } ); } /// Compare vector elements to c: z(i) = |x(i)| >= c ? 1 : 0 inline void compare(scalar_type c, const vector_type& x, vector_type& z) { const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); if (z.need_sync()) const_cast(z).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto z_2d = z.getLocalView(); auto z_1d = Kokkos::subview (z_2d, Kokkos::ALL(), 0); z.modify(); Kokkos::parallel_for ("compare", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i) { z_1d(i) = std::abs(x_1d(i)) >= c ? 
one : zero; } ); } /*---------------------------------------------------------------- * Reduction vector kernels *---------------------------------------------------------------*/ /// Weighted root-mean-square norm inline mag_type normWrms(const vector_type& x, const vector_type& w) { const Teuchos::RCP >& comm = x.getMap()->getComm(); const local_ordinal_type N = static_cast(x.getLocalLength()); const global_ordinal_type Nglob = static_cast(x.getGlobalLength()); if (x.need_sync()) const_cast(x).sync(); if (w.need_sync()) const_cast(w).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto w_2d = w.getLocalView(); auto w_1d = Kokkos::subview (w_2d, Kokkos::ALL(), 0); mag_type sum = zero; Kokkos::parallel_reduce ("normWrms", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { local_sum += x_1d(i)*w_1d(i)*(x_1d(i)*w_1d(i)); }, sum); mag_type globalSum = zero; reduceAll(*comm, REDUCE_SUM, sum, outArg(globalSum)); return std::sqrt(globalSum/static_cast(Nglob)); } /// Weighted root-mean-square norm with mask inline mag_type normWrmsMask(const vector_type& x, const vector_type& w, const vector_type& id) { const Teuchos::RCP >& comm = x.getMap()->getComm(); const local_ordinal_type N = static_cast(x.getLocalLength()); const global_ordinal_type Nglob = static_cast(x.getGlobalLength()); if (x.need_sync()) const_cast(x).sync(); if (w.need_sync()) const_cast(w).sync(); if (id.need_sync()) const_cast(id).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto w_2d = w.getLocalView(); auto w_1d = Kokkos::subview (w_2d, Kokkos::ALL(), 0); auto id_2d = id.getLocalView(); auto id_1d = Kokkos::subview (id_2d, Kokkos::ALL(), 0); mag_type sum = zero; Kokkos::parallel_reduce ("normWrmsMask", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { if (id_1d(i) > zero) local_sum += x_1d(i)*w_1d(i)*(x_1d(i)*w_1d(i)); }, sum); 
mag_type globalSum = zero; reduceAll(*comm, REDUCE_SUM, sum, outArg(globalSum)); return std::sqrt(globalSum/static_cast(Nglob)); } /// Find minimum element value in the vector inline scalar_type minElement(const vector_type& x) { using namespace Kokkos; const Teuchos::RCP >& comm = x.getMap()->getComm(); const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); scalar_type minimum; Min min_reducer(minimum); Kokkos::parallel_reduce ("minElement", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, scalar_type &local_min) { min_reducer.join(local_min, x_1d(i)); }, min_reducer); scalar_type globalMin; reduceAll(*comm, REDUCE_MIN, minimum, outArg(globalMin)); return globalMin; } /// Weighted L2 norm inline mag_type normWL2(const vector_type& x, const vector_type& w) { const Teuchos::RCP >& comm = x.getMap()->getComm(); const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); if (w.need_sync()) const_cast(w).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto w_2d = w.getLocalView(); auto w_1d = Kokkos::subview (w_2d, Kokkos::ALL(), 0); mag_type sum = zero; Kokkos::parallel_reduce ("normWL2", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { local_sum += x_1d(i)*w_1d(i)*(x_1d(i)*w_1d(i)); }, sum); mag_type globalSum = zero; reduceAll(*comm, REDUCE_SUM, sum, outArg(globalSum)); return std::sqrt(globalSum); } /// Elementwise inverse, return false if any denominator is zero. 
inline bool invTest(const vector_type& x, vector_type& z) { using namespace Kokkos; const Teuchos::RCP >& comm = x.getMap()->getComm(); const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto z_2d = z.getLocalView(); auto z_1d = Kokkos::subview (z_2d, Kokkos::ALL(), 0); scalar_type minimum; Min min_reducer(minimum); z.modify(); Kokkos::parallel_reduce ("invTest", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, scalar_type &local_min) { static constexpr scalar_type zero = 0; static constexpr scalar_type one = 1.0; if (x_1d(i) == zero) { min_reducer.join(local_min, zero); } else { z_1d(i) = one/x_1d(i); } }, min_reducer); scalar_type globalMin; reduceAll(*comm, REDUCE_MIN, minimum, outArg(globalMin)); return (globalMin > half); } /// Find constraint violations inline bool constraintMask(const vector_type& c, const vector_type& x, vector_type& m) { const Teuchos::RCP >& comm = x.getMap()->getComm(); const local_ordinal_type N = static_cast(x.getLocalLength()); if (c.need_sync()) const_cast(c).sync(); if (x.need_sync()) const_cast(x).sync(); auto c_2d = c.getLocalView(); auto c_1d = Kokkos::subview (c_2d, Kokkos::ALL(), 0); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto m_2d = m.getLocalView(); auto m_1d = Kokkos::subview (m_2d, Kokkos::ALL(), 0); m.modify(); mag_type sum = zero; Kokkos::parallel_reduce ("constraintMask", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { const bool test = (std::abs(c_1d(i)) > onept5 && c_1d(i)*x_1d(i) <= zero) || (std::abs(c_1d(i)) > half && c_1d(i)*x_1d(i) < zero); m_1d(i) = test ? 
one : zero; local_sum += m_1d(i); }, sum); mag_type globalSum = zero; reduceAll(*comm, REDUCE_SUM, sum, outArg(globalSum)); return (globalSum < half); } /// Minimum quotient: min_i(num(i)/den(i)) inline scalar_type minQuotient(const vector_type& num, const vector_type& den) { using namespace Kokkos; const Teuchos::RCP >& comm = num.getMap()->getComm(); const local_ordinal_type N = static_cast(num.getLocalLength()); if (num.need_sync()) const_cast(num).sync(); if (den.need_sync()) const_cast(den).sync(); auto num_2d = num.getLocalView(); auto num_1d = Kokkos::subview (num_2d, Kokkos::ALL(), 0); auto den_2d = den.getLocalView(); auto den_1d = Kokkos::subview (den_2d, Kokkos::ALL(), 0); scalar_type minimum; Min min_reducer(minimum); Kokkos::parallel_reduce ("minQuotient", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, scalar_type &local_min) { if (den_1d(i) != zero) min_reducer.join(local_min, num_1d(i)/den_1d(i)); }, min_reducer); scalar_type globalMin; reduceAll(*comm, REDUCE_MIN, minimum, outArg(globalMin)); return globalMin; } /// MPI task-local dot-product inline scalar_type dotProdLocal(const vector_type& x, const vector_type& y) { const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); if (y.need_sync()) const_cast(y).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto y_2d = y.getLocalView(); auto y_1d = Kokkos::subview (y_2d, Kokkos::ALL(), 0); scalar_type sum = zero; Kokkos::parallel_reduce ("dotProdLocal", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, scalar_type &local_sum) { local_sum += x_1d(i)*y_1d(i); }, sum); return sum; } /// MPI task-local maximum norm of a vector inline mag_type maxNormLocal(const vector_type& x) { using namespace Kokkos; const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, 
Kokkos::ALL(), 0); mag_type maximum; Max max_reducer(maximum); Kokkos::parallel_reduce ("maxNormLocal", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_max) { max_reducer.join(local_max, std::abs(x_1d(i))); }, max_reducer); return maximum; } /// MPI task-local minimum element in the vector inline scalar_type minLocal(const vector_type& x) { using namespace Kokkos; const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); scalar_type minimum; Min min_reducer(minimum); Kokkos::parallel_reduce ("minElement", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, scalar_type &local_min) { min_reducer.join(local_min, x_1d(i)); }, min_reducer); return minimum; } /// MPI task-local L1 norm of a vector inline mag_type L1NormLocal(const vector_type& x) { using namespace Kokkos; const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); mag_type sum = zero; Kokkos::parallel_reduce ("L1NormLocal", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { local_sum += std::abs(x_1d(i)); }, sum); return sum; } /// MPI task-local weighted squared sum inline mag_type WSqrSumLocal(const vector_type& x, const vector_type& w) { const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); if (w.need_sync()) const_cast(w).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto w_2d = w.getLocalView(); auto w_1d = Kokkos::subview (w_2d, Kokkos::ALL(), 0); mag_type sum = zero; Kokkos::parallel_reduce ("WSqrSumLocal", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { local_sum += x_1d(i)*w_1d(i)*(x_1d(i)*w_1d(i)); }, 
sum); return sum; } /// MPI task-local weighted squared masked sum inline mag_type WSqrSumMaskLocal(const vector_type& x, const vector_type& w, const vector_type& id) { const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); if (w.need_sync()) const_cast(w).sync(); if (id.need_sync()) const_cast(id).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto w_2d = w.getLocalView(); auto w_1d = Kokkos::subview (w_2d, Kokkos::ALL(), 0); auto id_2d = id.getLocalView(); auto id_1d = Kokkos::subview (id_2d, Kokkos::ALL(), 0); mag_type sum = zero; Kokkos::parallel_reduce ("WSqrSumMaskLocal", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { if (id_1d(i) > zero) local_sum += x_1d(i)*w_1d(i)*(x_1d(i)*w_1d(i)); }, sum); return sum; } /// MPI task-local elementwise inverse, return false if any denominator is zero. inline bool invTestLocal(const vector_type& x, vector_type& z) { using namespace Kokkos; const local_ordinal_type N = static_cast(x.getLocalLength()); if (x.need_sync()) const_cast(x).sync(); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto z_2d = z.getLocalView(); auto z_1d = Kokkos::subview (z_2d, Kokkos::ALL(), 0); scalar_type minimum; Min min_reducer(minimum); z.modify(); Kokkos::parallel_reduce ("invTestLocal", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, scalar_type &local_min) { static constexpr scalar_type zero = 0; static constexpr scalar_type one = 1.0; if (x_1d(i) == zero) { min_reducer.join(local_min, zero); } else { z_1d(i) = one/x_1d(i); } }, min_reducer); return (minimum > half); } /// MPI task-local constraint violation check inline bool constraintMaskLocal(const vector_type& c, const vector_type& x, vector_type& m) { const local_ordinal_type N = static_cast(x.getLocalLength()); if (c.need_sync()) const_cast(c).sync(); if (x.need_sync()) 
const_cast(x).sync(); auto c_2d = c.getLocalView(); auto c_1d = Kokkos::subview (c_2d, Kokkos::ALL(), 0); auto x_2d = x.getLocalView(); auto x_1d = Kokkos::subview (x_2d, Kokkos::ALL(), 0); auto m_2d = m.getLocalView(); auto m_1d = Kokkos::subview (m_2d, Kokkos::ALL(), 0); m.modify(); mag_type sum = zero; Kokkos::parallel_reduce ("constraintMaskLocal", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, mag_type &local_sum) { const bool test = (std::abs(c_1d(i)) > onept5 && c_1d(i)*x_1d(i) <= zero) || (std::abs(c_1d(i)) > half && c_1d(i)*x_1d(i) < zero); m_1d(i) = test ? one : zero; local_sum += m_1d(i); }, sum); return (sum < half); } /// MPI task-local minimum quotient: min_i(num(i)/den(i)) inline scalar_type minQuotientLocal(const vector_type& num, const vector_type& den) { using namespace Kokkos; const local_ordinal_type N = static_cast(num.getLocalLength()); if (num.need_sync()) const_cast(num).sync(); if (den.need_sync()) const_cast(den).sync(); auto num_2d = num.getLocalView(); auto num_1d = Kokkos::subview (num_2d, Kokkos::ALL(), 0); auto den_2d = den.getLocalView(); auto den_1d = Kokkos::subview (den_2d, Kokkos::ALL(), 0); scalar_type minimum; Min min_reducer(minimum); Kokkos::parallel_reduce ("minQuotient", Kokkos::RangePolicy(0, N), KOKKOS_LAMBDA (const local_ordinal_type &i, scalar_type &local_min) { if (den_1d(i) != zero) min_reducer.join(local_min, num_1d(i)/den_1d(i)); }, min_reducer); return minimum; } } // namespace nvector_tpetra } // namespace trilinos } // namespace sundials #endif // _TPETRA_SUNDIALS_VECTOR_KERNELS_HPP_ StanHeaders/inst/include/nvector/trilinos/SundialsTpetraVectorInterface.hpp0000644000176200001440000000401514645137106027064 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and 
Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_TPETRA_INTERFACE_HPP_ #define _SUNDIALS_TPETRA_INTERFACE_HPP_ #include #include namespace sundials { namespace trilinos { namespace nvector_tpetra { struct TpetraVectorInterface : public _N_VectorContent_Trilinos { // Typedef of Tpetra vector class to be used with SUNDIALS typedef Tpetra::Vector vector_type; TpetraVectorInterface(Teuchos::RCP rcpvec) { rcpvec_ = rcpvec; } ~TpetraVectorInterface() = default; Teuchos::RCP rcpvec_; }; } // namespace nvector_tpetra } // namespace trilinos } // namespace sundials inline Teuchos::RCP N_VGetVector_Trilinos(N_Vector v) { sundials::trilinos::nvector_tpetra::TpetraVectorInterface* iface = reinterpret_cast(v->content); return iface->rcpvec_; } /* * ----------------------------------------------------------------- * Function : N_VMake_Trilinos * ----------------------------------------------------------------- * This function attaches N_Vector functions to a Tpetra vector. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VMake_Trilinos(Teuchos::RCP v, SUNContext sunctx); #endif // _TPETRA_SUNDIALS_INTERFACE_HPP_ StanHeaders/inst/include/nvector/nvector_sycl.h0000644000176200001440000002401114645137106021423 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the SYCL implementation of the * NVECTOR module. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_SYCL_H #define _NVECTOR_SYCL_H #include #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------- * SYCL implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_Sycl { sunindextype length; booleantype own_exec; booleantype own_helper; SUNMemory host_data; SUNMemory device_data; SUNSyclExecPolicy* stream_exec_policy; SUNSyclExecPolicy* reduce_exec_policy; SUNMemoryHelper mem_helper; ::sycl::queue* queue; void* priv; /* 'private' data */ }; typedef struct _N_VectorContent_Sycl *N_VectorContent_Sycl; /* ----------------------------------------------------------------- * NVECTOR_SYCL implementation specific functions * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_Sycl(SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNew_Sycl(sunindextype length, ::sycl::queue *Q, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewManaged_Sycl(sunindextype length, ::sycl::queue *Q, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewWithMemHelp_Sycl(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, ::sycl::queue *Q, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Sycl(sunindextype length, realtype *h_vdata, realtype *d_vdata, ::sycl::queue *Q, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMakeManaged_Sycl(sunindextype length, realtype *vdata, ::sycl::queue *Q, SUNContext sunctx); SUNDIALS_EXPORT void N_VSetHostArrayPointer_Sycl(realtype* h_vdata, N_Vector v); SUNDIALS_EXPORT void 
N_VSetDeviceArrayPointer_Sycl(realtype* d_vdata, N_Vector v); SUNDIALS_EXPORT booleantype N_VIsManagedMemory_Sycl(N_Vector x); SUNDIALS_EXPORT int N_VSetKernelExecPolicy_Sycl(N_Vector x, SUNSyclExecPolicy* stream_exec_policy, SUNSyclExecPolicy* reduce_exec_policy); SUNDIALS_EXPORT void N_VCopyToDevice_Sycl(N_Vector v); SUNDIALS_EXPORT void N_VCopyFromDevice_Sycl(N_Vector v); SUNDIALS_STATIC_INLINE sunindextype N_VGetLength_Sycl(N_Vector x) { N_VectorContent_Sycl content = (N_VectorContent_Sycl)x->content; return content->length; } SUNDIALS_STATIC_INLINE realtype *N_VGetHostArrayPointer_Sycl(N_Vector x) { N_VectorContent_Sycl content = (N_VectorContent_Sycl)x->content; return(content->host_data == NULL ? NULL : (realtype*)content->host_data->ptr); } SUNDIALS_STATIC_INLINE realtype *N_VGetDeviceArrayPointer_Sycl(N_Vector x) { N_VectorContent_Sycl content = (N_VectorContent_Sycl)x->content; return(content->device_data == NULL ? NULL : (realtype*)content->device_data->ptr); } /* ----------------------------------------------------------------- * NVECTOR API functions * ----------------------------------------------------------------- */ SUNDIALS_STATIC_INLINE N_Vector_ID N_VGetVectorID_Sycl(N_Vector v) { return SUNDIALS_NVEC_SYCL; } SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Sycl(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Sycl(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Sycl(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Sycl(N_Vector v, sunindextype *lrw, sunindextype *liw); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Sycl(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Sycl(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Sycl(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Sycl(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Sycl(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Sycl(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Sycl(N_Vector 
x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Sycl(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Sycl(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Sycl(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Sycl(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Sycl(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Sycl(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Sycl(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Sycl(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Sycl(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_Sycl(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Sycl(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Sycl(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Sycl(int nvec, realtype* c, N_Vector* X, N_Vector Z); SUNDIALS_EXPORT int N_VScaleAddMulti_Sycl(int nvec, realtype* c, N_Vector X, N_Vector* Y, N_Vector* Z); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Sycl(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Sycl(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Sycl(int nvec, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Sycl(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Sycl(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_Sycl(int nvec, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_Sycl(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Sycl(N_Vector x, N_Vector w); 
SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Sycl(N_Vector x, N_Vector w, N_Vector id); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Sycl(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Sycl(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Sycl(N_Vector x, void *buf); /* OPTIONAL operations for debugging */ SUNDIALS_EXPORT void N_VPrint_Sycl(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Sycl(N_Vector v, FILE *outfile); /* ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Sycl(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Sycl(N_Vector v, booleantype tf); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_cuda.h0000644000176200001440000002231014645137106021365 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles and Cody J. 
Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the CUDA implementation of the * NVECTOR module. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_CUDA_H #define _NVECTOR_CUDA_H #include #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * CUDA implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_Cuda { sunindextype length; booleantype own_helper; SUNMemory host_data; SUNMemory device_data; SUNCudaExecPolicy* stream_exec_policy; SUNCudaExecPolicy* reduce_exec_policy; SUNMemoryHelper mem_helper; void* priv; /* 'private' data */ }; typedef struct _N_VectorContent_Cuda *N_VectorContent_Cuda; /* * ----------------------------------------------------------------- * NVECTOR_CUDA implementation specific functions * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_Cuda(SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNew_Cuda(sunindextype length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewManaged_Cuda(sunindextype length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewWithMemHelp_Cuda(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Cuda(sunindextype length, realtype *h_vdata, realtype *d_vdata, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMakeManaged_Cuda(sunindextype length, realtype 
*vdata, SUNContext sunctx); SUNDIALS_EXPORT void N_VSetHostArrayPointer_Cuda(realtype* h_vdata, N_Vector v); SUNDIALS_EXPORT void N_VSetDeviceArrayPointer_Cuda(realtype* d_vdata, N_Vector v); SUNDIALS_EXPORT booleantype N_VIsManagedMemory_Cuda(N_Vector x); SUNDIALS_EXPORT int N_VSetKernelExecPolicy_Cuda(N_Vector x, SUNCudaExecPolicy* stream_exec_policy, SUNCudaExecPolicy* reduce_exec_policy); SUNDIALS_EXPORT void N_VCopyToDevice_Cuda(N_Vector v); SUNDIALS_EXPORT void N_VCopyFromDevice_Cuda(N_Vector v); SUNDIALS_STATIC_INLINE sunindextype N_VGetLength_Cuda(N_Vector x) { N_VectorContent_Cuda content = (N_VectorContent_Cuda)x->content; return content->length; } SUNDIALS_STATIC_INLINE realtype *N_VGetHostArrayPointer_Cuda(N_Vector x) { N_VectorContent_Cuda content = (N_VectorContent_Cuda)x->content; return(content->host_data == NULL ? NULL : (realtype*)content->host_data->ptr); } SUNDIALS_STATIC_INLINE realtype *N_VGetDeviceArrayPointer_Cuda(N_Vector x) { N_VectorContent_Cuda content = (N_VectorContent_Cuda)x->content; return(content->device_data == NULL ? 
NULL : (realtype*)content->device_data->ptr); } /* * ----------------------------------------------------------------- * NVECTOR API functions * ----------------------------------------------------------------- */ SUNDIALS_STATIC_INLINE N_Vector_ID N_VGetVectorID_Cuda(N_Vector /*v*/) { return SUNDIALS_NVEC_CUDA; } SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Cuda(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Cuda(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Cuda(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Cuda(N_Vector v, sunindextype *lrw, sunindextype *liw); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Cuda(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Cuda(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Cuda(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Cuda(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Cuda(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Cuda(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Cuda(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Cuda(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Cuda(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Cuda(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Cuda(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Cuda(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Cuda(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Cuda(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Cuda(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Cuda(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_Cuda(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Cuda(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_Cuda(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Cuda(int nvec, realtype* c, 
N_Vector* X, N_Vector Z); SUNDIALS_EXPORT int N_VScaleAddMulti_Cuda(int nvec, realtype* c, N_Vector X, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_Cuda(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Cuda(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Cuda(int nvec, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Cuda(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Cuda(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Cuda(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Cuda(N_Vector x, N_Vector w, N_Vector id); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Cuda(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Cuda(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Cuda(N_Vector x, void *buf); /* OPTIONAL operations for debugging */ SUNDIALS_EXPORT void N_VPrint_Cuda(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Cuda(N_Vector v, FILE *outfile); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int 
N_VEnableScaleAddMulti_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Cuda(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Cuda(N_Vector v, booleantype tf); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_pthreads.h0000644000176200001440000003023014645137106022263 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the POSIX Threads (Pthreads) * implementation of the NVECTOR module using LOCAL data structs * to share data between threads. * * Notes: * * - The definition of the generic N_Vector structure can be found * in the header file sundials_nvector.h. 
* * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype'. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_Pthreads(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_PTHREADS_H #define _NVECTOR_PTHREADS_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * Pthreads implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_Pthreads { sunindextype length; /* vector length */ booleantype own_data; /* data ownership flag */ realtype *data; /* data array */ int num_threads; /* number of POSIX threads */ }; typedef struct _N_VectorContent_Pthreads *N_VectorContent_Pthreads; /* Structure to hold parallelization information for each thread when calling "companion" functions to compute vector operations. The start and end vector (loop) indices are unique to each thread, the realtype variables are the same for each thread, and the mutex variable is used to lock variables in reductions. 
*/ struct _Pthreads_Data{ sunindextype start; /* starting index for loop */ sunindextype end; /* ending index for loop */ realtype c1, c2; /* scalar values */ realtype *v1, *v2, *v3; /* vector data */ realtype *global_val; /* shared global variable */ pthread_mutex_t *global_mutex; /* lock for shared variable */ int nvec; /* number of vectors in fused op */ int nsum; /* number of sums in fused op */ realtype* cvals; /* scalar values in fused op */ N_Vector x1; /* vector array in fused op */ N_Vector x2; /* vector array in fused op */ N_Vector x3; /* vector array in fused op */ N_Vector* Y1; /* vector array in fused op */ N_Vector* Y2; /* vector array in fused op */ N_Vector* Y3; /* vector array in fused op */ N_Vector** ZZ1; /* array of vector arrays in fused op */ N_Vector** ZZ2; /* array of vector arrays in fused op */ }; typedef struct _Pthreads_Data Pthreads_Data; /* * ----------------------------------------------------------------- * Macros NV_CONTENT_PT, NV_DATA_PT, NV_OWN_DATA_PT, * NV_LENGTH_PT, and NV_Ith_PT * ----------------------------------------------------------------- */ #define NV_CONTENT_PT(v) ( (N_VectorContent_Pthreads)(v->content) ) #define NV_LENGTH_PT(v) ( NV_CONTENT_PT(v)->length ) #define NV_NUM_THREADS_PT(v) ( NV_CONTENT_PT(v)->num_threads ) #define NV_OWN_DATA_PT(v) ( NV_CONTENT_PT(v)->own_data ) #define NV_DATA_PT(v) ( NV_CONTENT_PT(v)->data ) #define NV_Ith_PT(v,i) ( NV_DATA_PT(v)[i] ) /* * ----------------------------------------------------------------- * Functions exported by nvector_Pthreads * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNew_Pthreads(sunindextype vec_length, int n_threads, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNewEmpty_Pthreads(sunindextype vec_length, int n_threads, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_Pthreads(sunindextype vec_length, int n_threads, realtype *v_data, SUNContext sunctx); SUNDIALS_EXPORT sunindextype 
N_VGetLength_Pthreads(N_Vector v); SUNDIALS_EXPORT void N_VPrint_Pthreads(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_Pthreads(N_Vector v, FILE *outfile); SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_Pthreads(N_Vector v); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_Pthreads(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_Pthreads(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_Pthreads(N_Vector v); SUNDIALS_EXPORT void N_VSpace_Pthreads(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT realtype *N_VGetArrayPointer_Pthreads(N_Vector v); SUNDIALS_EXPORT void N_VSetArrayPointer_Pthreads(realtype *v_data, N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_Pthreads(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_Pthreads(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_Pthreads(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_Pthreads(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_Pthreads(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_Pthreads(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_Pthreads(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_Pthreads(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_Pthreads(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_Pthreads(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_Pthreads(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_Pthreads(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_Pthreads(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_Pthreads(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_Pthreads(N_Vector x); SUNDIALS_EXPORT void N_VCompare_Pthreads(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_Pthreads(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_Pthreads(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype 
N_VMinQuotient_Pthreads(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_Pthreads(int nvec, realtype* c, N_Vector* X, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_Pthreads(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_Pthreads(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_Pthreads(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_Pthreads(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_Pthreads(int nvec, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_Pthreads(int nvec, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_Pthreads(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_Pthreads(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_Pthreads(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VWSqrSumLocal_Pthreads(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_Pthreads(N_Vector x, N_Vector w, N_Vector id); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_Pthreads(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_Pthreads(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_Pthreads(N_Vector x, void *buf); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int 
N_VEnableLinearCombination_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_Pthreads(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_Pthreads(N_Vector v, booleantype tf); /* * ----------------------------------------------------------------- * Deprecated functions * ----------------------------------------------------------------- */ /* use N_VCloneVectorArray */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArray_Pthreads(int count, N_Vector w); /* use N_VCloneVectorArrayEmpty */ SUNDIALS_DEPRECATED_EXPORT N_Vector* N_VCloneVectorArrayEmpty_Pthreads(int count, N_Vector w); /* use N_VDestroyVectorArray */ SUNDIALS_DEPRECATED_EXPORT void N_VDestroyVectorArray_Pthreads(N_Vector* vs, int count); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_manyvector.h0000644000176200001440000002157414645137106022653 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the main header file for the "ManyVector" implementation * of the NVECTOR module. * * Notes: * * - The definition of the generic N_Vector structure can be found * in the header file sundials_nvector.h. * * - The definitions of the types 'realtype' and 'sunindextype' can * be found in the header file sundials_types.h, and it may be * changed (at the configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype'. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_ManyVector(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_MANY_VECTOR_H #define _NVECTOR_MANY_VECTOR_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------- ManyVector implementation of N_Vector ----------------------------------------------------------------- */ struct _N_VectorContent_ManyVector { sunindextype num_subvectors; /* number of vectors attached */ sunindextype global_length; /* overall manyvector length */ N_Vector* subvec_array; /* pointer to N_Vector array */ booleantype own_data; /* flag indicating data ownership */ }; typedef struct _N_VectorContent_ManyVector *N_VectorContent_ManyVector; /* ----------------------------------------------------------------- functions exported by ManyVector ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNew_ManyVector(sunindextype num_subvectors, N_Vector *vec_array, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VGetSubvector_ManyVector(N_Vector v, sunindextype vec_num); SUNDIALS_EXPORT 
realtype *N_VGetSubvectorArrayPointer_ManyVector(N_Vector v, sunindextype vec_num); SUNDIALS_EXPORT int N_VSetSubvectorArrayPointer_ManyVector(realtype *v_data, N_Vector v, sunindextype vec_num); SUNDIALS_EXPORT sunindextype N_VGetNumSubvectors_ManyVector(N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_ManyVector(N_Vector v); SUNDIALS_EXPORT void N_VPrint_ManyVector(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_ManyVector(N_Vector v, FILE *outfile); SUNDIALS_EXPORT N_Vector N_VCloneEmpty_ManyVector(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_ManyVector(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_ManyVector(N_Vector v); SUNDIALS_EXPORT void N_VSpace_ManyVector(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT sunindextype N_VGetLength_ManyVector(N_Vector v); SUNDIALS_EXPORT void N_VLinearSum_ManyVector(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_ManyVector(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_ManyVector(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_ManyVector(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_ManyVector(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_ManyVector(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_ManyVector(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_ManyVector(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VWrmsNorm_ManyVector(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_ManyVector(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VWL2Norm_ManyVector(N_Vector x, N_Vector w); SUNDIALS_EXPORT void N_VCompare_ManyVector(realtype c, N_Vector x, N_Vector z); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_ManyVector(int nvec, realtype* c, N_Vector* V, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti_ManyVector(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); 
SUNDIALS_EXPORT int N_VDotProdMulti_ManyVector(int nvec, N_Vector x, N_Vector *Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_ManyVector(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_ManyVector(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_ManyVector(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_ManyVector(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_ManyVector(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VDotProdLocal_ManyVector(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNormLocal_ManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VMinLocal_ManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VL1NormLocal_ManyVector(N_Vector x); SUNDIALS_EXPORT realtype N_VWSqrSumLocal_ManyVector(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_ManyVector(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT booleantype N_VInvTestLocal_ManyVector(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMaskLocal_ManyVector(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotientLocal_ManyVector(N_Vector num, N_Vector denom); /* OPTIONAL single buffer reduction operations */ SUNDIALS_EXPORT int N_VDotProdMultiLocal_ManyVector(int nvec, N_Vector x, N_Vector *Y, realtype* dotprods); /* OPTIONAL XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_ManyVector(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_ManyVector(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_ManyVector(N_Vector x, void *buf); /* ----------------------------------------------------------------- Enable / disable fused vector operations 
----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_ManyVector(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMultiLocal_ManyVector(N_Vector v, booleantype tf); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/nvector/nvector_parhyp.h0000644000176200001440000002617014645137106021764 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Jean M. Sexton @ SMU * Slaven Peles @ LLNL * ----------------------------------------------------------------- * Based on work by: Scott D. Cohen, Alan C. Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the main header file for the ParHyp implementation * of the NVECTOR module. * * Notes: * * - The definition of the generic N_Vector structure can be * found in the header file sundials_nvector.h. 
* * - The definition of the type realtype can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type booleantype. * * - N_Vector arguments to arithmetic vector operations need not * be distinct. For example, the following call: * * N_VLinearSum_ParHyp(a,x,b,y,y); * * (which stores the result of the operation a*x+b*y in y) * is legal. * -----------------------------------------------------------------*/ #ifndef _NVECTOR_PARHYP_H #define _NVECTOR_PARHYP_H #include #include #include #include /* hypre header files */ #include <_hypre_parcsr_mv.h> #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * ParHyp implementation of N_Vector * ----------------------------------------------------------------- */ struct _N_VectorContent_ParHyp { sunindextype local_length; /* local vector length */ sunindextype global_length; /* global vector length */ booleantype own_parvector; /* ownership of HYPRE vector */ MPI_Comm comm; /* pointer to MPI communicator */ HYPRE_ParVector x; /* the actual HYPRE_ParVector object */ }; typedef struct _N_VectorContent_ParHyp *N_VectorContent_ParHyp; /* * ----------------------------------------------------------------- * Functions exported by nvector_parhyp * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_ParHyp(MPI_Comm comm, sunindextype local_length, sunindextype global_length, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VMake_ParHyp(HYPRE_ParVector x, SUNContext sunctx); SUNDIALS_EXPORT HYPRE_ParVector N_VGetVector_ParHyp(N_Vector v); SUNDIALS_EXPORT void N_VPrint_ParHyp(N_Vector v); SUNDIALS_EXPORT void N_VPrintFile_ParHyp(N_Vector v, FILE *outfile); SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID_ParHyp(N_Vector v); SUNDIALS_EXPORT N_Vector 
N_VCloneEmpty_ParHyp(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_ParHyp(N_Vector w); SUNDIALS_EXPORT void N_VDestroy_ParHyp(N_Vector v); SUNDIALS_EXPORT void N_VSpace_ParHyp(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT realtype *N_VGetArrayPointer_ParHyp(N_Vector v); SUNDIALS_EXPORT void N_VSetArrayPointer_ParHyp(realtype *v_data, N_Vector v); SUNDIALS_EXPORT void *N_VGetCommunicator_ParHyp(N_Vector v); SUNDIALS_EXPORT sunindextype N_VGetLength_ParHyp(N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_ParHyp(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_ParHyp(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd_ParHyp(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_ParHyp(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_ParHyp(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_ParHyp(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_ParHyp(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_ParHyp(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_ParHyp(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_ParHyp(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_ParHyp(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_ParHyp(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_ParHyp(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_ParHyp(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_ParHyp(N_Vector x); SUNDIALS_EXPORT void N_VCompare_ParHyp(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_ParHyp(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_ParHyp(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_ParHyp(N_Vector num, N_Vector denom); /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination_ParHyp(int nvec, realtype* c, N_Vector* X, N_Vector z); 
SUNDIALS_EXPORT int N_VScaleAddMulti_ParHyp(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti_ParHyp(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray_ParHyp(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray_ParHyp(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray_ParHyp(int nvecs, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray_ParHyp(int nvecs, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray_ParHyp(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray_ParHyp(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray_ParHyp(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* OPTIONAL local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VDotProdLocal_ParHyp(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNormLocal_ParHyp(N_Vector x); SUNDIALS_EXPORT realtype N_VMinLocal_ParHyp(N_Vector x); SUNDIALS_EXPORT realtype N_VL1NormLocal_ParHyp(N_Vector x); SUNDIALS_EXPORT realtype N_VWSqrSumLocal_ParHyp(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal_ParHyp(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT booleantype N_VInvTestLocal_ParHyp(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMaskLocal_ParHyp(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotientLocal_ParHyp(N_Vector num, N_Vector denom); /* OPTIONAL single buffer reduction operations */ SUNDIALS_EXPORT int N_VDotProdMultiLocal_ParHyp(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); SUNDIALS_EXPORT int N_VDotProdMultiAllReduce_ParHyp(int nvec, N_Vector x, realtype* sum); /* OPTIONAL 
XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize_ParHyp(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack_ParHyp(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack_ParHyp(N_Vector x, void *buf); /* * ----------------------------------------------------------------- * Enable / disable fused vector operations * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int N_VEnableFusedOps_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombination_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMulti_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableDotProdMulti_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearSumVectorArray_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleVectorArray_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableConstVectorArray_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormVectorArray_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableWrmsNormMaskVectorArray_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableScaleAddMultiVectorArray_ParHyp(N_Vector v, booleantype tf); SUNDIALS_EXPORT int N_VEnableLinearCombinationVectorArray_ParHyp(N_Vector v, booleantype tf); /* * ----------------------------------------------------------------- * Deprecated functions * ----------------------------------------------------------------- */ /* use N_VCloneVectorArray */ SUNDIALS_DEPRECATED_EXPORT N_Vector *N_VCloneVectorArray_ParHyp(int count, N_Vector w); /* use N_VCloneVectorArrayEmpty */ SUNDIALS_DEPRECATED_EXPORT N_Vector *N_VCloneVectorArrayEmpty_ParHyp(int count, N_Vector w); /* use N_VDestroyVectorArray */ SUNDIALS_DEPRECATED_EXPORT void N_VDestroyVectorArray_ParHyp(N_Vector *vs, int count); SUNDIALS_EXPORT int N_VEnableDotProdMultiLocal_ParHyp(N_Vector v, booleantype tf); #ifdef __cplusplus } #endif #endif 
StanHeaders/inst/include/sundials_debug.h0000644000176200001440000000257314645137106020232 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header files defines internal utility functions and macros * for SUNDIALS debugging. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_DEBUG_H #define _SUNDIALS_DEBUG_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * Macro which prints to stderr when in debug mode */ #ifdef SUNDIALS_DEBUG #define SUNDIALS_DEBUG_PRINT(str) fprintf(stderr, str) #else #define SUNDIALS_DEBUG_PRINT(str) #endif /* * Macro which prints error messages in debug mode */ #ifdef SUNDIALS_DEBUG #define SUNDIALS_DEBUG_ERROR(msg) \ fprintf(stderr, "ERROR in %s (%s line %d): %s", \ __func__, __FILE__, __LINE__, msg); #else #define SUNDIALS_DEBUG_ERROR(msg) #endif #ifdef __cplusplus /* wrapper to enable C++ usage */ } #endif #endif /* _SUNDIALS_DEBUG_H */ StanHeaders/inst/include/sunmatrix/0000755000176200001440000000000014511135055017105 5ustar liggesusersStanHeaders/inst/include/sunmatrix/sunmatrix_sparse.h0000644000176200001440000001210514645137106022673 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * David Gardner @ LLNL * Based on code sundials_sparse.h by: Carol Woodward and * Slaven Peles @ LLNL, and Daniel R. 
Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the sparse implementation of the * SUNMATRIX module, SUNMATRIX_SPARSE. * * Notes: * - The definition of the generic SUNMatrix structure can be found * in the header file sundials_matrix.h. * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype' and 'indextype'. * ----------------------------------------------------------------- */ #ifndef _SUNMATRIX_SPARSE_H #define _SUNMATRIX_SPARSE_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ------------------------ * Matrix Type Definitions * ------------------------ */ #define CSC_MAT 0 #define CSR_MAT 1 /* ------------------------------------------ * Sparse Implementation of SUNMATRIX_SPARSE * ------------------------------------------ */ struct _SUNMatrixContent_Sparse { sunindextype M; sunindextype N; sunindextype NNZ; sunindextype NP; realtype *data; int sparsetype; sunindextype *indexvals; sunindextype *indexptrs; /* CSC indices */ sunindextype **rowvals; sunindextype **colptrs; /* CSR indices */ sunindextype **colvals; sunindextype **rowptrs; }; typedef struct _SUNMatrixContent_Sparse *SUNMatrixContent_Sparse; /* --------------------------------------- * Macros for access to SUNMATRIX_SPARSE * --------------------------------------- */ #define SM_CONTENT_S(A) ( (SUNMatrixContent_Sparse)(A->content) 
) #define SM_ROWS_S(A) ( SM_CONTENT_S(A)->M ) #define SM_COLUMNS_S(A) ( SM_CONTENT_S(A)->N ) #define SM_NNZ_S(A) ( SM_CONTENT_S(A)->NNZ ) #define SM_NP_S(A) ( SM_CONTENT_S(A)->NP ) #define SM_SPARSETYPE_S(A) ( SM_CONTENT_S(A)->sparsetype ) #define SM_DATA_S(A) ( SM_CONTENT_S(A)->data ) #define SM_INDEXVALS_S(A) ( SM_CONTENT_S(A)->indexvals ) #define SM_INDEXPTRS_S(A) ( SM_CONTENT_S(A)->indexptrs ) /* ---------------------------------------- * Exported Functions for SUNMATRIX_SPARSE * ---------------------------------------- */ SUNDIALS_EXPORT SUNMatrix SUNSparseMatrix(sunindextype M, sunindextype N, sunindextype NNZ, int sparsetype, SUNContext sunctx); SUNDIALS_EXPORT SUNMatrix SUNSparseFromDenseMatrix(SUNMatrix A, realtype droptol, int sparsetype); SUNDIALS_EXPORT SUNMatrix SUNSparseFromBandMatrix(SUNMatrix A, realtype droptol, int sparsetype); SUNDIALS_EXPORT int SUNSparseMatrix_ToCSR(const SUNMatrix A, SUNMatrix* Bout); SUNDIALS_EXPORT int SUNSparseMatrix_ToCSC(const SUNMatrix A, SUNMatrix* Bout); SUNDIALS_EXPORT int SUNSparseMatrix_Realloc(SUNMatrix A); SUNDIALS_EXPORT int SUNSparseMatrix_Reallocate(SUNMatrix A, sunindextype NNZ); SUNDIALS_EXPORT void SUNSparseMatrix_Print(SUNMatrix A, FILE* outfile); SUNDIALS_EXPORT sunindextype SUNSparseMatrix_Rows(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNSparseMatrix_Columns(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNSparseMatrix_NNZ(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNSparseMatrix_NP(SUNMatrix A); SUNDIALS_EXPORT int SUNSparseMatrix_SparseType(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNSparseMatrix_Data(SUNMatrix A); SUNDIALS_EXPORT sunindextype* SUNSparseMatrix_IndexValues(SUNMatrix A); SUNDIALS_EXPORT sunindextype* SUNSparseMatrix_IndexPointers(SUNMatrix A); SUNDIALS_EXPORT SUNMatrix_ID SUNMatGetID_Sparse(SUNMatrix A); SUNDIALS_EXPORT SUNMatrix SUNMatClone_Sparse(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy_Sparse(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero_Sparse(SUNMatrix A); SUNDIALS_EXPORT int 
SUNMatCopy_Sparse(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd_Sparse(realtype c, SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI_Sparse(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec_Sparse(SUNMatrix A, N_Vector x, N_Vector y); SUNDIALS_EXPORT int SUNMatSpace_Sparse(SUNMatrix A, long int *lenrw, long int *leniw); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunmatrix/sunmatrix_band.h0000644000176200001440000001102314645137106022300 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * David Gardner @ LLNL * Based on code sundials_direct.h by: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the band implementation of the * SUNMATRIX module, SUNMATRIX_BAND. * * Notes: * - The definition of the generic SUNMatrix structure can be found * in the header file sundials_matrix.h. * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype' and 'indextype'. 
* ----------------------------------------------------------------- */ #ifndef _SUNMATRIX_BAND_H #define _SUNMATRIX_BAND_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* --------------------------------- * Band implementation of SUNMatrix * --------------------------------- */ struct _SUNMatrixContent_Band { sunindextype M; sunindextype N; sunindextype ldim; sunindextype mu; sunindextype ml; sunindextype s_mu; realtype *data; sunindextype ldata; realtype **cols; }; typedef struct _SUNMatrixContent_Band *SUNMatrixContent_Band; /* ------------------------------------ * Macros for access to SUNMATRIX_BAND * ------------------------------------ */ #define SM_CONTENT_B(A) ( (SUNMatrixContent_Band)(A->content) ) #define SM_ROWS_B(A) ( SM_CONTENT_B(A)->M ) #define SM_COLUMNS_B(A) ( SM_CONTENT_B(A)->N ) #define SM_LDATA_B(A) ( SM_CONTENT_B(A)->ldata ) #define SM_UBAND_B(A) ( SM_CONTENT_B(A)->mu ) #define SM_LBAND_B(A) ( SM_CONTENT_B(A)->ml ) #define SM_SUBAND_B(A) ( SM_CONTENT_B(A)->s_mu ) #define SM_LDIM_B(A) ( SM_CONTENT_B(A)->ldim ) #define SM_DATA_B(A) ( SM_CONTENT_B(A)->data ) #define SM_COLS_B(A) ( SM_CONTENT_B(A)->cols ) #define SM_COLUMN_B(A,j) ( ((SM_CONTENT_B(A)->cols)[j])+SM_SUBAND_B(A) ) #define SM_COLUMN_ELEMENT_B(col_j,i,j) (col_j[(i)-(j)]) #define SM_ELEMENT_B(A,i,j) ( (SM_CONTENT_B(A)->cols)[j][(i)-(j)+SM_SUBAND_B(A)] ) /* ---------------------------------------- * Exported Functions for SUNMATRIX_BAND * ---------------------------------------- */ SUNDIALS_EXPORT SUNMatrix SUNBandMatrix(sunindextype N, sunindextype mu, sunindextype ml, SUNContext sunctx); SUNDIALS_EXPORT SUNMatrix SUNBandMatrixStorage(sunindextype N, sunindextype mu, sunindextype ml, sunindextype smu, SUNContext sunctx); SUNDIALS_EXPORT void SUNBandMatrix_Print(SUNMatrix A, FILE* outfile); SUNDIALS_EXPORT sunindextype SUNBandMatrix_Rows(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNBandMatrix_Columns(SUNMatrix A); SUNDIALS_EXPORT sunindextype 
SUNBandMatrix_LowerBandwidth(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNBandMatrix_UpperBandwidth(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNBandMatrix_StoredUpperBandwidth(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNBandMatrix_LDim(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNBandMatrix_Data(SUNMatrix A); SUNDIALS_EXPORT realtype** SUNBandMatrix_Cols(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNBandMatrix_Column(SUNMatrix A, sunindextype j); SUNDIALS_EXPORT SUNMatrix_ID SUNMatGetID_Band(SUNMatrix A); SUNDIALS_EXPORT SUNMatrix SUNMatClone_Band(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy_Band(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero_Band(SUNMatrix A); SUNDIALS_EXPORT int SUNMatCopy_Band(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd_Band(realtype c, SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI_Band(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec_Band(SUNMatrix A, N_Vector x, N_Vector y); SUNDIALS_EXPORT int SUNMatSpace_Band(SUNMatrix A, long int *lenrw, long int *leniw); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunmatrix/sunmatrix_onemkldense.h0000644000176200001440000001337714645137106023716 0ustar liggesusers/* --------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * --------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * --------------------------------------------------------------------------- * This is the header file for the dense implementation of the SUNMATRIX * class using the Intel oneAPI Math Kernel Library (oneMKL). 
* ---------------------------------------------------------------------------*/ #ifndef _SUNMATRIX_ONEMKLDENSE_H #define _SUNMATRIX_ONEMKLDENSE_H #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif struct _SUNMatrixContent_OneMklDense { int last_flag; /* last error code returned */ sunindextype block_rows; /* number of rows in a block */ sunindextype block_cols; /* number of columns in a block */ sunindextype num_blocks; /* number of blocks in the matrix */ sunindextype rows; /* total number of rows */ sunindextype cols; /* total number of columns */ sunindextype ldata; /* length of data array */ SUNMemory data; /* matrix data; column-major */ SUNMemory blocks; /* device pointers to blocks of A */ SUNSyclExecPolicy* exec_policy; /* execution policy */ SUNMemoryType mem_type; /* memory type */ SUNMemoryHelper mem_helper; /* memory helper */ ::sycl::queue* queue; /* operation queue */ }; typedef struct _SUNMatrixContent_OneMklDense *SUNMatrixContent_OneMklDense; /* --------------------------------------------------------------------------- * Implementation specific functions * ---------------------------------------------------------------------------*/ /* Constructors */ SUNDIALS_EXPORT SUNMatrix SUNMatrix_OneMklDense(sunindextype M, sunindextype N, SUNMemoryType mem_type, SUNMemoryHelper mem_helper, ::sycl::queue* queue, SUNContext sunctx); SUNDIALS_EXPORT SUNMatrix SUNMatrix_OneMklDenseBlock(sunindextype num_blocks, sunindextype M_block, sunindextype N_block, SUNMemoryType mem_type, SUNMemoryHelper mem_helper, ::sycl::queue* queue, SUNContext sunctx); /* Get matrix dimensions */ SUNDIALS_EXPORT sunindextype SUNMatrix_OneMklDense_Rows(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_OneMklDense_Columns(SUNMatrix A); /* Get matrix block dimensions */ SUNDIALS_EXPORT sunindextype SUNMatrix_OneMklDense_NumBlocks(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_OneMklDense_BlockRows(SUNMatrix A); 
SUNDIALS_EXPORT sunindextype SUNMatrix_OneMklDense_BlockColumns(SUNMatrix A); /* Get matrix data */ SUNDIALS_EXPORT sunindextype SUNMatrix_OneMklDense_LData(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNMatrix_OneMklDense_Data(SUNMatrix A); SUNDIALS_STATIC_INLINE realtype* SUNMatrix_OneMklDense_Column(SUNMatrix Amat, sunindextype j) { SUNMatrixContent_OneMklDense A = (SUNMatrixContent_OneMklDense) Amat->content; return( ((realtype*) A->data->ptr) + j * A->block_rows ); } /* Get matrix block data */ SUNDIALS_EXPORT sunindextype SUNMatrix_OneMklDense_BlockLData(SUNMatrix A); SUNDIALS_EXPORT realtype** SUNMatrix_OneMklDense_BlockData(SUNMatrix A); SUNDIALS_STATIC_INLINE realtype* SUNMatrix_OneMklDense_Block(SUNMatrix Amat, sunindextype k) { SUNMatrixContent_OneMklDense A = (SUNMatrixContent_OneMklDense) Amat->content; return( ((realtype*) A->data->ptr) + k * A->block_rows * A->block_cols ); } SUNDIALS_STATIC_INLINE realtype* SUNMatrix_OneMklDense_BlockColumn(SUNMatrix Amat, sunindextype k, sunindextype j) { SUNMatrixContent_OneMklDense A = (SUNMatrixContent_OneMklDense) Amat->content; return( ((realtype*) A->data->ptr) + k * A->block_rows * A->block_cols + j * A->block_rows ); } /* Copy data */ SUNDIALS_EXPORT int SUNMatrix_OneMklDense_CopyToDevice(SUNMatrix A, realtype* h_data); SUNDIALS_EXPORT int SUNMatrix_OneMklDense_CopyFromDevice(SUNMatrix A, realtype* h_data); /* --------------------------------------------------------------------------- * SUNMatrix API functions * ---------------------------------------------------------------------------*/ SUNDIALS_STATIC_INLINE SUNMatrix_ID SUNMatGetID_OneMklDense(SUNMatrix A) { return SUNMATRIX_ONEMKLDENSE; } SUNDIALS_EXPORT SUNMatrix SUNMatClone_OneMklDense(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy_OneMklDense(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero_OneMklDense(SUNMatrix A); SUNDIALS_EXPORT int SUNMatCopy_OneMklDense(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd_OneMklDense(realtype c, SUNMatrix A, 
SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI_OneMklDense(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvecSetup_OneMklDense(SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec_OneMklDense(SUNMatrix A, N_Vector x, N_Vector y); SUNDIALS_EXPORT int SUNMatSpace_OneMklDense(SUNMatrix A, long int *lenrw, long int *leniw); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunmatrix/sunmatrix_dense.h0000644000176200001440000000711314645137106022477 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * David Gardner @ LLNL * Based on code sundials_direct.h by: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the dense implementation of the * SUNMATRIX module, SUNMATRIX_DENSE. * * Notes: * - The definition of the generic SUNMatrix structure can be found * in the header file sundials_matrix.h. * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype' and 'indextype'. 
* ----------------------------------------------------------------- */ #ifndef _SUNMATRIX_DENSE_H #define _SUNMATRIX_DENSE_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ---------------------------------- * Dense implementation of SUNMatrix * ---------------------------------- */ struct _SUNMatrixContent_Dense { sunindextype M; sunindextype N; realtype *data; sunindextype ldata; realtype **cols; }; typedef struct _SUNMatrixContent_Dense *SUNMatrixContent_Dense; /* ------------------------------------ * Macros for access to SUNMATRIX_DENSE * ------------------------------------ */ #define SM_CONTENT_D(A) ( (SUNMatrixContent_Dense)(A->content) ) #define SM_ROWS_D(A) ( SM_CONTENT_D(A)->M ) #define SM_COLUMNS_D(A) ( SM_CONTENT_D(A)->N ) #define SM_LDATA_D(A) ( SM_CONTENT_D(A)->ldata ) #define SM_DATA_D(A) ( SM_CONTENT_D(A)->data ) #define SM_COLS_D(A) ( SM_CONTENT_D(A)->cols ) #define SM_COLUMN_D(A,j) ( (SM_CONTENT_D(A)->cols)[j] ) #define SM_ELEMENT_D(A,i,j) ( (SM_CONTENT_D(A)->cols)[j][i] ) /* --------------------------------------- * Exported Functions for SUNMATRIX_DENSE * --------------------------------------- */ SUNDIALS_EXPORT SUNMatrix SUNDenseMatrix(sunindextype M, sunindextype N, SUNContext sunctx); SUNDIALS_EXPORT void SUNDenseMatrix_Print(SUNMatrix A, FILE* outfile); SUNDIALS_EXPORT sunindextype SUNDenseMatrix_Rows(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNDenseMatrix_Columns(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNDenseMatrix_LData(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNDenseMatrix_Data(SUNMatrix A); SUNDIALS_EXPORT realtype** SUNDenseMatrix_Cols(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNDenseMatrix_Column(SUNMatrix A, sunindextype j); SUNDIALS_EXPORT SUNMatrix_ID SUNMatGetID_Dense(SUNMatrix A); SUNDIALS_EXPORT SUNMatrix SUNMatClone_Dense(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy_Dense(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero_Dense(SUNMatrix A); SUNDIALS_EXPORT int 
SUNMatCopy_Dense(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd_Dense(realtype c, SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI_Dense(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec_Dense(SUNMatrix A, N_Vector x, N_Vector y); SUNDIALS_EXPORT int SUNMatSpace_Dense(SUNMatrix A, long int *lenrw, long int *leniw); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunmatrix/sunmatrix_slunrloc.h0000644000176200001440000000630014645137106023237 0ustar liggesusers/* * ---------------------------------------------------------------------------- * Programmer(s): Cody Balos @ LLNL * ---------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------------------- * This is the header file for the SuperLU SLU_NR_loc SUNMatrix. * ---------------------------------------------------------------------------- */ #ifndef _SUNMATRIX_SUPERLUNRLOC_H #define _SUNMATRIX_SUPERLUNRLOC_H #include #include #include #ifdef __cplusplus extern "C" { #endif /* ---------------------------------------------------------------------------- * PART 1: SUNMatrix wrapper for the SuperLU SuperMatrix structure with * Stype = SLU_NR_loc, i.e. a SuperMatrix stored in a distributed * CSR format. 
* ---------------------------------------------------------------------------*/ struct _SUNMatrixContent_SLUNRloc { booleantype own_data; gridinfo_t *grid; sunindextype *row_to_proc; pdgsmv_comm_t *gsmv_comm; SuperMatrix *A_super; SuperMatrix *ACS_super; }; typedef struct _SUNMatrixContent_SLUNRloc *SUNMatrixContent_SLUNRloc; /* ---------------------------------------------------------------------------- * PART 2: Functions exported by SUNMatrix_SLUNRloc: * --------------------------------------------------------------------------*/ SUNDIALS_EXPORT SUNMatrix SUNMatrix_SLUNRloc(SuperMatrix *A_super, gridinfo_t *grid, SUNContext sunctx); SUNDIALS_EXPORT void SUNMatrix_SLUNRloc_Print(SUNMatrix A, FILE *fp); /* ---------------------------------------------------------------------------- * Accessor Functions: * --------------------------------------------------------------------------*/ SUNDIALS_EXPORT SuperMatrix* SUNMatrix_SLUNRloc_SuperMatrix(SUNMatrix A); SUNDIALS_EXPORT gridinfo_t* SUNMatrix_SLUNRloc_ProcessGrid(SUNMatrix A); SUNDIALS_EXPORT booleantype SUNMatrix_SLUNRloc_OwnData(SUNMatrix A); /* ----------------------------------------------------------------------------- * SuperLU implementations of various SUNMatrix operations: * ----------------------------------------------------------------------------*/ SUNDIALS_EXPORT SUNMatrix_ID SUNMatGetID_SLUNRloc(SUNMatrix A); SUNDIALS_EXPORT SUNMatrix SUNMatClone_SLUNRloc(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy_SLUNRloc(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero_SLUNRloc(SUNMatrix A); SUNDIALS_EXPORT int SUNMatCopy_SLUNRloc(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd_SLUNRloc(realtype c, SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI_SLUNRloc(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvecSetup_SLUNRloc(SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec_SLUNRloc(SUNMatrix A, N_Vector x, N_Vector y); SUNDIALS_EXPORT int SUNMatSpace_SLUNRloc(SUNMatrix A, long int 
*lenrw, long int *leniw); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunmatrix/sunmatrix_magmadense.h0000644000176200001440000001200114645137106023472 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the dense implementation of the * SUNMATRIX module, SUNMATRIX_MAGMADENSE. * ----------------------------------------------------------------- */ #ifndef _SUNMATRIX_MAGMADENSE_H #define _SUNMATRIX_MAGMADENSE_H #include #include #include #if defined(SUNDIALS_MAGMA_BACKENDS_CUDA) #define HAVE_CUBLAS #elif defined(SUNDIALS_MAGMA_BACKENDS_HIP) #define HAVE_HIP #endif #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif struct _SUNMatrixContent_MagmaDense { int last_flag; /* last error code returned by magma */ int device_id; /* device ID used by magma */ sunindextype M; /* number of rows in block */ sunindextype N; /* number of columns in block */ sunindextype nblocks; /* number of blocks in matrix */ sunindextype ldata; /* length of data array */ SUNMemory data; /* matrix data; column-major */ SUNMemory blocks; /* device pointers to blocks of A */ SUNMemory xblocks; /* device pointers to blocks of x */ SUNMemory yblocks; /* device pointers to blocks of y */ SUNMemoryHelper memhelp; /* memory helper */ magma_queue_t q; /* operation queue (i.e. 
stream) */ }; typedef struct _SUNMatrixContent_MagmaDense *SUNMatrixContent_MagmaDense; /* --------------------------------------- * Implementation specific functions * ---------------------------------------*/ SUNDIALS_EXPORT SUNMatrix SUNMatrix_MagmaDense(sunindextype M, sunindextype N, SUNMemoryType memtype, SUNMemoryHelper memhelper, void* queue, SUNContext sunctx); SUNDIALS_EXPORT SUNMatrix SUNMatrix_MagmaDenseBlock(sunindextype nblocks, sunindextype M, sunindextype N, SUNMemoryType memtype, SUNMemoryHelper memhelper, void* queue, SUNContext sunctx); SUNDIALS_EXPORT void SUNMatrix_MagmaDense_Print(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNMatrix_MagmaDense_Data(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_MagmaDense_LData(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_MagmaDense_Rows(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_MagmaDense_Columns(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_MagmaDense_BlockRows(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_MagmaDense_BlockColumns(SUNMatrix A); SUNDIALS_EXPORT sunindextype SUNMatrix_MagmaDense_NumBlocks(SUNMatrix A); SUNDIALS_EXPORT realtype** SUNMatrix_MagmaDense_BlockData(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_MagmaDense_CopyToDevice(SUNMatrix A, realtype* h_data); SUNDIALS_EXPORT int SUNMatrix_MagmaDense_CopyFromDevice(SUNMatrix A, realtype* h_data); SUNDIALS_STATIC_INLINE realtype* SUNMatrix_MagmaDense_Block(SUNMatrix Amat, sunindextype k) { SUNMatrixContent_MagmaDense A = (SUNMatrixContent_MagmaDense) Amat->content; return( ((realtype*) A->data->ptr) + k*A->M*A->N ); } SUNDIALS_STATIC_INLINE realtype* SUNMatrix_MagmaDense_Column(SUNMatrix Amat, sunindextype j) { SUNMatrixContent_MagmaDense A = (SUNMatrixContent_MagmaDense) Amat->content; return( ((realtype*) A->data->ptr) + j*A->M ); } SUNDIALS_STATIC_INLINE realtype* SUNMatrix_MagmaDense_BlockColumn(SUNMatrix Amat, sunindextype k, sunindextype j) { SUNMatrixContent_MagmaDense A = (SUNMatrixContent_MagmaDense) 
Amat->content; return( ((realtype*) A->data->ptr) + k*A->M*A->N + j*A->M ); } /* --------------------------------------- * SUNMatrix API functions * ---------------------------------------*/ SUNDIALS_STATIC_INLINE SUNMatrix_ID SUNMatGetID_MagmaDense(SUNMatrix A) { return SUNMATRIX_MAGMADENSE; } SUNDIALS_EXPORT SUNMatrix SUNMatClone_MagmaDense(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy_MagmaDense(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero_MagmaDense(SUNMatrix A); SUNDIALS_EXPORT int SUNMatCopy_MagmaDense(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd_MagmaDense(realtype c, SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI_MagmaDense(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvecSetup_MagmaDense(SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec_MagmaDense(SUNMatrix A, N_Vector x, N_Vector y); SUNDIALS_EXPORT int SUNMatSpace_MagmaDense(SUNMatrix A, long int *lenrw, long int *leniw); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunmatrix/sunmatrix_cusparse.h0000644000176200001440000001237714645137106023236 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file is for the cuSPARSE implementation of the * SUNMATRIX module. 
* ----------------------------------------------------------------- */ #ifndef _SUNMATRIX_CUSPARSE_H #define _SUNMATRIX_CUSPARSE_H #include #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ------------------------------------------ * Implementation of SUNMATRIX_CUSPARSE * ------------------------------------------ */ /* storage formats */ #define SUNMAT_CUSPARSE_CSR 0 #define SUNMAT_CUSPARSE_BCSR 1 struct _SUNMatrix_Content_cuSparse { int M; int N; int NNZ; int nblocks; int blockrows; int blockcols; int blocknnz; int sparse_type; booleantype own_matd; booleantype fixed_pattern; booleantype matvec_issetup; SUNMemory colind; SUNMemory rowptrs; SUNMemory data; SUNMemoryHelper mem_helper; cusparseMatDescr_t mat_descr; #if CUDART_VERSION >= 11000 SUNMemory dBufferMem; size_t bufferSize; cusparseDnVecDescr_t vecX, vecY; cusparseSpMatDescr_t spmat_descr; #endif cusparseHandle_t cusp_handle; SUNCudaExecPolicy* exec_policy; }; typedef struct _SUNMatrix_Content_cuSparse *SUNMatrix_Content_cuSparse; /* ------------------------------------------------------------------ * Constructors. * ------------------------------------------------------------------ */ SUNDIALS_EXPORT SUNMatrix SUNMatrix_cuSparse_NewCSR(int M, int N, int NNZ, cusparseHandle_t cusp, SUNContext sunctx); SUNDIALS_EXPORT SUNMatrix SUNMatrix_cuSparse_MakeCSR(cusparseMatDescr_t mat_descr, int M, int N, int NNZ, int *rowptrs , int *colind , realtype *data, cusparseHandle_t cusp, SUNContext sunctx); /* Creates a CSR block-diagonal matrix where each block shares the same sparsity structure. Reduces memory usage by only storing the row pointers and column indices for one block. */ SUNDIALS_EXPORT SUNMatrix SUNMatrix_cuSparse_NewBlockCSR(int nblocks, int blockrows, int blockcols, int blocknnz, cusparseHandle_t cusp, SUNContext sunctx); /* ------------------------------------------------------------------ * Implementation specific routines. 
* ------------------------------------------------------------------ */ SUNDIALS_EXPORT int SUNMatrix_cuSparse_SparseType(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_Rows(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_Columns(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_NNZ(SUNMatrix A); SUNDIALS_EXPORT int* SUNMatrix_cuSparse_IndexPointers(SUNMatrix A); SUNDIALS_EXPORT int* SUNMatrix_cuSparse_IndexValues(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNMatrix_cuSparse_Data(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_SetFixedPattern(SUNMatrix A, booleantype yesno); SUNDIALS_EXPORT int SUNMatrix_cuSparse_SetKernelExecPolicy(SUNMatrix A, SUNCudaExecPolicy* exec_policy); SUNDIALS_EXPORT int SUNMatrix_cuSparse_NumBlocks(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_BlockRows(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_BlockColumns(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_BlockNNZ(SUNMatrix A); SUNDIALS_EXPORT realtype* SUNMatrix_cuSparse_BlockData(SUNMatrix A, int blockidx); SUNDIALS_EXPORT cusparseMatDescr_t SUNMatrix_cuSparse_MatDescr(SUNMatrix A); SUNDIALS_EXPORT int SUNMatrix_cuSparse_CopyToDevice(SUNMatrix device, realtype* h_data, int* h_idxptrs, int* h_idxvals); SUNDIALS_EXPORT int SUNMatrix_cuSparse_CopyFromDevice(SUNMatrix device, realtype* h_data, int* h_idxptrs, int* h_idxvals); /* ------------------------------------------------------------------ * SUNMatrix API routines. 
* ------------------------------------------------------------------ */ SUNDIALS_EXPORT SUNMatrix_ID SUNMatGetID_cuSparse(SUNMatrix A); SUNDIALS_EXPORT SUNMatrix SUNMatClone_cuSparse(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy_cuSparse(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero_cuSparse(SUNMatrix A); SUNDIALS_EXPORT int SUNMatCopy_cuSparse(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd_cuSparse(realtype c, SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI_cuSparse(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvecSetup_cuSparse(SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec_cuSparse(SUNMatrix A, N_Vector x, N_Vector y); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunnonlinsol/0000755000176200001440000000000014511135055017614 5ustar liggesusersStanHeaders/inst/include/sunnonlinsol/sunnonlinsol_petscsnes.h0000644000176200001440000001004414645137106024623 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the header file for the SUNNonlinearSolver module implementation of * a wrapper to the PETSc SNES nonlinear solvers. * * Part I defines the solver-specific content structure. * * Part II contains prototypes for the solver constructor and operations. 
* ---------------------------------------------------------------------------*/ #ifndef _SUNNONLINSOL_PETSCSNES_H #define _SUNNONLINSOL_PETSCSNES_H #include "sundials/sundials_types.h" #include "sundials/sundials_nvector.h" #include "sundials/sundials_nonlinearsolver.h" #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------------------- * I. Content structure * ---------------------------------------------------------------------------*/ struct _SUNNonlinearSolverContent_PetscSNES { int sysfn_last_err; /* last error returned by the system function Sys */ PetscErrorCode petsc_last_err; /* last error return by PETSc */ long int nconvfails; /* number of nonlinear converge failures (recoverable or not) */ long int nni; /* number of nonlinear iterations */ void *imem; /* SUNDIALS integrator memory */ SNES snes; /* PETSc SNES context */ Vec r; /* nonlinear residual */ N_Vector y, f; /* wrappers for PETSc vectors in system function */ /* functions provided by the integrator */ SUNNonlinSolSysFn Sys; /* nonlinear system function */ }; typedef struct _SUNNonlinearSolverContent_PetscSNES *SUNNonlinearSolverContent_PetscSNES; /* ----------------------------------------------------------------------------- * II: Exported functions * ---------------------------------------------------------------------------*/ /* Constructor to create solver and allocates memory */ SUNDIALS_EXPORT SUNNonlinearSolver SUNNonlinSol_PetscSNES(N_Vector y, SNES snes, SUNContext sunctx); /* SUNNonlinearSolver API functions */ SUNDIALS_EXPORT SUNNonlinearSolver_Type SUNNonlinSolGetType_PetscSNES(SUNNonlinearSolver NLS); SUNDIALS_EXPORT int SUNNonlinSolInitialize_PetscSNES(SUNNonlinearSolver NLS); SUNDIALS_EXPORT int SUNNonlinSolSolve_PetscSNES(SUNNonlinearSolver NLS, N_Vector y0, N_Vector y, N_Vector w, realtype tol, booleantype callLSetup, void* mem); SUNDIALS_EXPORT int 
SUNNonlinSolSetSysFn_PetscSNES(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn); SUNDIALS_EXPORT int SUNNonlinSolGetNumIters_PetscSNES(SUNNonlinearSolver NLS, long int* nni); SUNDIALS_EXPORT int SUNNonlinSolGetNumConvFails_PetscSNES(SUNNonlinearSolver NLS, long int* nconvfails); SUNDIALS_EXPORT int SUNNonlinSolFree_PetscSNES(SUNNonlinearSolver NLS); /* Implementation specific functions */ SUNDIALS_EXPORT int SUNNonlinSolGetSNES_PetscSNES(SUNNonlinearSolver NLS, SNES* snes); SUNDIALS_EXPORT int SUNNonlinSolGetPetscError_PetscSNES(SUNNonlinearSolver NLS, PetscErrorCode* err); SUNDIALS_EXPORT int SUNNonlinSolGetSysFn_PetscSNES(SUNNonlinearSolver NLS, SUNNonlinSolSysFn* SysFn); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunnonlinsol/sunnonlinsol_fixedpoint.h0000644000176200001440000001413314645137106024770 0ustar liggesusers/* --------------------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * --------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * --------------------------------------------------------------------------- * This is the header file for the SUNNonlinearSolver module implementation of * the Anderson-accelerated fixed-point method. * * Part I defines the solver-specific content structure. * * Part II contains prototypes for the solver constructor and operations. 
* ---------------------------------------------------------------------------*/ #ifndef _SUNNONLINSOL_FIXEDPOINT_H #define _SUNNONLINSOL_FIXEDPOINT_H #include "sundials/sundials_types.h" #include "sundials/sundials_nvector.h" #include "sundials/sundials_nonlinearsolver.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*----------------------------------------------------------------------------- I. Content structure ---------------------------------------------------------------------------*/ struct _SUNNonlinearSolverContent_FixedPoint { /* functions provided by the integrator */ SUNNonlinSolSysFn Sys; /* fixed-point iteration function */ SUNNonlinSolConvTestFn CTest; /* convergence test function */ /* nonlinear solver variables */ int m; /* number of acceleration vectors to use */ int *imap; /* array of length m */ booleantype damping; /* flag to apply dampling in acceleration */ realtype beta; /* damping paramter */ realtype *R; /* array of length m*m */ realtype *gamma; /* array of length m */ realtype *cvals; /* array of length m+1 for fused vector op */ N_Vector *df; /* vector array of length m */ N_Vector *dg; /* vector array of length m */ N_Vector *q; /* vector array of length m */ N_Vector *Xvecs; /* array of length m+1 for fused vector op */ N_Vector yprev; /* temporary vectors for performing solve */ N_Vector gy; N_Vector fold; N_Vector gold; N_Vector delta; /* correction vector (change between 2 iterates) */ int curiter; /* current iteration number in a solve attempt */ int maxiters; /* maximum number of iterations per solve attempt */ long int niters; /* total number of iterations across all solves */ long int nconvfails; /* total number of convergence failures */ void *ctest_data; /* data to pass to convergence test function */ /* if 0 (default) nothing is printed, if 1 the residual is printed every iteration */ int print_level; /* if NULL nothing is printed, if 1 the residual is printed every iteration */ FILE* info_file; 
}; typedef struct _SUNNonlinearSolverContent_FixedPoint *SUNNonlinearSolverContent_FixedPoint; /* ----------------------------------------------------------------------------- II: Exported functions ---------------------------------------------------------------------------*/ /* Constructor to create solver and allocates memory */ SUNDIALS_EXPORT SUNNonlinearSolver SUNNonlinSol_FixedPoint(N_Vector y, int m, SUNContext sunctx); SUNDIALS_EXPORT SUNNonlinearSolver SUNNonlinSol_FixedPointSens(int count, N_Vector y, int m, SUNContext sunctx); /* core functions */ SUNDIALS_EXPORT SUNNonlinearSolver_Type SUNNonlinSolGetType_FixedPoint(SUNNonlinearSolver NLS); SUNDIALS_EXPORT int SUNNonlinSolInitialize_FixedPoint(SUNNonlinearSolver NLS); SUNDIALS_EXPORT int SUNNonlinSolSolve_FixedPoint(SUNNonlinearSolver NLS, N_Vector y0, N_Vector y, N_Vector w, realtype tol, booleantype callSetup, void *mem); SUNDIALS_EXPORT int SUNNonlinSolFree_FixedPoint(SUNNonlinearSolver NLS); /* set functions */ SUNDIALS_EXPORT int SUNNonlinSolSetSysFn_FixedPoint(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn); SUNDIALS_EXPORT int SUNNonlinSolSetConvTestFn_FixedPoint(SUNNonlinearSolver NLS, SUNNonlinSolConvTestFn CTestFn, void* ctest_data); SUNDIALS_EXPORT int SUNNonlinSolSetMaxIters_FixedPoint(SUNNonlinearSolver NLS, int maxiters); SUNDIALS_EXPORT int SUNNonlinSolSetDamping_FixedPoint(SUNNonlinearSolver NLS, realtype beta); /* get functions */ SUNDIALS_EXPORT int SUNNonlinSolGetNumIters_FixedPoint(SUNNonlinearSolver NLS, long int *niters); SUNDIALS_EXPORT int SUNNonlinSolGetCurIter_FixedPoint(SUNNonlinearSolver NLS, int *iter); SUNDIALS_EXPORT int SUNNonlinSolGetNumConvFails_FixedPoint(SUNNonlinearSolver NLS, long int *nconvfails); SUNDIALS_EXPORT int SUNNonlinSolGetSysFn_FixedPoint(SUNNonlinearSolver NLS, SUNNonlinSolSysFn *SysFn); SUNDIALS_EXPORT int SUNNonlinSolSetInfoFile_FixedPoint(SUNNonlinearSolver NLS, FILE* info_file); SUNDIALS_EXPORT int 
SUNNonlinSolSetPrintLevel_FixedPoint(SUNNonlinearSolver NLS, int print_level); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunnonlinsol/sunnonlinsol_newton.h0000644000176200001440000001263314645137106024134 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the header file for the SUNNonlinearSolver module implementation of * Newton's method. * * Part I defines the solver-specific content structure. * * Part II contains prototypes for the solver constructor and operations. * ---------------------------------------------------------------------------*/ #ifndef _SUNNONLINSOL_NEWTON_H #define _SUNNONLINSOL_NEWTON_H #include "sundials/sundials_types.h" #include "sundials/sundials_nvector.h" #include "sundials/sundials_nonlinearsolver.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------------------- * I. 
Content structure
 * ---------------------------------------------------------------------------*/

struct _SUNNonlinearSolverContent_Newton {

  /* functions provided by the integrator */
  SUNNonlinSolSysFn      Sys;     /* nonlinear system residual function */
  SUNNonlinSolLSetupFn   LSetup;  /* linear solver setup function */
  SUNNonlinSolLSolveFn   LSolve;  /* linear solver solve function */
  SUNNonlinSolConvTestFn CTest;   /* nonlinear solver convergence test function */

  /* nonlinear solver variables */
  N_Vector delta;        /* Newton update vector */
  booleantype jcur;      /* Jacobian status, current = SUNTRUE / stale = SUNFALSE */
  int curiter;           /* current number of iterations in a solve attempt */
  int maxiters;          /* maximum number of iterations in a solve attempt */
  long int niters;       /* total number of nonlinear iterations across all solves */
  long int nconvfails;   /* total number of convergence failures across all solves */
  void* ctest_data;      /* data to pass to convergence test function */

  /* if 0 (default) nothing is printed, if 1 the residual is printed
     every iteration */
  int print_level;

  /* output stream for iteration information; if NULL (default) nothing
     is printed */
  FILE* info_file;
};

typedef struct _SUNNonlinearSolverContent_Newton *SUNNonlinearSolverContent_Newton;

/* -----------------------------------------------------------------------------
 * II: Exported functions
 * ---------------------------------------------------------------------------*/

/* Constructor: creates the solver object and allocates its memory */
SUNDIALS_EXPORT
SUNNonlinearSolver SUNNonlinSol_Newton(N_Vector y, SUNContext sunctx);

SUNDIALS_EXPORT
SUNNonlinearSolver SUNNonlinSol_NewtonSens(int count, N_Vector y,
                                           SUNContext sunctx);

/* core functions */
SUNDIALS_EXPORT
SUNNonlinearSolver_Type SUNNonlinSolGetType_Newton(SUNNonlinearSolver NLS);

SUNDIALS_EXPORT
int SUNNonlinSolInitialize_Newton(SUNNonlinearSolver NLS);

SUNDIALS_EXPORT
int SUNNonlinSolSolve_Newton(SUNNonlinearSolver NLS,
                             N_Vector y0, N_Vector y, N_Vector w,
                             realtype tol,
booleantype callLSetup, void *mem); SUNDIALS_EXPORT int SUNNonlinSolFree_Newton(SUNNonlinearSolver NLS); /* set functions */ SUNDIALS_EXPORT int SUNNonlinSolSetSysFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn); SUNDIALS_EXPORT int SUNNonlinSolSetLSetupFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolLSetupFn LSetupFn); SUNDIALS_EXPORT int SUNNonlinSolSetLSolveFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolLSolveFn LSolveFn); SUNDIALS_EXPORT int SUNNonlinSolSetConvTestFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolConvTestFn CTestFn, void* ctest_data); SUNDIALS_EXPORT int SUNNonlinSolSetMaxIters_Newton(SUNNonlinearSolver NLS, int maxiters); /* get functions */ SUNDIALS_EXPORT int SUNNonlinSolGetNumIters_Newton(SUNNonlinearSolver NLS, long int *niters); SUNDIALS_EXPORT int SUNNonlinSolGetCurIter_Newton(SUNNonlinearSolver NLS, int *iter); SUNDIALS_EXPORT int SUNNonlinSolGetNumConvFails_Newton(SUNNonlinearSolver NLS, long int *nconvfails); SUNDIALS_EXPORT int SUNNonlinSolGetSysFn_Newton(SUNNonlinearSolver NLS, SUNNonlinSolSysFn *SysFn); SUNDIALS_EXPORT int SUNNonlinSolSetInfoFile_Newton(SUNNonlinearSolver NLS, FILE* info_file); SUNDIALS_EXPORT int SUNNonlinSolSetPrintLevel_Newton(SUNNonlinearSolver NLS, int print_level); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/idas/0000755000176200001440000000000014511135055015773 5ustar liggesusersStanHeaders/inst/include/idas/idas_spils.h0000644000176200001440000001255614645137106020316 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Header file for the deprecated Scaled, Preconditioned Iterative * Linear Solver interface in IDAS; these routines now just wrap * the updated IDA generic linear solver interface in idas_ls.h. * -----------------------------------------------------------------*/ #ifndef _IDASSPILS_H #define _IDASSPILS_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*=============================================================== Function Types (typedefs for equivalent types in idas_ls.h) ===============================================================*/ typedef IDALsPrecSetupFn IDASpilsPrecSetupFn; typedef IDALsPrecSolveFn IDASpilsPrecSolveFn; typedef IDALsJacTimesSetupFn IDASpilsJacTimesSetupFn; typedef IDALsJacTimesVecFn IDASpilsJacTimesVecFn; typedef IDALsPrecSetupFnB IDASpilsPrecSetupFnB; typedef IDALsPrecSetupFnBS IDASpilsPrecSetupFnBS; typedef IDALsPrecSolveFnB IDASpilsPrecSolveFnB; typedef IDALsPrecSolveFnBS IDASpilsPrecSolveFnBS; typedef IDALsJacTimesSetupFnB IDASpilsJacTimesSetupFnB; typedef IDALsJacTimesSetupFnBS IDASpilsJacTimesSetupFnBS; typedef IDALsJacTimesVecFnB IDASpilsJacTimesVecFnB; typedef IDALsJacTimesVecFnBS IDASpilsJacTimesVecFnBS; /*==================================================================== Exported Functions (wrappers for equivalent routines in idas_ls.h) ====================================================================*/ SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetLinearSolver instead") int IDASpilsSetLinearSolver(void *ida_mem, SUNLinearSolver LS); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetPreconditioner instead") int IDASpilsSetPreconditioner(void *ida_mem, IDASpilsPrecSetupFn pset, IDASpilsPrecSolveFn psolve); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetJacTimes instead") int IDASpilsSetJacTimes(void *ida_mem, IDASpilsJacTimesSetupFn jtsetup, IDASpilsJacTimesVecFn jtimes); 
SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetEpsLin instead") int IDASpilsSetEpsLin(void *ida_mem, realtype eplifac); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetIncrementFactor instead") int IDASpilsSetIncrementFactor(void *ida_mem, realtype dqincfac); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetLinWorkSpace instead") int IDASpilsGetWorkSpace(void *ida_mem, long int *lenrwLS, long int *leniwLS); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumPrecEvals instead") int IDASpilsGetNumPrecEvals(void *ida_mem, long int *npevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumPrecSolves instead") int IDASpilsGetNumPrecSolves(void *ida_mem, long int *npsolves); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumLinIters instead") int IDASpilsGetNumLinIters(void *ida_mem, long int *nliters); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumLinConvFails instead") int IDASpilsGetNumConvFails(void *ida_mem, long int *nlcfails); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumJTSetupEvals instead") int IDASpilsGetNumJTSetupEvals(void *ida_mem, long int *njtsetups); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumJtimesEvals instead") int IDASpilsGetNumJtimesEvals(void *ida_mem, long int *njvevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumLinResEvals instead") int IDASpilsGetNumResEvals(void *ida_mem, long int *nrevalsLS); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetLastLinFlag instead") int IDASpilsGetLastFlag(void *ida_mem, long int *flag); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetLinReturnFlagName instead") char *IDASpilsGetReturnFlagName(long int flag); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetLinearSolverB instead") int IDASpilsSetLinearSolverB(void *ida_mem, int which, SUNLinearSolver LS); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetEpsLinB instead") int IDASpilsSetEpsLinB(void *ida_mem, int which, realtype eplifacB); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetIncrementFactorB instead") int IDASpilsSetIncrementFactorB(void *ida_mem, int which, realtype dqincfacB); 
SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetPreconditionerB instead") int IDASpilsSetPreconditionerB(void *ida_mem, int which, IDASpilsPrecSetupFnB psetB, IDASpilsPrecSolveFnB psolveB); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetPreconditionerBS instead") int IDASpilsSetPreconditionerBS(void *ida_mem, int which, IDASpilsPrecSetupFnBS psetBS, IDASpilsPrecSolveFnBS psolveBS); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetJacTimesB instead") int IDASpilsSetJacTimesB(void *ida_mem, int which, IDASpilsJacTimesSetupFnB jtsetupB, IDASpilsJacTimesVecFnB jtimesB); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetJacTimesBS instead") int IDASpilsSetJacTimesBS(void *ida_mem, int which, IDASpilsJacTimesSetupFnBS jtsetupBS, IDASpilsJacTimesVecFnBS jtimesBS); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/idas/idas_direct.h0000644000176200001440000000543514645137106020434 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Header file for the deprecated direct linear solver interface in * IDA; these routines now just wrap the updated IDA generic * linear solver interface in idas_ls.h. 
* -----------------------------------------------------------------*/ #ifndef _IDADLS_H #define _IDADLS_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= Function Types (typedefs for equivalent types in ida_ls.h) =================================================================*/ typedef IDALsJacFn IDADlsJacFn; typedef IDALsJacFnB IDADlsJacFnB; typedef IDALsJacFnBS IDADlsJacFnBS; /*=================================================================== Exported Functions (wrappers for equivalent routines in idas_ls.h) ===================================================================*/ SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetLinearSolver instead") int IDADlsSetLinearSolver(void *ida_mem, SUNLinearSolver LS, SUNMatrix A); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetJacFn instead") int IDADlsSetJacFn(void *ida_mem, IDADlsJacFn jac); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetLinWorkSpace instead") int IDADlsGetWorkSpace(void *ida_mem, long int *lenrwLS, long int *leniwLS); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumJacEvals instead") int IDADlsGetNumJacEvals(void *ida_mem, long int *njevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetNumLinResEvals instead") int IDADlsGetNumResEvals(void *ida_mem, long int *nrevalsLS); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetLastLinFlag instead") int IDADlsGetLastFlag(void *ida_mem, long int *flag); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDAGetLinReturnFlagName instead") char *IDADlsGetReturnFlagName(long int flag); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetLinearSolverB instead") int IDADlsSetLinearSolverB(void *ida_mem, int which, SUNLinearSolver LS, SUNMatrix A); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetJacFnB instead") int IDADlsSetJacFnB(void *ida_mem, int which, IDADlsJacFnB jacB); SUNDIALS_DEPRECATED_EXPORT_MSG("use IDASetJacFnBS instead") int IDADlsSetJacFnBS(void *ida_mem, int which, IDADlsJacFnBS jacBS); #ifdef __cplusplus } 
#endif #endif StanHeaders/inst/include/idas/idas_bbdpre.h0000644000176200001440000000665614645137106020426 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU, * Alan C. Hindmarsh, Radu Serban and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the IDABBDPRE module, for a * band-block-diagonal preconditioner, i.e. a block-diagonal * matrix with banded blocks. * -----------------------------------------------------------------*/ #ifndef _IDASBBDPRE_H #define _IDASBBDPRE_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*----------------- FORWARD PROBLEMS -----------------*/ /* User-supplied function Types */ typedef int (*IDABBDLocalFn)(sunindextype Nlocal, realtype tt, N_Vector yy, N_Vector yp, N_Vector gval, void *user_data); typedef int (*IDABBDCommFn)(sunindextype Nlocal, realtype tt, N_Vector yy, N_Vector yp, void *user_data); /* Exported Functions */ SUNDIALS_EXPORT int IDABBDPrecInit(void *ida_mem, sunindextype Nlocal, sunindextype mudq, sunindextype mldq, sunindextype mukeep, sunindextype mlkeep, realtype dq_rel_yy, IDABBDLocalFn Gres, IDABBDCommFn Gcomm); SUNDIALS_EXPORT int IDABBDPrecReInit(void *ida_mem, sunindextype mudq, sunindextype mldq, realtype dq_rel_yy); /* Optional output functions */ SUNDIALS_EXPORT int IDABBDPrecGetWorkSpace(void *ida_mem, long int *lenrwBBDP, long int *leniwBBDP); SUNDIALS_EXPORT int IDABBDPrecGetNumGfnEvals(void *ida_mem, long int *ngevalsBBDP); /*------------------ BACKWARD PROBLEMS ------------------*/ /* 
User-Supplied Function Types */ typedef int (*IDABBDLocalFnB)(sunindextype NlocalB, realtype tt, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector gvalB, void *user_dataB); typedef int (*IDABBDCommFnB)(sunindextype NlocalB, realtype tt, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, void *user_dataB); /* Exported Functions */ SUNDIALS_EXPORT int IDABBDPrecInitB(void *ida_mem, int which, sunindextype NlocalB, sunindextype mudqB, sunindextype mldqB, sunindextype mukeepB, sunindextype mlkeepB, realtype dq_rel_yyB, IDABBDLocalFnB GresB, IDABBDCommFnB GcommB); SUNDIALS_EXPORT int IDABBDPrecReInitB(void *ida_mem, int which, sunindextype mudqB, sunindextype mldqB, realtype dq_rel_yyB); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/idas/idas_ls.h0000644000176200001440000002770214645137106017601 0ustar liggesusers/* ---------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Radu Serban @ LLNL * ---------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------- * This is the header file for IDAS' linear solver interface. 
* ----------------------------------------------------------------*/ #ifndef _IDASLS_H #define _IDASLS_H #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= IDALS Constants =================================================================*/ #define IDALS_SUCCESS 0 #define IDALS_MEM_NULL -1 #define IDALS_LMEM_NULL -2 #define IDALS_ILL_INPUT -3 #define IDALS_MEM_FAIL -4 #define IDALS_PMEM_NULL -5 #define IDALS_JACFUNC_UNRECVR -6 #define IDALS_JACFUNC_RECVR -7 #define IDALS_SUNMAT_FAIL -8 #define IDALS_SUNLS_FAIL -9 /* Return values for the adjoint module */ #define IDALS_NO_ADJ -101 #define IDALS_LMEMB_NULL -102 /*================================================================= Forward problems =================================================================*/ /*================================================================= IDALS user-supplied function prototypes =================================================================*/ typedef int (*IDALsJacFn)(realtype t, realtype c_j, N_Vector y, N_Vector yp, N_Vector r, SUNMatrix Jac, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); typedef int (*IDALsPrecSetupFn)(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, realtype c_j, void *user_data); typedef int (*IDALsPrecSolveFn)(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, N_Vector rvec, N_Vector zvec, realtype c_j, realtype delta, void *user_data); typedef int (*IDALsJacTimesSetupFn)(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, realtype c_j, void *user_data); typedef int (*IDALsJacTimesVecFn)(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, N_Vector v, N_Vector Jv, realtype c_j, void *user_data, N_Vector tmp1, N_Vector tmp2); /*================================================================= IDALS Exported functions =================================================================*/ SUNDIALS_EXPORT int 
IDASetLinearSolver(void *ida_mem, SUNLinearSolver LS, SUNMatrix A); /*----------------------------------------------------------------- Optional inputs to the IDALS linear solver interface -----------------------------------------------------------------*/ SUNDIALS_EXPORT int IDASetJacFn(void *ida_mem, IDALsJacFn jac); SUNDIALS_EXPORT int IDASetPreconditioner(void *ida_mem, IDALsPrecSetupFn pset, IDALsPrecSolveFn psolve); SUNDIALS_EXPORT int IDASetJacTimes(void *ida_mem, IDALsJacTimesSetupFn jtsetup, IDALsJacTimesVecFn jtimes); SUNDIALS_EXPORT int IDASetEpsLin(void *ida_mem, realtype eplifac); SUNDIALS_EXPORT int IDASetLSNormFactor(void *ida_mem, realtype nrmfac); SUNDIALS_EXPORT int IDASetLinearSolutionScaling(void *ida_mem, booleantype onoff); SUNDIALS_EXPORT int IDASetIncrementFactor(void *ida_mem, realtype dqincfac); /*----------------------------------------------------------------- Optional outputs from the IDALS linear solver interface -----------------------------------------------------------------*/ SUNDIALS_EXPORT int IDAGetLinWorkSpace(void *ida_mem, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int IDAGetNumJacEvals(void *ida_mem, long int *njevals); SUNDIALS_EXPORT int IDAGetNumPrecEvals(void *ida_mem, long int *npevals); SUNDIALS_EXPORT int IDAGetNumPrecSolves(void *ida_mem, long int *npsolves); SUNDIALS_EXPORT int IDAGetNumLinIters(void *ida_mem, long int *nliters); SUNDIALS_EXPORT int IDAGetNumLinConvFails(void *ida_mem, long int *nlcfails); SUNDIALS_EXPORT int IDAGetNumJTSetupEvals(void *ida_mem, long int *njtsetups); SUNDIALS_EXPORT int IDAGetNumJtimesEvals(void *ida_mem, long int *njvevals); SUNDIALS_EXPORT int IDAGetNumLinResEvals(void *ida_mem, long int *nrevalsLS); SUNDIALS_EXPORT int IDAGetLastLinFlag(void *ida_mem, long int *flag); SUNDIALS_EXPORT char *IDAGetLinReturnFlagName(long int flag); /*================================================================= Backward problems 
=================================================================*/ /*================================================================= IDALS user-supplied function prototypes =================================================================*/ typedef int (*IDALsJacFnB)(realtype tt, realtype c_jB, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector rrB, SUNMatrix JacB, void *user_dataB, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B); typedef int (*IDALsJacFnBS)(realtype tt, realtype c_jB, N_Vector yy, N_Vector yp, N_Vector *yS, N_Vector *ypS, N_Vector yyB, N_Vector ypB, N_Vector rrB, SUNMatrix JacB, void *user_dataB, N_Vector tmp1B, N_Vector tmp2B, N_Vector tmp3B); typedef int (*IDALsPrecSetupFnB)(realtype tt, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *user_dataB); typedef int (*IDALsPrecSetupFnBS)(realtype tt, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *user_dataB); typedef int (*IDALsPrecSolveFnB)(realtype tt, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector rvecB, N_Vector zvecB, realtype c_jB, realtype deltaB, void *user_dataB); typedef int (*IDALsPrecSolveFnBS)(realtype tt, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector rvecB, N_Vector zvecB, realtype c_jB, realtype deltaB, void *user_dataB); typedef int (*IDALsJacTimesSetupFnB)(realtype t, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *user_dataB); typedef int (*IDALsJacTimesSetupFnBS)(realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector yyB, N_Vector ypB, N_Vector rrB, realtype c_jB, void *user_dataB); typedef int (*IDALsJacTimesVecFnB)(realtype t, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector vB, N_Vector JvB, realtype c_jB, void *user_dataB, N_Vector tmp1B, N_Vector tmp2B); typedef int 
(*IDALsJacTimesVecFnBS)(realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector yyB, N_Vector ypB, N_Vector rrB, N_Vector vB, N_Vector JvB, realtype c_jB, void *user_dataB, N_Vector tmp1B, N_Vector tmp2B); /*================================================================= IDALS Exported functions =================================================================*/ SUNDIALS_EXPORT int IDASetLinearSolverB(void *ida_mem, int which, SUNLinearSolver LS, SUNMatrix A); /*----------------------------------------------------------------- Each IDASet***B or IDASet***BS function below links the main IDAS integrator with the corresponding IDALS optional input function for the backward integration. The 'which' argument is the int returned by IDACreateB. -----------------------------------------------------------------*/ SUNDIALS_EXPORT int IDASetJacFnB(void *ida_mem, int which, IDALsJacFnB jacB); SUNDIALS_EXPORT int IDASetJacFnBS(void *ida_mem, int which, IDALsJacFnBS jacBS); SUNDIALS_EXPORT int IDASetEpsLinB(void *ida_mem, int which, realtype eplifacB); SUNDIALS_EXPORT int IDASetLSNormFactorB(void *ida_mem, int which, realtype nrmfacB); SUNDIALS_EXPORT int IDASetLinearSolutionScalingB(void *ida_mem, int which, booleantype onoffB); SUNDIALS_EXPORT int IDASetIncrementFactorB(void *ida_mem, int which, realtype dqincfacB); SUNDIALS_EXPORT int IDASetPreconditionerB(void *ida_mem, int which, IDALsPrecSetupFnB psetB, IDALsPrecSolveFnB psolveB); SUNDIALS_EXPORT int IDASetPreconditionerBS(void *ida_mem, int which, IDALsPrecSetupFnBS psetBS, IDALsPrecSolveFnBS psolveBS); SUNDIALS_EXPORT int IDASetJacTimesB(void *ida_mem, int which, IDALsJacTimesSetupFnB jtsetupB, IDALsJacTimesVecFnB jtimesB); SUNDIALS_EXPORT int IDASetJacTimesBS(void *ida_mem, int which, IDALsJacTimesSetupFnBS jtsetupBS, IDALsJacTimesVecFnBS jtimesBS); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/idas/idas.h0000644000176200001440000006455514645137106017112 0ustar liggesusers/* 
----------------------------------------------------------------- * Programmer(s): Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the main IDAS solver. * -----------------------------------------------------------------*/ #ifndef _IDAS_H #define _IDAS_H #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------- * IDAS Constants * ----------------- */ /* itask */ #define IDA_NORMAL 1 #define IDA_ONE_STEP 2 /* icopt */ #define IDA_YA_YDP_INIT 1 #define IDA_Y_INIT 2 /* ism */ #define IDA_SIMULTANEOUS 1 #define IDA_STAGGERED 2 /* DQtype */ #define IDA_CENTERED 1 #define IDA_FORWARD 2 /* interp */ #define IDA_HERMITE 1 #define IDA_POLYNOMIAL 2 /* return values */ #define IDA_SUCCESS 0 #define IDA_TSTOP_RETURN 1 #define IDA_ROOT_RETURN 2 #define IDA_WARNING 99 #define IDA_TOO_MUCH_WORK -1 #define IDA_TOO_MUCH_ACC -2 #define IDA_ERR_FAIL -3 #define IDA_CONV_FAIL -4 #define IDA_LINIT_FAIL -5 #define IDA_LSETUP_FAIL -6 #define IDA_LSOLVE_FAIL -7 #define IDA_RES_FAIL -8 #define IDA_REP_RES_ERR -9 #define IDA_RTFUNC_FAIL -10 #define IDA_CONSTR_FAIL -11 #define IDA_FIRST_RES_FAIL -12 #define IDA_LINESEARCH_FAIL -13 #define IDA_NO_RECOVERY -14 #define IDA_NLS_INIT_FAIL -15 #define IDA_NLS_SETUP_FAIL -16 #define IDA_NLS_FAIL -17 #define IDA_MEM_NULL -20 #define IDA_MEM_FAIL -21 #define IDA_ILL_INPUT -22 #define IDA_NO_MALLOC -23 #define IDA_BAD_EWT -24 #define IDA_BAD_K -25 #define IDA_BAD_T -26 #define IDA_BAD_DKY -27 #define IDA_VECTOROP_ERR -28 #define IDA_CONTEXT_ERR -29 #define 
IDA_NO_QUAD -30 #define IDA_QRHS_FAIL -31 #define IDA_FIRST_QRHS_ERR -32 #define IDA_REP_QRHS_ERR -33 #define IDA_NO_SENS -40 #define IDA_SRES_FAIL -41 #define IDA_REP_SRES_ERR -42 #define IDA_BAD_IS -43 #define IDA_NO_QUADSENS -50 #define IDA_QSRHS_FAIL -51 #define IDA_FIRST_QSRHS_ERR -52 #define IDA_REP_QSRHS_ERR -53 #define IDA_UNRECOGNIZED_ERROR -99 /* adjoint return values */ #define IDA_NO_ADJ -101 #define IDA_NO_FWD -102 #define IDA_NO_BCK -103 #define IDA_BAD_TB0 -104 #define IDA_REIFWD_FAIL -105 #define IDA_FWD_FAIL -106 #define IDA_GETY_BADT -107 /* ------------------------------ * User-Supplied Function Types * ------------------------------ */ typedef int (*IDAResFn)(realtype tt, N_Vector yy, N_Vector yp, N_Vector rr, void *user_data); typedef int (*IDARootFn)(realtype t, N_Vector y, N_Vector yp, realtype *gout, void *user_data); typedef int (*IDAEwtFn)(N_Vector y, N_Vector ewt, void *user_data); typedef void (*IDAErrHandlerFn)(int error_code, const char *module, const char *function, char *msg, void *user_data); typedef int (*IDAQuadRhsFn)(realtype tres, N_Vector yy, N_Vector yp, N_Vector rrQ, void *user_data); typedef int (*IDASensResFn)(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector resval, N_Vector *yyS, N_Vector *ypS, N_Vector *resvalS, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); typedef int (*IDAQuadSensRhsFn)(int Ns, realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector rrQ, N_Vector *rhsvalQS, void *user_data, N_Vector yytmp, N_Vector yptmp, N_Vector tmpQS); typedef int (*IDAResFnB)(realtype tt, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector rrB, void *user_dataB); typedef int (*IDAResFnBS)(realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector yyB, N_Vector ypB, N_Vector rrBS, void *user_dataB); typedef int (*IDAQuadRhsFnB)(realtype tt, N_Vector yy, N_Vector yp, N_Vector yyB, N_Vector ypB, N_Vector rhsvalBQ, void *user_dataB); typedef int 
(*IDAQuadRhsFnBS)(realtype t, N_Vector yy, N_Vector yp, N_Vector *yyS, N_Vector *ypS, N_Vector yyB, N_Vector ypB, N_Vector rhsvalBQS, void *user_dataB); /* --------------------------------------- * Exported Functions -- Forward Problems * --------------------------------------- */ /* Initialization functions */ SUNDIALS_EXPORT void *IDACreate(SUNContext sunctx); SUNDIALS_EXPORT int IDAInit(void *ida_mem, IDAResFn res, realtype t0, N_Vector yy0, N_Vector yp0); SUNDIALS_EXPORT int IDAReInit(void *ida_mem, realtype t0, N_Vector yy0, N_Vector yp0); /* Tolerance input functions */ SUNDIALS_EXPORT int IDASStolerances(void *ida_mem, realtype reltol, realtype abstol); SUNDIALS_EXPORT int IDASVtolerances(void *ida_mem, realtype reltol, N_Vector abstol); SUNDIALS_EXPORT int IDAWFtolerances(void *ida_mem, IDAEwtFn efun); /* Initial condition calculation function */ SUNDIALS_EXPORT int IDACalcIC(void *ida_mem, int icopt, realtype tout1); /* Initial condition calculation optional input functions */ SUNDIALS_EXPORT int IDASetNonlinConvCoefIC(void *ida_mem, realtype epiccon); SUNDIALS_EXPORT int IDASetMaxNumStepsIC(void *ida_mem, int maxnh); SUNDIALS_EXPORT int IDASetMaxNumJacsIC(void *ida_mem, int maxnj); SUNDIALS_EXPORT int IDASetMaxNumItersIC(void *ida_mem, int maxnit); SUNDIALS_EXPORT int IDASetLineSearchOffIC(void *ida_mem, booleantype lsoff); SUNDIALS_EXPORT int IDASetStepToleranceIC(void *ida_mem, realtype steptol); SUNDIALS_EXPORT int IDASetMaxBacksIC(void *ida_mem, int maxbacks); /* Optional input functions */ SUNDIALS_EXPORT int IDASetErrHandlerFn(void *ida_mem, IDAErrHandlerFn ehfun, void *eh_data); SUNDIALS_EXPORT int IDASetErrFile(void *ida_mem, FILE *errfp); SUNDIALS_EXPORT int IDASetUserData(void *ida_mem, void *user_data); SUNDIALS_EXPORT int IDASetMaxOrd(void *ida_mem, int maxord); SUNDIALS_EXPORT int IDASetMaxNumSteps(void *ida_mem, long int mxsteps); SUNDIALS_EXPORT int IDASetInitStep(void *ida_mem, realtype hin); SUNDIALS_EXPORT int IDASetMaxStep(void 
*ida_mem, realtype hmax); SUNDIALS_EXPORT int IDASetStopTime(void *ida_mem, realtype tstop); SUNDIALS_EXPORT int IDASetNonlinConvCoef(void *ida_mem, realtype epcon); SUNDIALS_EXPORT int IDASetMaxErrTestFails(void *ida_mem, int maxnef); SUNDIALS_EXPORT int IDASetMaxNonlinIters(void *ida_mem, int maxcor); SUNDIALS_EXPORT int IDASetMaxConvFails(void *ida_mem, int maxncf); SUNDIALS_EXPORT int IDASetSuppressAlg(void *ida_mem, booleantype suppressalg); SUNDIALS_EXPORT int IDASetId(void *ida_mem, N_Vector id); SUNDIALS_EXPORT int IDASetConstraints(void *ida_mem, N_Vector constraints); SUNDIALS_EXPORT int IDASetNonlinearSolver(void *ida_mem, SUNNonlinearSolver NLS); SUNDIALS_EXPORT int IDASetNlsResFn(void *IDA_mem, IDAResFn res); /* Rootfinding initialization function */ SUNDIALS_EXPORT int IDARootInit(void *ida_mem, int nrtfn, IDARootFn g); /* Rootfinding optional input functions */ SUNDIALS_EXPORT int IDASetRootDirection(void *ida_mem, int *rootdir); SUNDIALS_EXPORT int IDASetNoInactiveRootWarn(void *ida_mem); /* Solver function */ SUNDIALS_EXPORT int IDASolve(void *ida_mem, realtype tout, realtype *tret, N_Vector yret, N_Vector ypret, int itask); /* Utility functions to update/compute y and yp based on ycor */ SUNDIALS_EXPORT int IDAComputeY(void *ida_mem, N_Vector ycor, N_Vector y); SUNDIALS_EXPORT int IDAComputeYp(void *ida_mem, N_Vector ycor, N_Vector yp); SUNDIALS_EXPORT int IDAComputeYSens(void *ida_mem, N_Vector *ycor, N_Vector *yyS); SUNDIALS_EXPORT int IDAComputeYpSens(void *ida_mem, N_Vector *ycor, N_Vector *ypS); /* Dense output function */ SUNDIALS_EXPORT int IDAGetDky(void *ida_mem, realtype t, int k, N_Vector dky); /* Optional output functions */ SUNDIALS_EXPORT int IDAGetWorkSpace(void *ida_mem, long int *lenrw, long int *leniw); SUNDIALS_EXPORT int IDAGetNumSteps(void *ida_mem, long int *nsteps); SUNDIALS_EXPORT int IDAGetNumResEvals(void *ida_mem, long int *nrevals); SUNDIALS_EXPORT int IDAGetNumLinSolvSetups(void *ida_mem, long int *nlinsetups); 
SUNDIALS_EXPORT int IDAGetNumErrTestFails(void *ida_mem, long int *netfails); SUNDIALS_EXPORT int IDAGetNumBacktrackOps(void *ida_mem, long int *nbacktr); SUNDIALS_EXPORT int IDAGetConsistentIC(void *ida_mem, N_Vector yy0_mod, N_Vector yp0_mod); SUNDIALS_EXPORT int IDAGetLastOrder(void *ida_mem, int *klast); SUNDIALS_EXPORT int IDAGetCurrentOrder(void *ida_mem, int *kcur); SUNDIALS_EXPORT int IDAGetCurrentCj(void *ida_mem, realtype *cj); SUNDIALS_EXPORT int IDAGetCurrentY(void *ida_mem, N_Vector *ycur); SUNDIALS_EXPORT int IDAGetCurrentYSens(void *ida_mem, N_Vector **yS); SUNDIALS_EXPORT int IDAGetCurrentYp(void *ida_mem, N_Vector *ypcur); SUNDIALS_EXPORT int IDAGetCurrentYpSens(void *ida_mem, N_Vector **ypS); SUNDIALS_EXPORT int IDAGetActualInitStep(void *ida_mem, realtype *hinused); SUNDIALS_EXPORT int IDAGetLastStep(void *ida_mem, realtype *hlast); SUNDIALS_EXPORT int IDAGetCurrentStep(void *ida_mem, realtype *hcur); SUNDIALS_EXPORT int IDAGetCurrentTime(void *ida_mem, realtype *tcur); SUNDIALS_EXPORT int IDAGetTolScaleFactor(void *ida_mem, realtype *tolsfact); SUNDIALS_EXPORT int IDAGetErrWeights(void *ida_mem, N_Vector eweight); SUNDIALS_EXPORT int IDAGetEstLocalErrors(void *ida_mem, N_Vector ele); SUNDIALS_EXPORT int IDAGetNumGEvals(void *ida_mem, long int *ngevals); SUNDIALS_EXPORT int IDAGetRootInfo(void *ida_mem, int *rootsfound); SUNDIALS_EXPORT int IDAGetIntegratorStats(void *ida_mem, long int *nsteps, long int *nrevals, long int *nlinsetups, long int *netfails, int *qlast, int *qcur, realtype *hinused, realtype *hlast, realtype *hcur, realtype *tcur); SUNDIALS_EXPORT int IDAGetNonlinearSystemData(void *ida_mem, realtype *tcur, N_Vector *yypred, N_Vector *yppred, N_Vector *yyn, N_Vector *ypn, N_Vector *res, realtype *cj, void **user_data); SUNDIALS_EXPORT int IDAGetNonlinearSystemDataSens(void *ida_mem, realtype *tcur, N_Vector **yySpred, N_Vector **ypSpred, N_Vector **yySn, N_Vector **ypSn, realtype *cj, void **user_data); SUNDIALS_EXPORT int 
IDAGetNumNonlinSolvIters(void *ida_mem, long int *nniters); SUNDIALS_EXPORT int IDAGetNumNonlinSolvConvFails(void *ida_mem, long int *nncfails); SUNDIALS_EXPORT int IDAGetNonlinSolvStats(void *ida_mem, long int *nniters, long int *nncfails); SUNDIALS_EXPORT char *IDAGetReturnFlagName(long int flag); /* Free function */ SUNDIALS_EXPORT void IDAFree(void **ida_mem); /* IDALS interface function that depends on IDAResFn */ SUNDIALS_EXPORT int IDASetJacTimesResFn(void *ida_mem, IDAResFn jtimesResFn); /* --------------------------------- * Exported Functions -- Quadrature * --------------------------------- */ /* Initialization functions */ SUNDIALS_EXPORT int IDAQuadInit(void *ida_mem, IDAQuadRhsFn rhsQ, N_Vector yQ0); SUNDIALS_EXPORT int IDAQuadReInit(void *ida_mem, N_Vector yQ0); /* Tolerance input functions */ SUNDIALS_EXPORT int IDAQuadSStolerances(void *ida_mem, realtype reltolQ, realtype abstolQ); SUNDIALS_EXPORT int IDAQuadSVtolerances(void *ida_mem, realtype reltolQ, N_Vector abstolQ); /* Optional input specification functions */ SUNDIALS_EXPORT int IDASetQuadErrCon(void *ida_mem, booleantype errconQ); /* Extraction and dense output functions */ SUNDIALS_EXPORT int IDAGetQuad(void *ida_mem, realtype *t, N_Vector yQout); SUNDIALS_EXPORT int IDAGetQuadDky(void *ida_mem, realtype t, int k, N_Vector dky); /* Optional output specification functions */ SUNDIALS_EXPORT int IDAGetQuadNumRhsEvals(void *ida_mem, long int *nrhsQevals); SUNDIALS_EXPORT int IDAGetQuadNumErrTestFails(void *ida_mem, long int *nQetfails); SUNDIALS_EXPORT int IDAGetQuadErrWeights(void *ida_mem, N_Vector eQweight); SUNDIALS_EXPORT int IDAGetQuadStats(void *ida_mem, long int *nrhsQevals, long int *nQetfails); /* Free function */ SUNDIALS_EXPORT void IDAQuadFree(void *ida_mem); /* ------------------------------------ * Exported Functions -- Sensitivities * ------------------------------------ */ /* Initialization functions */ SUNDIALS_EXPORT int IDASensInit(void *ida_mem, int Ns, int ism, 
IDASensResFn resS, N_Vector *yS0, N_Vector *ypS0); SUNDIALS_EXPORT int IDASensReInit(void *ida_mem, int ism, N_Vector *yS0, N_Vector *ypS0); /* Tolerance input functions */ SUNDIALS_EXPORT int IDASensSStolerances(void *ida_mem, realtype reltolS, realtype *abstolS); SUNDIALS_EXPORT int IDASensSVtolerances(void *ida_mem, realtype reltolS, N_Vector *abstolS); SUNDIALS_EXPORT int IDASensEEtolerances(void *ida_mem); /* Initial condition calculation function */ SUNDIALS_EXPORT int IDAGetSensConsistentIC(void *ida_mem, N_Vector *yyS0, N_Vector *ypS0); /* Optional input specification functions */ SUNDIALS_EXPORT int IDASetSensDQMethod(void *ida_mem, int DQtype, realtype DQrhomax); SUNDIALS_EXPORT int IDASetSensErrCon(void *ida_mem, booleantype errconS); SUNDIALS_EXPORT int IDASetSensMaxNonlinIters(void *ida_mem, int maxcorS); SUNDIALS_EXPORT int IDASetSensParams(void *ida_mem, realtype *p, realtype *pbar, int *plist); /* Integrator nonlinear solver specification functions */ SUNDIALS_EXPORT int IDASetNonlinearSolverSensSim(void *ida_mem, SUNNonlinearSolver NLS); SUNDIALS_EXPORT int IDASetNonlinearSolverSensStg(void *ida_mem, SUNNonlinearSolver NLS); /* Enable/disable sensitivities */ SUNDIALS_EXPORT int IDASensToggleOff(void *ida_mem); /* Extraction and dense output functions */ SUNDIALS_EXPORT int IDAGetSens(void *ida_mem, realtype *tret, N_Vector *yySout); SUNDIALS_EXPORT int IDAGetSens1(void *ida_mem, realtype *tret, int is, N_Vector yySret); SUNDIALS_EXPORT int IDAGetSensDky(void *ida_mem, realtype t, int k, N_Vector *dkyS); SUNDIALS_EXPORT int IDAGetSensDky1(void *ida_mem, realtype t, int k, int is, N_Vector dkyS); /* Optional output specification functions */ SUNDIALS_EXPORT int IDAGetSensNumResEvals(void *ida_mem, long int *nresSevals); SUNDIALS_EXPORT int IDAGetNumResEvalsSens(void *ida_mem, long int *nresevalsS); SUNDIALS_EXPORT int IDAGetSensNumErrTestFails(void *ida_mem, long int *nSetfails); SUNDIALS_EXPORT int IDAGetSensNumLinSolvSetups(void *ida_mem, long int 
*nlinsetupsS); SUNDIALS_EXPORT int IDAGetSensErrWeights(void *ida_mem, N_Vector_S eSweight); SUNDIALS_EXPORT int IDAGetSensStats(void *ida_mem, long int *nresSevals, long int *nresevalsS, long int *nSetfails, long int *nlinsetupsS); SUNDIALS_EXPORT int IDAGetSensNumNonlinSolvIters(void *ida_mem, long int *nSniters); SUNDIALS_EXPORT int IDAGetSensNumNonlinSolvConvFails(void *ida_mem, long int *nSncfails); SUNDIALS_EXPORT int IDAGetSensNonlinSolvStats(void *ida_mem, long int *nSniters, long int *nSncfails); /* Free function */ SUNDIALS_EXPORT void IDASensFree(void *ida_mem); /* ------------------------------------------------------- * Exported Functions -- Sensitivity dependent quadrature * ------------------------------------------------------- */ /* Initialization functions */ SUNDIALS_EXPORT int IDAQuadSensInit(void *ida_mem, IDAQuadSensRhsFn resQS, N_Vector *yQS0); SUNDIALS_EXPORT int IDAQuadSensReInit(void *ida_mem, N_Vector *yQS0); /* Tolerance input functions */ SUNDIALS_EXPORT int IDAQuadSensSStolerances(void *ida_mem, realtype reltolQS, realtype *abstolQS); SUNDIALS_EXPORT int IDAQuadSensSVtolerances(void *ida_mem, realtype reltolQS, N_Vector *abstolQS); SUNDIALS_EXPORT int IDAQuadSensEEtolerances(void *ida_mem); /* Optional input specification functions */ SUNDIALS_EXPORT int IDASetQuadSensErrCon(void *ida_mem, booleantype errconQS); /* Extraction and dense output functions */ SUNDIALS_EXPORT int IDAGetQuadSens(void *ida_mem, realtype *tret, N_Vector *yyQSout); SUNDIALS_EXPORT int IDAGetQuadSens1(void *ida_mem, realtype *tret, int is, N_Vector yyQSret); SUNDIALS_EXPORT int IDAGetQuadSensDky(void *ida_mem, realtype t, int k, N_Vector *dkyQS); SUNDIALS_EXPORT int IDAGetQuadSensDky1(void *ida_mem, realtype t, int k, int is, N_Vector dkyQS); /* Optional output specification functions */ SUNDIALS_EXPORT int IDAGetQuadSensNumRhsEvals(void *ida_mem, long int *nrhsQSevals); SUNDIALS_EXPORT int IDAGetQuadSensNumErrTestFails(void *ida_mem, long int *nQSetfails); 
SUNDIALS_EXPORT int IDAGetQuadSensErrWeights(void *ida_mem, N_Vector *eQSweight); SUNDIALS_EXPORT int IDAGetQuadSensStats(void *ida_mem, long int *nrhsQSevals, long int *nQSetfails); /* Free function */ SUNDIALS_EXPORT void IDAQuadSensFree(void* ida_mem); /* ---------------------------------------- * Exported Functions -- Backward Problems * ---------------------------------------- */ /* Initialization functions */ SUNDIALS_EXPORT int IDAAdjInit(void *ida_mem, long int steps, int interp); SUNDIALS_EXPORT int IDAAdjReInit(void *ida_mem); SUNDIALS_EXPORT void IDAAdjFree(void *ida_mem); /* Backward Problem Setup Functions */ SUNDIALS_EXPORT int IDACreateB(void *ida_mem, int *which); SUNDIALS_EXPORT int IDAInitB(void *ida_mem, int which, IDAResFnB resB, realtype tB0, N_Vector yyB0, N_Vector ypB0); SUNDIALS_EXPORT int IDAInitBS(void *ida_mem, int which, IDAResFnBS resS, realtype tB0, N_Vector yyB0, N_Vector ypB0); SUNDIALS_EXPORT int IDAReInitB(void *ida_mem, int which, realtype tB0, N_Vector yyB0, N_Vector ypB0); SUNDIALS_EXPORT int IDASStolerancesB(void *ida_mem, int which, realtype relTolB, realtype absTolB); SUNDIALS_EXPORT int IDASVtolerancesB(void *ida_mem, int which, realtype relTolB, N_Vector absTolB); SUNDIALS_EXPORT int IDAQuadInitB(void *ida_mem, int which, IDAQuadRhsFnB rhsQB, N_Vector yQB0); SUNDIALS_EXPORT int IDAQuadInitBS(void *ida_mem, int which, IDAQuadRhsFnBS rhsQS, N_Vector yQB0); SUNDIALS_EXPORT int IDAQuadReInitB(void *ida_mem, int which, N_Vector yQB0); SUNDIALS_EXPORT int IDAQuadSStolerancesB(void *ida_mem, int which, realtype reltolQB, realtype abstolQB); SUNDIALS_EXPORT int IDAQuadSVtolerancesB(void *ida_mem, int which, realtype reltolQB, N_Vector abstolQB); /* Consistent IC calculation functions */ SUNDIALS_EXPORT int IDACalcICB (void *ida_mem, int which, realtype tout1, N_Vector yy0, N_Vector yp0); SUNDIALS_EXPORT int IDACalcICBS(void *ida_mem, int which, realtype tout1, N_Vector yy0, N_Vector yp0, N_Vector *yyS0, N_Vector *ypS0); /* Solver 
Function For Forward Problems */ SUNDIALS_EXPORT int IDASolveF(void *ida_mem, realtype tout, realtype *tret, N_Vector yret, N_Vector ypret, int itask, int *ncheckPtr); /* Solver Function For Backward Problems */ SUNDIALS_EXPORT int IDASolveB(void *ida_mem, realtype tBout, int itaskB); /* Optional Input Functions For Adjoint Problems */ SUNDIALS_EXPORT int IDAAdjSetNoSensi(void *ida_mem); SUNDIALS_EXPORT int IDASetUserDataB(void *ida_mem, int which, void *user_dataB); SUNDIALS_EXPORT int IDASetMaxOrdB(void *ida_mem, int which, int maxordB); SUNDIALS_EXPORT int IDASetMaxNumStepsB(void *ida_mem, int which, long int mxstepsB); SUNDIALS_EXPORT int IDASetInitStepB(void *ida_mem, int which, realtype hinB); SUNDIALS_EXPORT int IDASetMaxStepB(void *ida_mem, int which, realtype hmaxB); SUNDIALS_EXPORT int IDASetSuppressAlgB(void *ida_mem, int which, booleantype suppressalgB); SUNDIALS_EXPORT int IDASetIdB(void *ida_mem, int which, N_Vector idB); SUNDIALS_EXPORT int IDASetConstraintsB(void *ida_mem, int which, N_Vector constraintsB); SUNDIALS_EXPORT int IDASetQuadErrConB(void *ida_mem, int which, int errconQB); SUNDIALS_EXPORT int IDASetNonlinearSolverB(void *ida_mem, int which, SUNNonlinearSolver NLS); /* Extraction And Dense Output Functions For Backward Problems */ SUNDIALS_EXPORT int IDAGetB(void* ida_mem, int which, realtype *tret, N_Vector yy, N_Vector yp); SUNDIALS_EXPORT int IDAGetQuadB(void *ida_mem, int which, realtype *tret, N_Vector qB); /* Optional Output Functions For Backward Problems */ SUNDIALS_EXPORT void *IDAGetAdjIDABmem(void *ida_mem, int which); SUNDIALS_EXPORT int IDAGetConsistentICB(void *ida_mem, int which, N_Vector yyB0, N_Vector ypB0); SUNDIALS_EXPORT int IDAGetAdjY(void *ida_mem, realtype t, N_Vector yy, N_Vector yp); typedef struct { void *my_addr; void *next_addr; realtype t0; realtype t1; long int nstep; int order; realtype step; } IDAadjCheckPointRec; SUNDIALS_EXPORT int IDAGetAdjCheckPointsInfo(void *ida_mem, IDAadjCheckPointRec *ckpnt); /* 
IDALS interface function that depends on IDAResFn */ SUNDIALS_EXPORT int IDASetJacTimesResFnB(void *ida_mem, int which, IDAResFn jtimesResFn); /* Undocumented Optional Output Functions For Backward Problems */ /* ----------------------------------------------------------------- * IDAGetAdjDataPointHermite * ----------------------------------------------------------------- * Returns the 2 vectors stored for cubic Hermite interpolation * at the data point 'which'. The user must allocate space for * yy and yd. Returns IDA_MEM_NULL if ida_mem is NULL, * IDA_ILL_INPUT if the interpolation type previously specified * is not IDA_HERMITE, or IDA_SUCCESS otherwise. * ----------------------------------------------------------------- * IDAGetAdjDataPointPolynomial * ----------------------------------------------------------------- * Returns the vector stored for polynomial interpolation * at the data point 'which'. The user must allocate space for * y. Returns IDA_MEM_NULL if ida_mem is NULL, IDA_ILL_INPUT if * the interpolation type previously specified is not * IDA_POLYNOMIAL, or IDA_SUCCESS otherwise. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int IDAGetAdjDataPointHermite(void *ida_mem, int which, realtype *t, N_Vector yy, N_Vector yd); SUNDIALS_EXPORT int IDAGetAdjDataPointPolynomial(void *ida_mem, int which, realtype *t, int *order, N_Vector y); /* ----------------------------------------------------------------- * IDAGetAdjCurrentCheckPoint * Returns the address of the 'active' check point. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT int IDAGetAdjCurrentCheckPoint(void *ida_mem, void **addr); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/src/0000755000176200001440000000000014645137110015644 5ustar liggesusersStanHeaders/inst/include/src/stan/0000755000176200001440000000000014645137105016615 5ustar liggesusersStanHeaders/inst/include/src/stan/callbacks/0000755000176200001440000000000014645137105020534 5ustar liggesusersStanHeaders/inst/include/src/stan/callbacks/logger.hpp0000644000176200001440000000377014645137105022533 0ustar liggesusers#ifndef STAN_CALLBACKS_LOGGER_HPP #define STAN_CALLBACKS_LOGGER_HPP #include #include namespace stan { namespace callbacks { /** * The logger class defines the callback * used by Stan's algorithms to log messages in the * interfaces. The base class can be used as a no-op * implementation. * * These are the logging levels used by logger, * in order: * 1. debug * 2. info * 3. warn * 4. error * 5. fatal */ class logger { public: virtual ~logger() {} /** * Logs a message with debug log level * * @param[in] message message */ virtual void debug(const std::string& message) {} /** * Logs a message with debug log level. * * @param[in] message message */ virtual void debug(const std::stringstream& message) {} /** * Logs a message with info log level. * * @param[in] message message */ virtual void info(const std::string& message) {} /** * Logs a message with info log level. * * @param[in] message message */ virtual void info(const std::stringstream& message) {} /** * Logs a message with warn log level. * * @param[in] message message */ virtual void warn(const std::string& message) {} /** * Logs a message with warn log level. * * @param[in] message message */ virtual void warn(const std::stringstream& message) {} /** * Logs an error with error log level. 
* * @param[in] message message */ virtual void error(const std::string& message) {} /** * Logs an error with error log level. * * @param[in] message message */ virtual void error(const std::stringstream& message) {} /** * Logs an error with fatal log level. * * @param[in] message message */ virtual void fatal(const std::string& message) {} /** * Logs an error with fatal log level. * * @param[in] message message */ virtual void fatal(const std::stringstream& message) {} }; } // namespace callbacks } // namespace stan #endif StanHeaders/inst/include/src/stan/callbacks/writer.hpp0000644000176200001440000000174114645137105022564 0ustar liggesusers#ifndef STAN_CALLBACKS_WRITER_HPP #define STAN_CALLBACKS_WRITER_HPP #include #include #include namespace stan { namespace callbacks { /** * writer is a base class defining the interface * for Stan writer callbacks. The base class can be used as a * no-op implementation. */ class writer { public: /** * Virtual destructor. */ virtual ~writer() {} /** * Writes a set of names. * * @param[in] names Names in a std::vector */ virtual void operator()(const std::vector& names) {} /** * Writes a set of values. * * @param[in] state Values in a std::vector */ virtual void operator()(const std::vector& state) {} /** * Writes blank input. */ virtual void operator()() {} /** * Writes a string. * * @param[in] message A string */ virtual void operator()(const std::string& message) {} }; } // namespace callbacks } // namespace stan #endif StanHeaders/inst/include/src/stan/callbacks/stream_logger.hpp0000644000176200001440000000403614645137105024102 0ustar liggesusers#ifndef STAN_CALLBACKS_STREAM_LOGGER_HPP #define STAN_CALLBACKS_STREAM_LOGGER_HPP #include #include #include #include namespace stan { namespace callbacks { /** * stream_logger is an implementation of * logger that writes messages to separate * std::stringstream outputs. 
*/ class stream_logger final : public logger { private: std::ostream& debug_; std::ostream& info_; std::ostream& warn_; std::ostream& error_; std::ostream& fatal_; public: /** * Constructs a stream_logger with an output * stream for each log level. * * @param[in,out] debug stream to output debug messages * @param[in,out] info stream to output info messages * @param[in,out] warn stream to output warn messages * @param[in,out] error stream to output error messages * @param[in,out] fatal stream to output fatal messages */ stream_logger(std::ostream& debug, std::ostream& info, std::ostream& warn, std::ostream& error, std::ostream& fatal) : debug_(debug), info_(info), warn_(warn), error_(error), fatal_(fatal) {} void debug(const std::string& message) { debug_ << message << std::endl; } void debug(const std::stringstream& message) { debug_ << message.str() << std::endl; } void info(const std::string& message) { info_ << message << std::endl; } void info(const std::stringstream& message) { info_ << message.str() << std::endl; } void warn(const std::string& message) { warn_ << message << std::endl; } void warn(const std::stringstream& message) { warn_ << message.str() << std::endl; } void error(const std::string& message) { error_ << message << std::endl; } void error(const std::stringstream& message) { error_ << message.str() << std::endl; } void fatal(const std::string& message) { fatal_ << message << std::endl; } void fatal(const std::stringstream& message) { fatal_ << message.str() << std::endl; } }; } // namespace callbacks } // namespace stan #endif StanHeaders/inst/include/src/stan/callbacks/tee_writer.hpp0000644000176200001440000000235314645137105023421 0ustar liggesusers#ifndef STAN_CALLBACKS_TEE_WRITER_HPP #define STAN_CALLBACKS_TEE_WRITER_HPP #include #include #include #include namespace stan { namespace callbacks { /** * tee_writer is an implementation that writes to * two writers. 
* * For any call to this writer, it will tee the call to both writers * provided in the constructor. */ class tee_writer final : public writer { public: /** * Constructor accepting two writers. * * @param[in, out] writer1 first writer * @param[in, out] writer2 second writer */ tee_writer(writer& writer1, writer& writer2) : writer1_(writer1), writer2_(writer2) {} virtual ~tee_writer() {} void operator()(const std::vector& names) { writer1_(names); writer2_(names); } void operator()(const std::vector& state) { writer1_(state); writer2_(state); } void operator()() { writer1_(); writer2_(); } void operator()(const std::string& message) { writer1_(message); writer2_(message); } private: /** * The first writer */ writer& writer1_; /** * The second writer */ writer& writer2_; }; } // namespace callbacks } // namespace stan #endif StanHeaders/inst/include/src/stan/callbacks/unique_stream_writer.hpp0000644000176200001440000000646314645137105025533 0ustar liggesusers#ifndef STAN_CALLBACKS_UNIQUE_STREAM_WRITER_HPP #define STAN_CALLBACKS_UNIQUE_STREAM_WRITER_HPP #include #include #include #include namespace stan { namespace callbacks { /** * unique_stream_writer is an implementation * of writer that holds a unique pointer to the stream it is * writing to. * @tparam Stream A type with with a valid `operator<<(std::string)` */ template class unique_stream_writer final : public writer { public: /** * Constructs a unique stream writer with an output stream * and an optional prefix for comments. * * @param[in, out] A unique pointer to a type inheriting from `std::ostream` * @param[in] comment_prefix string to stream before each comment line. * Default is "". 
*/ explicit unique_stream_writer(std::unique_ptr&& output, const std::string& comment_prefix = "") : output_(std::move(output)), comment_prefix_(comment_prefix) {} unique_stream_writer(); unique_stream_writer(unique_stream_writer& other) = delete; unique_stream_writer(unique_stream_writer&& other) : output_(std::move(other.output_)), comment_prefix_(std::move(other.comment_prefix_)) {} /** * Virtual destructor */ virtual ~unique_stream_writer() {} /** * Writes a set of names on a single line in csv format followed * by a newline. * * Note: the names are not escaped. * * @param[in] names Names in a std::vector */ void operator()(const std::vector& names) { if (output_ == nullptr) return; write_vector(names); } /** * Get the underlying stream */ inline auto& get_stream() noexcept { return *output_; } /** * Writes a set of values in csv format followed by a newline. * * Note: the precision of the output is determined by the settings * of the stream on construction. * * @param[in] state Values in a std::vector */ void operator()(const std::vector& state) { write_vector(state); } /** * Writes the comment_prefix to the stream followed by a newline. */ void operator()() { if (output_ == nullptr) return; *output_ << comment_prefix_ << std::endl; } /** * Writes the comment_prefix then the message followed by a newline. * * @param[in] message A string */ void operator()(const std::string& message) { if (output_ == nullptr) return; *output_ << comment_prefix_ << message << std::endl; } private: /** * Output stream */ std::unique_ptr output_; /** * Comment prefix to use when printing comments: strings and blank lines */ std::string comment_prefix_; /** * Writes a set of values in csv format followed by a newline. * * Note: the precision of the output is determined by the settings * of the stream on construction. 
* * @param[in] v Values in a std::vector */ template void write_vector(const std::vector& v) { if (output_ == nullptr) return; if (v.empty()) { return; } auto last = v.end(); --last; for (auto it = v.begin(); it != last; ++it) { *output_ << *it << ","; } *output_ << v.back() << std::endl; } }; } // namespace callbacks } // namespace stan #endif StanHeaders/inst/include/src/stan/callbacks/stream_writer.hpp0000644000176200001440000000514014645137105024134 0ustar liggesusers#ifndef STAN_CALLBACKS_STREAM_WRITER_HPP #define STAN_CALLBACKS_STREAM_WRITER_HPP #include #include #include #include namespace stan { namespace callbacks { /** * stream_writer is an implementation * of writer that writes to a stream. */ class stream_writer : public writer { public: /** * Constructs a stream writer with an output stream * and an optional prefix for comments. * * @param[in, out] output stream to write * @param[in] comment_prefix string to stream before * each comment line. Default is "". */ explicit stream_writer(std::ostream& output, const std::string& comment_prefix = "") : output_(output), comment_prefix_(comment_prefix) {} /** * Virtual destructor */ virtual ~stream_writer() {} /** * Writes a set of names on a single line in csv format followed * by a newline. * * Note: the names are not escaped. * * @param[in] names Names in a std::vector */ void operator()(const std::vector& names) { write_vector(names); } /** * Writes a set of values in csv format followed by a newline. * * Note: the precision of the output is determined by the settings * of the stream on construction. * * @param[in] state Values in a std::vector */ void operator()(const std::vector& state) { write_vector(state); } /** * Writes the comment_prefix to the stream followed by a newline. */ void operator()() { output_ << comment_prefix_ << std::endl; } /** * Writes the comment_prefix then the message followed by a newline. 
* * @param[in] message A string */ void operator()(const std::string& message) { output_ << comment_prefix_ << message << std::endl; } private: /** * Output stream */ std::ostream& output_; /** * Comment prefix to use when printing comments: strings and blank lines */ std::string comment_prefix_; /** * Writes a set of values in csv format followed by a newline. * * Note: the precision of the output is determined by the settings * of the stream on construction. * * @param[in] v Values in a std::vector */ template void write_vector(const std::vector& v) { if (v.empty()) return; typename std::vector::const_iterator last = v.end(); --last; for (typename std::vector::const_iterator it = v.begin(); it != last; ++it) output_ << *it << ","; output_ << v.back() << std::endl; } }; } // namespace callbacks } // namespace stan #endif StanHeaders/inst/include/src/stan/callbacks/interrupt.hpp0000644000176200001440000000123614645137105023303 0ustar liggesusers#ifndef STAN_CALLBACKS_INTERRUPT_HPP #define STAN_CALLBACKS_INTERRUPT_HPP namespace stan { namespace callbacks { /** * interrupt is a base class defining the interface * for Stan interrupt callbacks. * * The interrupt is called from within Stan algorithms to allow * for the interfaces to handle interrupt signals (ctrl-c). */ class interrupt { public: /** * Callback function. * * This function is called by the algorithms allowing the interfaces * to break when necessary. */ virtual void operator()() {} /** * Virtual destructor. 
*/ virtual ~interrupt() {} }; } // namespace callbacks } // namespace stan #endif StanHeaders/inst/include/src/stan/lang/0000755000176200001440000000000014645137105017536 5ustar liggesusersStanHeaders/inst/include/src/stan/lang/rethrow_located.hpp0000644000176200001440000000017714645137105023441 0ustar liggesusers#ifndef STAN_LANG_RETHROW_LOCATED_HPP #define STAN_LANG_RETHROW_LOCATED_HPP #include #endif StanHeaders/inst/include/src/stan/mcmc/0000755000176200001440000000000014645137105017534 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/fixed_param_sampler.hpp0000644000176200001440000000071414645137105024251 0ustar liggesusers#ifndef STAN_MCMC_FIXED_PARAM_SAMPLER_HPP #define STAN_MCMC_FIXED_PARAM_SAMPLER_HPP #include #include #include namespace stan { namespace mcmc { class fixed_param_sampler : public base_mcmc { public: fixed_param_sampler() {} sample transition(sample& init_sample, callbacks::logger& logger) { return init_sample; } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/covar_adaptation.hpp0000644000176200001440000000271614645137105023571 0ustar liggesusers#ifndef STAN_MCMC_COVAR_ADAPTATION_HPP #define STAN_MCMC_COVAR_ADAPTATION_HPP #include #include #include namespace stan { namespace mcmc { class covar_adaptation : public windowed_adaptation { public: explicit covar_adaptation(int n) : windowed_adaptation("covariance"), estimator_(n) {} bool learn_covariance(Eigen::MatrixXd& covar, const Eigen::VectorXd& q) { if (adaptation_window()) estimator_.add_sample(q); if (end_adaptation_window()) { compute_next_window(); estimator_.sample_covariance(covar); double n = static_cast(estimator_.num_samples()); covar = (n / (n + 5.0)) * covar + 1e-3 * (5.0 / (n + 5.0)) * Eigen::MatrixXd::Identity(covar.rows(), covar.cols()); if (!covar.allFinite()) throw std::runtime_error( "Numerical overflow in metric adaptation. 
" "This occurs when the sampler encounters extreme values on the " "unconstrained space; this may happen when the posterior density " "function is too wide or improper. " "There may be problems with your model specification."); estimator_.restart(); ++adapt_window_counter_; return true; } ++adapt_window_counter_; return false; } protected: stan::math::welford_covar_estimator estimator_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/stepsize_adapter.hpp0000644000176200001440000000111614645137105023612 0ustar liggesusers#ifndef STAN_MCMC_STEPSIZE_ADAPTER_HPP #define STAN_MCMC_STEPSIZE_ADAPTER_HPP #include #include namespace stan { namespace mcmc { class stepsize_adapter : public base_adapter { public: stepsize_adapter() {} stepsize_adaptation& get_stepsize_adaptation() { return stepsize_adaptation_; } const stepsize_adaptation& get_stepsize_adaptation() const noexcept { return stepsize_adaptation_; } protected: stepsize_adaptation stepsize_adaptation_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/base_adaptation.hpp0000644000176200001440000000034514645137105023365 0ustar liggesusers#ifndef STAN_MCMC_BASE_ADAPTATION_HPP #define STAN_MCMC_BASE_ADAPTATION_HPP namespace stan { namespace mcmc { class base_adaptation { public: virtual void restart() {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/var_adaptation.hpp0000644000176200001440000000261114645137105023241 0ustar liggesusers#ifndef STAN_MCMC_VAR_ADAPTATION_HPP #define STAN_MCMC_VAR_ADAPTATION_HPP #include #include #include namespace stan { namespace mcmc { class var_adaptation : public windowed_adaptation { public: explicit var_adaptation(int n) : windowed_adaptation("variance"), estimator_(n) {} bool learn_variance(Eigen::VectorXd& var, const Eigen::VectorXd& q) { if (adaptation_window()) estimator_.add_sample(q); if (end_adaptation_window()) { compute_next_window(); 
estimator_.sample_variance(var); double n = static_cast(estimator_.num_samples()); var = (n / (n + 5.0)) * var + 1e-3 * (5.0 / (n + 5.0)) * Eigen::VectorXd::Ones(var.size()); if (!var.allFinite()) throw std::runtime_error( "Numerical overflow in metric adaptation. " "This occurs when the sampler encounters extreme values on the " "unconstrained space; this may happen when the posterior density " "function is too wide or improper. " "There may be problems with your model specification."); estimator_.restart(); ++adapt_window_counter_; return true; } ++adapt_window_counter_; return false; } protected: stan::math::welford_var_estimator estimator_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/base_adapter.hpp0000644000176200001440000000065614645137105022666 0ustar liggesusers#ifndef STAN_MCMC_BASE_ADAPTER_HPP #define STAN_MCMC_BASE_ADAPTER_HPP namespace stan { namespace mcmc { class base_adapter { public: base_adapter() : adapt_flag_(false) {} virtual void engage_adaptation() { adapt_flag_ = true; } virtual void disengage_adaptation() { adapt_flag_ = false; } bool adapting() { return adapt_flag_; } protected: bool adapt_flag_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/base_mcmc.hpp0000644000176200001440000000155414645137105022163 0ustar liggesusers#ifndef STAN_MCMC_BASE_MCMC_HPP #define STAN_MCMC_BASE_MCMC_HPP #include #include #include #include #include #include namespace stan { namespace mcmc { class base_mcmc { public: base_mcmc() {} virtual ~base_mcmc() {} virtual sample transition(sample& init_sample, callbacks::logger& logger) = 0; virtual void get_sampler_param_names(std::vector& names) {} virtual void get_sampler_params(std::vector& values) {} virtual void write_sampler_state(callbacks::writer& writer) {} virtual void get_sampler_diagnostic_names( std::vector& model_names, std::vector& names) {} virtual void get_sampler_diagnostics(std::vector& values) {} }; } // 
namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/stepsize_covar_adapter.hpp0000644000176200001440000000225214645137105025006 0ustar liggesusers#ifndef STAN_MCMC_STEPSIZE_COVAR_ADAPTER_HPP #define STAN_MCMC_STEPSIZE_COVAR_ADAPTER_HPP #include #include #include #include namespace stan { namespace mcmc { class stepsize_covar_adapter : public base_adapter { public: explicit stepsize_covar_adapter(int n) : covar_adaptation_(n) {} stepsize_adaptation& get_stepsize_adaptation() { return stepsize_adaptation_; } const stepsize_adaptation& get_stepsize_adaptation() const noexcept { return stepsize_adaptation_; } covar_adaptation& get_covar_adaptation() { return covar_adaptation_; } void set_window_params(unsigned int num_warmup, unsigned int init_buffer, unsigned int term_buffer, unsigned int base_window, callbacks::logger& logger) { covar_adaptation_.set_window_params(num_warmup, init_buffer, term_buffer, base_window, logger); } protected: stepsize_adaptation stepsize_adaptation_; covar_adaptation covar_adaptation_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/chains.hpp0000644000176200001440000004762114645137105021524 0ustar liggesusers#ifndef STAN_MCMC_CHAINS_HPP #define STAN_MCMC_CHAINS_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace mcmc { using Eigen::Dynamic; /** * An mcmc::chains object stores parameter names and * dimensionalities along with samples from multiple chains. * *

Synchronization: For arbitrary concurrent use, the * read and write methods need to be read/write locked. Multiple * writers can be used concurrently if they write to different * chains. Readers for single chains need only be read/write locked * with writers of that chain. For reading across chains, full * read/write locking is required. Thus methods will be classified * as global or single-chain read or write methods. * *

Storage Order: Storage is column/last-index major. */ template class chains { private: std::vector param_names_; Eigen::Matrix samples_; Eigen::VectorXi warmup_; static double mean(const Eigen::VectorXd& x) { return (x.array() / x.size()).sum(); } static double variance(const Eigen::VectorXd& x) { double m = mean(x); return ((x.array() - m) / std::sqrt((x.size() - 1.0))).square().sum(); } static double sd(const Eigen::VectorXd& x) { return std::sqrt(variance(x)); } static double covariance(const Eigen::VectorXd& x, const Eigen::VectorXd& y, std::ostream* err = 0) { if (x.rows() != y.rows() && err) *err << "warning: covariance of different length chains"; using boost::accumulators::accumulator_set; using boost::accumulators::stats; using boost::accumulators::tag::covariance; using boost::accumulators::tag::covariate1; using boost::accumulators::tag::variance; accumulator_set > > acc; int M = std::min(x.size(), y.size()); for (int i = 0; i < M; i++) acc(x(i), boost::accumulators::covariate1 = y(i)); return boost::accumulators::covariance(acc) * M / (M - 1); } static double correlation(const Eigen::VectorXd& x, const Eigen::VectorXd& y, std::ostream* err = 0) { if (x.rows() != y.rows() && err) *err << "warning: covariance of different length chains"; using boost::accumulators::accumulator_set; using boost::accumulators::stats; using boost::accumulators::tag::covariance; using boost::accumulators::tag::covariate1; using boost::accumulators::tag::variance; accumulator_set > > acc_xy; accumulator_set > acc_y; int M = std::min(x.size(), y.size()); for (int i = 0; i < M; i++) { acc_xy(x(i), boost::accumulators::covariate1 = y(i)); acc_y(y(i)); } double cov = boost::accumulators::covariance(acc_xy); if (cov > -1e-8 && cov < 1e-8) return cov; return cov / std::sqrt(boost::accumulators::variance(acc_xy) * boost::accumulators::variance(acc_y)); } static double quantile(const Eigen::VectorXd& x, const double prob) { using boost::accumulators::accumulator_set; using 
boost::accumulators::left; using boost::accumulators::quantile; using boost::accumulators::quantile_probability; using boost::accumulators::right; using boost::accumulators::stats; using boost::accumulators::tag::tail; using boost::accumulators::tag::tail_quantile; double M = x.rows(); // size_t cache_size = std::min(prob, 1-prob)*M + 2; size_t cache_size = M; if (prob < 0.5) { accumulator_set > > acc( tail::cache_size = cache_size); for (int i = 0; i < M; i++) acc(x(i)); return quantile(acc, quantile_probability = prob); } accumulator_set > > acc( tail::cache_size = cache_size); for (int i = 0; i < M; i++) acc(x(i)); return quantile(acc, quantile_probability = prob); } static Eigen::VectorXd quantiles(const Eigen::VectorXd& x, const Eigen::VectorXd& probs) { using boost::accumulators::accumulator_set; using boost::accumulators::left; using boost::accumulators::quantile; using boost::accumulators::quantile_probability; using boost::accumulators::right; using boost::accumulators::stats; using boost::accumulators::tag::tail; using boost::accumulators::tag::tail_quantile; double M = x.rows(); // size_t cache_size = M/2 + 2; size_t cache_size = M; // 2 + 2; accumulator_set > > acc_left( tail::cache_size = cache_size); accumulator_set > > acc_right( tail::cache_size = cache_size); for (int i = 0; i < M; i++) { acc_left(x(i)); acc_right(x(i)); } Eigen::VectorXd q(probs.size()); for (int i = 0; i < probs.size(); i++) { if (probs(i) < 0.5) q(i) = quantile(acc_left, quantile_probability = probs(i)); else q(i) = quantile(acc_right, quantile_probability = probs(i)); } return q; } static Eigen::VectorXd autocorrelation(const Eigen::VectorXd& x) { using stan::math::index_type; using std::vector; typedef typename index_type >::type idx_t; std::vector ac; std::vector sample(x.size()); for (int i = 0; i < x.size(); i++) sample[i] = x(i); stan::math::autocorrelation(sample, ac); Eigen::VectorXd ac2(ac.size()); for (idx_t i = 0; i < ac.size(); i++) ac2(i) = ac[i]; return ac2; } 
static Eigen::VectorXd autocovariance(const Eigen::VectorXd& x) { using stan::math::index_type; using std::vector; typedef typename index_type >::type idx_t; std::vector ac; std::vector sample(x.size()); for (int i = 0; i < x.size(); i++) sample[i] = x(i); stan::math::autocovariance(sample, ac); Eigen::VectorXd ac2(ac.size()); for (idx_t i = 0; i < ac.size(); i++) ac2(i) = ac[i]; return ac2; } /** * Return the split potential scale reduction (split R hat) * for the specified parameter. * * Current implementation takes the minimum number of samples * across chains as the number of samples per chain. * * @param VectorXd * @param Dynamic * @param samples * * @return */ double split_potential_scale_reduction( const Eigen::Matrix& samples) const { int chains = samples.size(); int n_samples = samples(0).size(); for (int chain = 1; chain < chains; chain++) { n_samples = std::min(n_samples, static_cast(samples(chain).size())); } if (n_samples % 2 == 1) n_samples--; int n = n_samples / 2; Eigen::VectorXd split_chain_mean(2 * chains); Eigen::VectorXd split_chain_var(2 * chains); for (int chain = 0; chain < chains; chain++) { split_chain_mean(2 * chain) = mean(samples(chain).topRows(n)); split_chain_mean(2 * chain + 1) = mean(samples(chain).bottomRows(n)); split_chain_var(2 * chain) = variance(samples(chain).topRows(n)); split_chain_var(2 * chain + 1) = variance(samples(chain).bottomRows(n)); } double var_between = n * variance(split_chain_mean); double var_within = mean(split_chain_var); // rewrote [(n-1)*W/n + B/n]/W as (n-1+ B/W)/n return sqrt((var_between / var_within + n - 1) / n); } public: explicit chains(const std::vector& param_names) : param_names_(param_names) {} explicit chains(const stan::io::stan_csv& stan_csv) : chains(stan_csv.header) { if (stan_csv.samples.rows() > 0) add(stan_csv); } inline int num_chains() const { return samples_.size(); } inline int num_params() const { return param_names_.size(); } const std::vector& param_names() const { return 
param_names_; } const std::string& param_name(int j) const { return param_names_[j]; } int index(const std::string& name) const { int index = -1; for (int i = 0; i < param_names_.size(); i++) if (param_names_[i] == name) return i; return index; } void set_warmup(const int chain, const int warmup) { warmup_(chain) = warmup; } void set_warmup(const int warmup) { warmup_.setConstant(warmup); } const Eigen::VectorXi& warmup() const { return warmup_; } int warmup(const int chain) const { return warmup_(chain); } int num_samples(const int chain) const { return samples_(chain).rows(); } int num_samples() const { int n = 0; for (int chain = 0; chain < num_chains(); chain++) n += num_samples(chain); return n; } int num_kept_samples(const int chain) const { return num_samples(chain) - warmup(chain); } int num_kept_samples() const { int n = 0; for (int chain = 0; chain < num_chains(); chain++) n += num_kept_samples(chain); return n; } void add(const int chain, const Eigen::MatrixXd& sample) { if (sample.cols() != num_params()) throw std::invalid_argument( "add(chain, sample): number of columns" " in sample does not match chains"); if (num_chains() == 0 || chain >= num_chains()) { int n = num_chains(); // Need this block for Windows. conservativeResize // does not keep the references. 
Eigen::Matrix samples_copy(num_chains()); Eigen::VectorXi warmup_copy(num_chains()); for (int i = 0; i < n; i++) { samples_copy(i) = samples_(i); warmup_copy(i) = warmup_(i); } samples_.resize(chain + 1); warmup_.resize(chain + 1); for (int i = 0; i < n; i++) { samples_(i) = samples_copy(i); warmup_(i) = warmup_copy(i); } for (int i = n; i < chain + 1; i++) { samples_(i) = Eigen::MatrixXd(0, num_params()); warmup_(i) = 0; } } int row = samples_(chain).rows(); Eigen::MatrixXd new_samples(row + sample.rows(), num_params()); new_samples << samples_(chain), sample; samples_(chain) = new_samples; } void add(const Eigen::MatrixXd& sample) { if (sample.rows() == 0) return; if (sample.cols() != num_params()) throw std::invalid_argument( "add(sample): number of columns in" " sample does not match chains"); add(num_chains(), sample); } /** * Convert a vector of vector to Eigen::MatrixXd * * This method is added for the benefit of software wrapping * Stan (e.g., PyStan) so that it need not additionally wrap Eigen. 
* */ void add(const std::vector >& sample) { int n_row = sample.size(); if (n_row == 0) return; int n_col = sample[0].size(); Eigen::MatrixXd sample_copy(n_row, n_col); for (int i = 0; i < n_row; i++) { sample_copy.row(i) = Eigen::VectorXd::Map(&sample[i][0], sample[0].size()); } add(sample_copy); } void add(const stan::io::stan_csv& stan_csv) { if (stan_csv.header.size() != num_params()) throw std::invalid_argument( "add(stan_csv): number of columns in" " sample does not match chains"); for (int i = 0; i < num_params(); i++) { if (param_names_[i] != stan_csv.header[i]) { std::stringstream ss; ss << "add(stan_csv): header " << param_names_[i] << " does not match chain's header (" << stan_csv.header[i] << ")"; throw std::invalid_argument(ss.str()); } } add(stan_csv.samples); if (stan_csv.metadata.save_warmup) set_warmup(num_chains() - 1, stan_csv.metadata.num_warmup); } Eigen::VectorXd samples(const int chain, const int index) const { return samples_(chain).col(index).bottomRows(num_kept_samples(chain)); } Eigen::VectorXd samples(const int index) const { Eigen::VectorXd s(num_kept_samples()); int start = 0; for (int chain = 0; chain < num_chains(); chain++) { int n = num_kept_samples(chain); s.middleRows(start, n) = samples_(chain).col(index).bottomRows(n); start += n; } return s; } Eigen::VectorXd samples(const int chain, const std::string& name) const { return samples(chain, index(name)); } Eigen::VectorXd samples(const std::string& name) const { return samples(index(name)); } double mean(const int chain, const int index) const { return mean(samples(chain, index)); } double mean(const int index) const { return mean(samples(index)); } double mean(const int chain, const std::string& name) const { return mean(chain, index(name)); } double mean(const std::string& name) const { return mean(index(name)); } double sd(const int chain, const int index) const { return sd(samples(chain, index)); } double sd(const int index) const { return sd(samples(index)); } double 
sd(const int chain, const std::string& name) const { return sd(chain, index(name)); } double sd(const std::string& name) const { return sd(index(name)); } double variance(const int chain, const int index) const { return variance(samples(chain, index)); } double variance(const int index) const { return variance(samples(index)); } double variance(const int chain, const std::string& name) const { return variance(chain, index(name)); } double variance(const std::string& name) const { return variance(index(name)); } double covariance(const int chain, const int index1, const int index2) const { return covariance(samples(chain, index1), samples(chain, index2)); } double covariance(const int index1, const int index2) const { return covariance(samples(index1), samples(index2)); } double covariance(const int chain, const std::string& name1, const std::string& name2) const { return covariance(chain, index(name1), index(name2)); } double covariance(const std::string& name1, const std::string& name2) const { return covariance(index(name1), index(name2)); } double correlation(const int chain, const int index1, const int index2) const { return correlation(samples(chain, index1), samples(chain, index2)); } double correlation(const int index1, const int index2) const { return correlation(samples(index1), samples(index2)); } double correlation(const int chain, const std::string& name1, const std::string& name2) const { return correlation(chain, index(name1), index(name2)); } double correlation(const std::string& name1, const std::string& name2) const { return correlation(index(name1), index(name2)); } double quantile(const int chain, const int index, const double prob) const { return quantile(samples(chain, index), prob); } double quantile(const int index, const double prob) const { return quantile(samples(index), prob); } double quantile(int chain, const std::string& name, double prob) const { return quantile(chain, index(name), prob); } double quantile(const std::string& name, 
const double prob) const { return quantile(index(name), prob); } Eigen::VectorXd quantiles(int chain, int index, const Eigen::VectorXd& probs) const { return quantiles(samples(chain, index), probs); } Eigen::VectorXd quantiles(int index, const Eigen::VectorXd& probs) const { return quantiles(samples(index), probs); } Eigen::VectorXd quantiles(int chain, const std::string& name, const Eigen::VectorXd& probs) const { return quantiles(chain, index(name), probs); } Eigen::VectorXd quantiles(const std::string& name, const Eigen::VectorXd& probs) const { return quantiles(index(name), probs); } Eigen::Vector2d central_interval(int chain, int index, double prob) const { double low_prob = (1 - prob) / 2; double high_prob = 1 - low_prob; Eigen::Vector2d interval; interval << quantile(chain, index, low_prob), quantile(chain, index, high_prob); return interval; } Eigen::Vector2d central_interval(int index, double prob) const { double low_prob = (1 - prob) / 2; double high_prob = 1 - low_prob; Eigen::Vector2d interval; interval << quantile(index, low_prob), quantile(index, high_prob); return interval; } Eigen::Vector2d central_interval(int chain, const std::string& name, double prob) const { return central_interval(chain, index(name), prob); } Eigen::Vector2d central_interval(const std::string& name, double prob) const { return central_interval(index(name), prob); } Eigen::VectorXd autocorrelation(const int chain, const int index) const { return autocorrelation(samples(chain, index)); } Eigen::VectorXd autocorrelation(int chain, const std::string& name) const { return autocorrelation(chain, index(name)); } Eigen::VectorXd autocovariance(const int chain, const int index) const { return autocovariance(samples(chain, index)); } Eigen::VectorXd autocovariance(int chain, const std::string& name) const { return autocovariance(chain, index(name)); } // FIXME: reimplement using autocorrelation. 
double effective_sample_size(const int index) const { int n_chains = num_chains(); std::vector draws(n_chains); std::vector sizes(n_chains); int n_kept_samples = 0; for (int chain = 0; chain < n_chains; ++chain) { n_kept_samples = num_kept_samples(chain); draws[chain] = samples_(chain).col(index).bottomRows(n_kept_samples).data(); sizes[chain] = n_kept_samples; } return analyze::compute_effective_sample_size(draws, sizes); } double effective_sample_size(const std::string& name) const { return effective_sample_size(index(name)); } double split_effective_sample_size(const int index) const { int n_chains = num_chains(); std::vector draws(n_chains); std::vector sizes(n_chains); int n_kept_samples = 0; for (int chain = 0; chain < n_chains; ++chain) { n_kept_samples = num_kept_samples(chain); draws[chain] = samples_(chain).col(index).bottomRows(n_kept_samples).data(); sizes[chain] = n_kept_samples; } return analyze::compute_split_effective_sample_size(draws, sizes); } double split_effective_sample_size(const std::string& name) const { return split_effective_sample_size(index(name)); } double split_potential_scale_reduction(const int index) const { int n_chains = num_chains(); std::vector draws(n_chains); std::vector sizes(n_chains); int n_kept_samples = 0; for (int chain = 0; chain < n_chains; ++chain) { n_kept_samples = num_kept_samples(chain); draws[chain] = samples_(chain).col(index).bottomRows(n_kept_samples).data(); sizes[chain] = n_kept_samples; } return analyze::compute_split_potential_scale_reduction(draws, sizes); } double split_potential_scale_reduction(const std::string& name) const { return split_potential_scale_reduction(index(name)); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/sample.hpp0000644000176200001440000000306314645137105021530 0ustar liggesusers#ifndef STAN_MCMC_SAMPLE_HPP #define STAN_MCMC_SAMPLE_HPP #include #include #include namespace stan { namespace mcmc { class sample { public: sample(const 
Eigen::VectorXd& q, double log_prob, double stat) : cont_params_(q), log_prob_(log_prob), accept_stat_(stat) {} sample(Eigen::VectorXd&& q, double log_prob, double stat) // NOLINT : cont_params_(std::move(q)), log_prob_(log_prob), accept_stat_(stat) {} sample(const sample&) = default; sample(sample&&) = default; sample& operator=(const sample&) = default; sample& operator=(sample&&) = default; virtual ~sample() = default; int size_cont() const { return cont_params_.size(); } double cont_params(int k) const { return cont_params_(k); } void cont_params(Eigen::VectorXd& x) const { x = cont_params_; } const Eigen::VectorXd& cont_params() const { return cont_params_; } inline double log_prob() const { return log_prob_; } inline double accept_stat() const { return accept_stat_; } static void get_sample_param_names(std::vector& names) { names.push_back("lp__"); names.push_back("accept_stat__"); } void get_sample_params(std::vector& values) { values.push_back(log_prob_); values.push_back(accept_stat_); } private: Eigen::VectorXd cont_params_; // Continuous coordinates of sample double log_prob_; // Log probability of sample double accept_stat_; // Acceptance statistic of transition }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/0000755000176200001440000000000014645137105020303 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/integrators/0000755000176200001440000000000014645137105022644 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/integrators/expl_leapfrog.hpp0000644000176200001440000000222514645137105026205 0ustar liggesusers#ifndef STAN_MCMC_HMC_INTEGRATORS_EXPL_LEAPFROG_HPP #define STAN_MCMC_HMC_INTEGRATORS_EXPL_LEAPFROG_HPP #include #include #include namespace stan { namespace mcmc { template class expl_leapfrog : public base_leapfrog { public: expl_leapfrog() : base_leapfrog() {} void begin_update_p(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& 
logger) { z.p -= epsilon * hamiltonian.dphi_dq(z, logger); } void update_q(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) { z.q += epsilon * hamiltonian.dtau_dp(z); hamiltonian.update_potential_gradient(z, logger); } void end_update_p(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) { z.p -= epsilon * hamiltonian.dphi_dq(z, logger); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/integrators/base_integrator.hpp0000644000176200001440000000077514645137105026536 0ustar liggesusers#ifndef STAN_MCMC_HMC_INTEGRATORS_BASE_INTEGRATOR_HPP #define STAN_MCMC_HMC_INTEGRATORS_BASE_INTEGRATOR_HPP #include namespace stan { namespace mcmc { template class base_integrator { public: base_integrator() {} virtual void evolve(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, const double epsilon, callbacks::logger& logger) = 0; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/integrators/impl_leapfrog.hpp0000644000176200001440000000552514645137105026204 0ustar liggesusers#ifndef STAN_MCMC_HMC_INTEGRATORS_IMPL_LEAPFROG_HPP #define STAN_MCMC_HMC_INTEGRATORS_IMPL_LEAPFROG_HPP #include #include namespace stan { namespace mcmc { template class impl_leapfrog : public base_leapfrog { public: impl_leapfrog() : base_leapfrog(), max_num_fixed_point_(10), fixed_point_threshold_(1e-8) {} void begin_update_p(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) { hat_phi(z, hamiltonian, epsilon, logger); hat_tau(z, hamiltonian, epsilon, this->max_num_fixed_point_, logger); } void update_q(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) { // hat{T} = dT/dp * d/dq Eigen::VectorXd q_init = z.q + 0.5 * epsilon * hamiltonian.dtau_dp(z); Eigen::VectorXd delta_q(z.q.size()); for (int n 
= 0; n < this->max_num_fixed_point_; ++n) { delta_q = z.q; z.q.noalias() = q_init + 0.5 * epsilon * hamiltonian.dtau_dp(z); hamiltonian.update_metric(z, logger); delta_q -= z.q; if (delta_q.cwiseAbs().maxCoeff() < this->fixed_point_threshold_) break; } hamiltonian.update_gradients(z, logger); } void end_update_p(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) { hat_tau(z, hamiltonian, epsilon, 1, logger); hat_phi(z, hamiltonian, epsilon, logger); } // hat{phi} = dphi/dq * d/dp void hat_phi(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) { z.p -= epsilon * hamiltonian.dphi_dq(z, logger); } // hat{tau} = dtau/dq * d/dp void hat_tau(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, int num_fixed_point, callbacks::logger& logger) { Eigen::VectorXd p_init = z.p; Eigen::VectorXd delta_p(z.p.size()); for (int n = 0; n < num_fixed_point; ++n) { delta_p = z.p; z.p.noalias() = p_init - epsilon * hamiltonian.dtau_dq(z, logger); delta_p -= z.p; if (delta_p.cwiseAbs().maxCoeff() < this->fixed_point_threshold_) break; } } int max_num_fixed_point() { return this->max_num_fixed_point_; } void set_max_num_fixed_point(int n) { if (n > 0) this->max_num_fixed_point_ = n; } double fixed_point_threshold() { return this->fixed_point_threshold_; } void set_fixed_point_threshold(double t) { if (t > 0) this->fixed_point_threshold_ = t; } private: int max_num_fixed_point_; double fixed_point_threshold_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/integrators/base_leapfrog.hpp0000644000176200001440000000743214645137105026154 0ustar liggesusers#ifndef STAN_MCMC_HMC_INTEGRATORS_BASE_LEAPFROG_HPP #define STAN_MCMC_HMC_INTEGRATORS_BASE_LEAPFROG_HPP #include #include #include #include namespace stan { namespace mcmc { template class base_leapfrog : public base_integrator { public: base_leapfrog() : 
base_integrator() {} void evolve(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, const double epsilon, callbacks::logger& logger) { begin_update_p(z, hamiltonian, 0.5 * epsilon, logger); update_q(z, hamiltonian, epsilon, logger); end_update_p(z, hamiltonian, 0.5 * epsilon, logger); } void verbose_evolve(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, const double epsilon, callbacks::logger& logger) { std::stringstream msg; msg.precision(6); int width = 14; int nColumn = 4; msg << "Verbose Hamiltonian Evolution, Step Size = " << epsilon << ":"; logger.info(msg); msg.str(""); msg << " " << std::setw(nColumn * width) << std::setfill('-') << "" << std::setfill(' '); logger.info(msg); msg.str(""); msg << " " << std::setw(width) << std::left << "Poisson" << std::setw(width) << std::left << "Initial" << std::setw(width) << std::left << "Current" << std::setw(width) << std::left << "DeltaH"; logger.info(msg); msg.str(""); msg << " " << std::setw(width) << std::left << "Operator" << std::setw(width) << std::left << "Hamiltonian" << std::setw(width) << std::left << "Hamiltonian" << std::setw(width) << std::left << "/ Stepsize^{2}"; logger.info(msg); msg.str(""); msg << " " << std::setw(nColumn * width) << std::setfill('-') << "" << std::setfill(' '); logger.info(msg); double H0 = hamiltonian.H(z); begin_update_p(z, hamiltonian, 0.5 * epsilon, logger); double H1 = hamiltonian.H(z); msg.str(""); msg << " " << std::setw(width) << std::left << "hat{V}/2" << std::setw(width) << std::left << H0 << std::setw(width) << std::left << H1 << std::setw(width) << std::left << (H1 - H0) / (epsilon * epsilon); logger.info(msg); update_q(z, hamiltonian, epsilon, logger); double H2 = hamiltonian.H(z); msg.str(""); msg << " " << std::setw(width) << std::left << "hat{T}" << std::setw(width) << std::left << H0 << std::setw(width) << std::left << H2 << std::setw(width) << std::left << (H2 - H0) / (epsilon * epsilon); logger.info(msg); end_update_p(z, hamiltonian, 0.5 * 
epsilon, logger); double H3 = hamiltonian.H(z); msg.str(""); msg << " " << std::setw(width) << std::left << "hat{V}/2" << std::setw(width) << std::left << H0 << std::setw(width) << std::left << H3 << std::setw(width) << std::left << (H3 - H0) / (epsilon * epsilon); logger.info(msg); msg.str(""); msg << " " << std::setw(nColumn * width) << std::setfill('-') << "" << std::setfill(' '); logger.info(msg); } virtual void begin_update_p(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) = 0; virtual void update_q(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) = 0; virtual void end_update_p(typename Hamiltonian::PointType& z, Hamiltonian& hamiltonian, double epsilon, callbacks::logger& logger) = 0; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/0000755000176200001440000000000014645137105022771 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/unit_e_metric.hpp0000644000176200001440000000273414645137105026336 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_UNIT_E_METRIC_HPP #define STAN_MCMC_HMC_HAMILTONIANS_UNIT_E_METRIC_HPP #include #include #include #include namespace stan { namespace mcmc { // Euclidean manifold with unit metric template class unit_e_metric : public base_hamiltonian { public: explicit unit_e_metric(const Model& model) : base_hamiltonian(model) {} double T(unit_e_point& z) { return 0.5 * z.p.squaredNorm(); } double tau(unit_e_point& z) { return T(z); } double phi(unit_e_point& z) { return this->V(z); } double dG_dt(unit_e_point& z, callbacks::logger& logger) { return 2 * T(z) - z.q.dot(z.g); } Eigen::VectorXd dtau_dq(unit_e_point& z, callbacks::logger& logger) { return Eigen::VectorXd::Zero(this->model_.num_params_r()); } Eigen::VectorXd dtau_dp(unit_e_point& z) { return z.p; } Eigen::VectorXd dphi_dq(unit_e_point& z, callbacks::logger& logger) { return 
z.g; } void sample_p(unit_e_point& z, BaseRNG& rng) { boost::variate_generator > rand_unit_gaus(rng, boost::normal_distribution<>()); for (int i = 0; i < z.p.size(); ++i) z.p(i) = rand_unit_gaus(); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/softabs_point.hpp0000644000176200001440000000243714645137105026362 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_SOFTABS_POINT_HPP #define STAN_MCMC_HMC_HAMILTONIANS_SOFTABS_POINT_HPP #include #include namespace stan { namespace mcmc { /** * Point in a phase space with a base * Riemannian manifold with SoftAbs metric */ class softabs_point : public ps_point { public: explicit softabs_point(int n) : ps_point(n), alpha(1.0), hessian(Eigen::MatrixXd::Identity(n, n)), eigen_deco(n), log_det_metric(0), softabs_lambda(Eigen::VectorXd::Zero(n)), softabs_lambda_inv(Eigen::VectorXd::Zero(n)), pseudo_j(Eigen::MatrixXd::Identity(n, n)) {} // SoftAbs regularization parameter double alpha; Eigen::MatrixXd hessian; // Eigendecomposition of the Hessian Eigen::SelfAdjointEigenSolver eigen_deco; // Log determinant of metric double log_det_metric; // SoftAbs transformed eigenvalues of Hessian Eigen::VectorXd softabs_lambda; Eigen::VectorXd softabs_lambda_inv; // Psuedo-Jacobian of the eigenvalues Eigen::MatrixXd pseudo_j; virtual inline void write_metric(stan::callbacks::writer& writer) { writer("No free parameters for SoftAbs metric"); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/dense_e_point.hpp0000644000176200001440000000271014645137105026315 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_DENSE_E_POINT_HPP #define STAN_MCMC_HMC_HAMILTONIANS_DENSE_E_POINT_HPP #include #include namespace stan { namespace mcmc { /** * Point in a phase space with a base * Euclidean manifold with dense metric */ class dense_e_point : public ps_point { public: /** * Inverse mass matrix. 
*/ Eigen::MatrixXd inv_e_metric_; /** * Construct a dense point in n-dimensional phase space * with identity matrix as inverse mass matrix. * * @param n number of dimensions */ explicit dense_e_point(int n) : ps_point(n), inv_e_metric_(n, n) { inv_e_metric_.setIdentity(); } /** * Set elements of mass matrix * * @param inv_e_metric initial mass matrix */ void set_metric(const Eigen::MatrixXd& inv_e_metric) { inv_e_metric_ = inv_e_metric; } /** * Write elements of mass matrix to string and handoff to writer. * * @param writer Stan writer callback */ inline void write_metric(stan::callbacks::writer& writer) { writer("Elements of inverse mass matrix:"); for (int i = 0; i < inv_e_metric_.rows(); ++i) { std::stringstream inv_e_metric_ss; inv_e_metric_ss << inv_e_metric_(i, 0); for (int j = 1; j < inv_e_metric_.cols(); ++j) inv_e_metric_ss << ", " << inv_e_metric_(i, j); writer(inv_e_metric_ss.str()); } } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/diag_e_metric.hpp0000644000176200001440000000314314645137105026256 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_DIAG_E_METRIC_HPP #define STAN_MCMC_HMC_HAMILTONIANS_DIAG_E_METRIC_HPP #include #include #include #include #include namespace stan { namespace mcmc { // Euclidean manifold with diagonal metric template class diag_e_metric : public base_hamiltonian { public: explicit diag_e_metric(const Model& model) : base_hamiltonian(model) {} double T(diag_e_point& z) { return 0.5 * z.p.dot(z.inv_e_metric_.cwiseProduct(z.p)); } double tau(diag_e_point& z) { return T(z); } double phi(diag_e_point& z) { return this->V(z); } double dG_dt(diag_e_point& z, callbacks::logger& logger) { return 2 * T(z) - z.q.dot(z.g); } Eigen::VectorXd dtau_dq(diag_e_point& z, callbacks::logger& logger) { return Eigen::VectorXd::Zero(this->model_.num_params_r()); } Eigen::VectorXd dtau_dp(diag_e_point& z) { return z.inv_e_metric_.cwiseProduct(z.p); } Eigen::VectorXd dphi_dq(diag_e_point& 
z, callbacks::logger& logger) { return z.g; } void sample_p(diag_e_point& z, BaseRNG& rng) { boost::variate_generator > rand_diag_gaus(rng, boost::normal_distribution<>()); for (int i = 0; i < z.p.size(); ++i) z.p(i) = rand_diag_gaus() / sqrt(z.inv_e_metric_(i)); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/ps_point.hpp0000644000176200001440000000275314645137105025344 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_PS_POINT_HPP #define STAN_MCMC_HMC_HAMILTONIANS_PS_POINT_HPP #include #include #include #include #include namespace stan { namespace mcmc { using Eigen::Dynamic; /** * Point in a generic phase space */ class ps_point { public: explicit ps_point(int n) : q(n), p(n), g(n) {} Eigen::VectorXd q; Eigen::VectorXd p; Eigen::VectorXd g; double V{0}; virtual inline void get_param_names(std::vector& model_names, std::vector& names) { names.reserve(q.size() + p.size() + g.size()); for (int i = 0; i < q.size(); ++i) names.emplace_back(model_names[i]); for (int i = 0; i < p.size(); ++i) names.emplace_back(std::string("p_") + model_names[i]); for (int i = 0; i < g.size(); ++i) names.emplace_back(std::string("g_") + model_names[i]); } virtual inline void get_params(std::vector& values) { values.reserve(q.size() + p.size() + g.size()); for (int i = 0; i < q.size(); ++i) values.push_back(q[i]); for (int i = 0; i < p.size(); ++i) values.push_back(p[i]); for (int i = 0; i < g.size(); ++i) values.push_back(g[i]); } /** * Writes the metric * * @param writer writer callback */ virtual inline void write_metric(stan::callbacks::writer& writer) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/base_hamiltonian.hpp0000644000176200001440000000554714645137105027012 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_BASE_HAMILTONIAN_HPP #define STAN_MCMC_HMC_HAMILTONIANS_BASE_HAMILTONIAN_HPP #include #include #include #include #include #include #include 
#include namespace stan { namespace mcmc { template class base_hamiltonian { public: explicit base_hamiltonian(const Model& model) : model_(model) {} ~base_hamiltonian() {} typedef Point PointType; virtual double T(Point& z) = 0; double V(Point& z) { return z.V; } virtual double tau(Point& z) = 0; virtual double phi(Point& z) = 0; double H(Point& z) { return T(z) + V(z); } // The time derivative of the virial, G = \sum_{d = 1}^{D} q^{d} p_{d}. virtual double dG_dt(Point& z, callbacks::logger& logger) = 0; // tau = 0.5 p_{i} p_{j} Lambda^{ij} (q) virtual Eigen::VectorXd dtau_dq(Point& z, callbacks::logger& logger) = 0; virtual Eigen::VectorXd dtau_dp(Point& z) = 0; // phi = 0.5 * log | Lambda (q) | + V(q) virtual Eigen::VectorXd dphi_dq(Point& z, callbacks::logger& logger) = 0; virtual void sample_p(Point& z, BaseRNG& rng) = 0; void init(Point& z, callbacks::logger& logger) { this->update_potential_gradient(z, logger); } void update_potential(Point& z, callbacks::logger& logger) { try { z.V = -stan::model::log_prob_propto(model_, z.q); } catch (const std::exception& e) { this->write_error_msg_(e, logger); z.V = std::numeric_limits::infinity(); } } void update_potential_gradient(Point& z, callbacks::logger& logger) { try { stan::model::gradient(model_, z.q, z.V, z.g, logger); z.V = -z.V; } catch (const std::exception& e) { this->write_error_msg_(e, logger); z.V = std::numeric_limits::infinity(); } z.g = -z.g; } void update_metric(Point& z, callbacks::logger& logger) {} void update_metric_gradient(Point& z, callbacks::logger& logger) {} void update_gradients(Point& z, callbacks::logger& logger) { update_potential_gradient(z, logger); } protected: const Model& model_; void write_error_msg_(const std::exception& e, callbacks::logger& logger) { logger.error( "Informational Message: The current Metropolis proposal " "is about to be rejected because of the following issue:"); logger.error(e.what()); logger.error( "If this warning occurs sporadically, such as for highly " 
"constrained variable types like covariance matrices, " "then the sampler is fine,"); logger.error( "but if this warning occurs often then your model may be " "either severely ill-conditioned or misspecified."); logger.error(""); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/dense_e_metric.hpp0000644000176200001440000000340014645137105026444 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_DENSE_E_METRIC_HPP #define STAN_MCMC_HMC_HAMILTONIANS_DENSE_E_METRIC_HPP #include #include #include #include #include #include namespace stan { namespace mcmc { // Euclidean manifold with dense metric template class dense_e_metric : public base_hamiltonian { public: explicit dense_e_metric(const Model& model) : base_hamiltonian(model) {} double T(dense_e_point& z) { return 0.5 * z.p.transpose() * z.inv_e_metric_ * z.p; } double tau(dense_e_point& z) { return T(z); } double phi(dense_e_point& z) { return this->V(z); } double dG_dt(dense_e_point& z, callbacks::logger& logger) { return 2 * T(z) - z.q.dot(z.g); } Eigen::VectorXd dtau_dq(dense_e_point& z, callbacks::logger& logger) { return Eigen::VectorXd::Zero(this->model_.num_params_r()); } Eigen::VectorXd dtau_dp(dense_e_point& z) { return z.inv_e_metric_ * z.p; } Eigen::VectorXd dphi_dq(dense_e_point& z, callbacks::logger& logger) { return z.g; } void sample_p(dense_e_point& z, BaseRNG& rng) { typedef typename stan::math::index_type::type idx_t; boost::variate_generator > rand_dense_gaus(rng, boost::normal_distribution<>()); Eigen::VectorXd u(z.p.size()); for (idx_t i = 0; i < u.size(); ++i) u(i) = rand_dense_gaus(); z.p = z.inv_e_metric_.llt().matrixU().solve(u); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/diag_e_point.hpp0000644000176200001440000000266114645137105026130 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_DIAG_E_POINT_HPP #define STAN_MCMC_HMC_HAMILTONIANS_DIAG_E_POINT_HPP #include 
#include namespace stan { namespace mcmc { /** * Point in a phase space with a base * Euclidean manifold with diagonal metric */ class diag_e_point : public ps_point { public: /** * Vector of diagonal elements of inverse mass matrix. */ Eigen::VectorXd inv_e_metric_; /** * Construct a diag point in n-dimensional phase space * with vector of ones for diagonal elements of inverse mass matrix. * * @param n number of dimensions */ explicit diag_e_point(int n) : ps_point(n), inv_e_metric_(n) { inv_e_metric_.setOnes(); } /** * Set elements of mass matrix * * @param inv_e_metric initial mass matrix */ void set_metric(const Eigen::VectorXd& inv_e_metric) { inv_e_metric_ = inv_e_metric; } /** * Write elements of mass matrix to string and handoff to writer. * * @param writer Stan writer callback */ inline void write_metric(stan::callbacks::writer& writer) { writer("Diagonal elements of inverse mass matrix:"); std::stringstream inv_e_metric_ss; inv_e_metric_ss << inv_e_metric_(0); for (int i = 1; i < inv_e_metric_.size(); ++i) inv_e_metric_ss << ", " << inv_e_metric_(i); writer(inv_e_metric_ss.str()); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/unit_e_point.hpp0000644000176200001440000000103114645137105026171 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_UNIT_E_POINT_HPP #define STAN_MCMC_HMC_HAMILTONIANS_UNIT_E_POINT_HPP #include namespace stan { namespace mcmc { /** * Point in a phase space with a base * Euclidean manifold with unit metric */ class unit_e_point : public ps_point { public: explicit unit_e_point(int n) : ps_point(n) {} }; inline void write_metric(stan::callbacks::writer& writer) { writer("No free parameters for unit metric"); } } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/hamiltonians/softabs_metric.hpp0000644000176200001440000001553614645137105026520 0ustar liggesusers#ifndef STAN_MCMC_HMC_HAMILTONIANS_SOFTABS_METRIC_HPP #define 
STAN_MCMC_HMC_HAMILTONIANS_SOFTABS_METRIC_HPP #include #include #include #include #include namespace stan { namespace mcmc { template struct softabs_fun { const Model& model_; std::ostream* o_; softabs_fun(const Model& m, std::ostream* out) : model_(m), o_(out) {} template T operator()(const Eigen::Matrix& x) const { // log_prob() requires non-const but doesn't modify its argument return model_.template log_prob( const_cast&>(x), o_); } }; // Riemannian manifold with SoftAbs metric template class softabs_metric : public base_hamiltonian { private: typedef typename stan::math::index_type::type idx_t; public: explicit softabs_metric(const Model& model) : base_hamiltonian(model) {} double T(softabs_point& z) { return this->tau(z) + 0.5 * z.log_det_metric; } double tau(softabs_point& z) { Eigen::VectorXd Qp = z.eigen_deco.eigenvectors().transpose() * z.p; return 0.5 * Qp.transpose() * z.softabs_lambda_inv.cwiseProduct(Qp); } double phi(softabs_point& z) { return this->V(z) + 0.5 * z.log_det_metric; } double dG_dt(softabs_point& z, callbacks::logger& logger) { return 2 * T(z) - z.q.dot(dtau_dq(z, logger) + dphi_dq(z, logger)); } Eigen::VectorXd dtau_dq(softabs_point& z, callbacks::logger& logger) { Eigen::VectorXd a = z.softabs_lambda_inv.cwiseProduct( z.eigen_deco.eigenvectors().transpose() * z.p); Eigen::MatrixXd A = a.asDiagonal() * z.eigen_deco.eigenvectors().transpose(); Eigen::MatrixXd B = z.pseudo_j.selfadjointView() * A; Eigen::MatrixXd C = A.transpose() * B; Eigen::VectorXd b(z.q.size()); stan::math::grad_tr_mat_times_hessian(softabs_fun(this->model_, 0), z.q, C, b); return 0.5 * b; } Eigen::VectorXd dtau_dp(softabs_point& z) { return z.eigen_deco.eigenvectors() * z.softabs_lambda_inv.cwiseProduct( z.eigen_deco.eigenvectors().transpose() * z.p); } Eigen::VectorXd dphi_dq(softabs_point& z, callbacks::logger& logger) { Eigen::VectorXd a = z.softabs_lambda_inv.cwiseProduct(z.pseudo_j.diagonal()); Eigen::MatrixXd A = a.asDiagonal() * 
z.eigen_deco.eigenvectors().transpose(); Eigen::MatrixXd B = z.eigen_deco.eigenvectors() * A; stan::math::grad_tr_mat_times_hessian(softabs_fun(this->model_, 0), z.q, B, a); return -0.5 * a + z.g; } void sample_p(softabs_point& z, BaseRNG& rng) { boost::variate_generator > rand_unit_gaus(rng, boost::normal_distribution<>()); Eigen::VectorXd a(z.p.size()); for (idx_t n = 0; n < z.p.size(); ++n) a(n) = sqrt(z.softabs_lambda(n)) * rand_unit_gaus(); z.p = z.eigen_deco.eigenvectors() * a; } void init(softabs_point& z, callbacks::logger& logger) { update_metric(z, logger); update_metric_gradient(z, logger); } void update_metric(softabs_point& z, callbacks::logger& logger) { math::hessian >(softabs_fun(this->model_, 0), z.q, z.V, z.g, z.hessian); z.V = -z.V; z.g = -z.g; z.hessian = -z.hessian; // Compute the eigen decomposition of the Hessian, // then perform the SoftAbs transformation z.eigen_deco.compute(z.hessian); for (idx_t i = 0; i < z.q.size(); ++i) { double lambda = z.eigen_deco.eigenvalues()(i); double alpha_lambda = z.alpha * lambda; double softabs_lambda = 0; // Thresholds defined such that the approximation // error is on the same order of double precision if (std::fabs(alpha_lambda) < lower_softabs_thresh) { softabs_lambda = (1.0 + (1.0 / 3.0) * alpha_lambda * alpha_lambda) / z.alpha; } else if (std::fabs(alpha_lambda) > upper_softabs_thresh) { softabs_lambda = std::fabs(lambda); } else { softabs_lambda = lambda / std::tanh(alpha_lambda); } z.softabs_lambda(i) = softabs_lambda; z.softabs_lambda_inv(i) = 1.0 / softabs_lambda; } // Compute the log determinant of the metric z.log_det_metric = 0; for (idx_t i = 0; i < z.q.size(); ++i) z.log_det_metric += std::log(z.softabs_lambda(i)); } void update_metric_gradient(softabs_point& z, callbacks::logger& logger) { // Compute the pseudo-Jacobian of the SoftAbs transform for (idx_t i = 0; i < z.q.size(); ++i) { for (idx_t j = 0; j <= i; ++j) { double delta = z.eigen_deco.eigenvalues()(i) - 
z.eigen_deco.eigenvalues()(j); if (std::fabs(delta) < jacobian_thresh) { double lambda = z.eigen_deco.eigenvalues()(i); double alpha_lambda = z.alpha * lambda; // Thresholds defined such that the approximation // error is on the same order of double precision if (std::fabs(alpha_lambda) < lower_softabs_thresh) { z.pseudo_j(i, j) = (2.0 / 3.0) * alpha_lambda * (1.0 - (2.0 / 15.0) * alpha_lambda * alpha_lambda); } else if (std::fabs(alpha_lambda) > upper_softabs_thresh) { z.pseudo_j(i, j) = lambda > 0 ? 1 : -1; } else { double sdx = std::sinh(alpha_lambda) / lambda; z.pseudo_j(i, j) = (z.softabs_lambda(i) - z.alpha / (sdx * sdx)) / lambda; } } else { z.pseudo_j(i, j) = (z.softabs_lambda(i) - z.softabs_lambda(j)) / delta; } } } } void update_gradients(softabs_point& z, callbacks::logger& logger) { update_metric_gradient(z, logger); } // Threshold below which a power series // approximation of the softabs function is used static constexpr double lower_softabs_thresh = 1e-4; // Threshold above which an asymptotic // approximation of the softabs function is used static constexpr double upper_softabs_thresh = 18; // Threshold below which an exact derivative is // used in the Jacobian calculation instead of // finite differencing static constexpr double jacobian_thresh = 1e-10; }; template constexpr double softabs_metric::lower_softabs_thresh; template constexpr double softabs_metric::upper_softabs_thresh; template constexpr double softabs_metric::jacobian_thresh; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/0000755000176200001440000000000014645137105021242 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/adapt_unit_e_xhmc.hpp0000644000176200001440000000234214645137105025427 0ustar liggesusers#ifndef STAN_MCMC_HMC_XHMC_ADAPT_UNIT_E_XHMC_HPP #define STAN_MCMC_HMC_XHMC_ADAPT_UNIT_E_XHMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with 
multinomial sampling * with a Gaussian-Euclidean disintegration and unit metric * and adaptive step size */ template class adapt_unit_e_xhmc : public unit_e_xhmc, public stepsize_adapter { public: adapt_unit_e_xhmc(const Model& model, BaseRNG& rng) : unit_e_xhmc(model, rng) {} ~adapt_unit_e_xhmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = unit_e_xhmc::transition(init_sample, logger); if (this->adapt_flag_) this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/adapt_dense_e_xhmc.hpp0000644000176200001440000000314714645137105025552 0ustar liggesusers#ifndef STAN_MCMC_HMC_XHMC_ADAPT_DENSE_E_XHMC_HPP #define STAN_MCMC_HMC_XHMC_ADAPT_DENSE_E_XHMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling * with a Gaussian-Euclidean disintegration and adaptive * dense metric and adaptive step size */ template class adapt_dense_e_xhmc : public dense_e_xhmc, public stepsize_covar_adapter { public: adapt_dense_e_xhmc(const Model& model, BaseRNG& rng) : dense_e_xhmc(model, rng), stepsize_covar_adapter(model.num_params_r()) {} ~adapt_dense_e_xhmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = dense_e_xhmc::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->covar_adaptation_.learn_covariance( this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { 
base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/base_xhmc.hpp0000644000176200001440000002164214645137105023711 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_BASE_XHMC_HPP #define STAN_MCMC_HMC_NUTS_BASE_XHMC_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling. * See http://arxiv.org/abs/1601.00225. */ template class Hamiltonian, template class Integrator, class BaseRNG> class base_xhmc : public base_hmc { public: base_xhmc(const Model& model, BaseRNG& rng) : base_hmc(model, rng), depth_(0), max_depth_(5), max_deltaH_(1000), x_delta_(0.1), n_leapfrog_(0), divergent_(0), energy_(0) {} ~base_xhmc() {} void set_max_depth(int d) { if (d > 0) max_depth_ = d; } void set_max_deltaH(double d) { max_deltaH_ = d; } void set_x_delta(double d) { if (d > 0) x_delta_ = d; } int get_max_depth() { return this->max_depth_; } double get_max_deltaH() { return this->max_deltaH_; } double get_x_delta() { return this->x_delta_; } sample transition(sample& init_sample, callbacks::logger& logger) { // Initialize the algorithm this->sample_stepsize(); this->seed(init_sample.cont_params()); this->hamiltonian_.sample_p(this->z_, this->rand_int_); this->hamiltonian_.init(this->z_, logger); ps_point z_plus(this->z_); ps_point z_minus(z_plus); ps_point z_sample(z_plus); ps_point z_propose(z_plus); double ave = this->hamiltonian_.dG_dt(this->z_, logger); double log_sum_weight = 0; // log(exp(H0 - H0)) double H0 = this->hamiltonian_.H(this->z_); int n_leapfrog = 0; double sum_metro_prob = 1; // exp(H0 - H0) // Build a trajectory until the NUTS criterion is no longer satisfied this->depth_ = 0; this->divergent_ = 0; while (this->depth_ < this->max_depth_) { // Build a new subtree in a random direction bool 
valid_subtree = false; double ave_subtree = 0; double log_sum_weight_subtree = -std::numeric_limits::infinity(); if (this->rand_uniform_() > 0.5) { this->z_.ps_point::operator=(z_plus); valid_subtree = build_tree(this->depth_, z_propose, ave_subtree, log_sum_weight_subtree, H0, 1, n_leapfrog, sum_metro_prob, logger); z_plus.ps_point::operator=(this->z_); } else { this->z_.ps_point::operator=(z_minus); valid_subtree = build_tree(this->depth_, z_propose, ave_subtree, log_sum_weight_subtree, H0, -1, n_leapfrog, sum_metro_prob, logger); z_minus.ps_point::operator=(this->z_); } if (!valid_subtree) break; std::tie(ave, log_sum_weight) = stable_sum( ave, log_sum_weight, ave_subtree, log_sum_weight_subtree); // Sample from an accepted subtree ++(this->depth_); double accept_prob = std::exp(log_sum_weight_subtree - log_sum_weight); if (this->rand_uniform_() < accept_prob) z_sample = z_propose; // Break if exhaustion criterion is satisfied if (std::fabs(ave) < x_delta_) break; } this->n_leapfrog_ = n_leapfrog; // Compute average acceptance probabilty across entire trajectory, // even over subtrees that may have been rejected double accept_prob = sum_metro_prob / static_cast(n_leapfrog + 1); this->z_.ps_point::operator=(z_sample); this->energy_ = this->hamiltonian_.H(this->z_); return sample(this->z_.q, -this->z_.V, accept_prob); } void get_sampler_param_names(std::vector& names) { names.push_back("stepsize__"); names.push_back("treedepth__"); names.push_back("n_leapfrog__"); names.push_back("divergent__"); names.push_back("energy__"); } void get_sampler_params(std::vector& values) { values.push_back(this->epsilon_); values.push_back(this->depth_); values.push_back(this->n_leapfrog_); values.push_back(this->divergent_); values.push_back(this->energy_); } /** * Recursively build a new subtree to completion or until * the subtree becomes invalid. Returns validity of the * resulting subtree. 
* * @param depth Depth of the desired subtree * @param z_propose State proposed from subtree * @param ave Weighted average of dG/dt across trajectory * @param log_sum_weight Log of summed weights across trajectory * @param H0 Hamiltonian of initial state * @param sign Direction in time to built subtree * @param n_leapfrog Summed number of leapfrog evaluations * @param sum_metro_prob Summed Metropolis probabilities across trajectory * @param logger Logger for messages * @return whether built tree is valid */ bool build_tree(int depth, ps_point& z_propose, double& ave, double& log_sum_weight, double H0, double sign, int& n_leapfrog, double& sum_metro_prob, callbacks::logger& logger) { // Base case if (depth == 0) { this->integrator_.evolve(this->z_, this->hamiltonian_, sign * this->epsilon_, logger); ++n_leapfrog; double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); if ((h - H0) > this->max_deltaH_) this->divergent_ = true; double dG_dt = this->hamiltonian_.dG_dt(this->z_, logger); std::tie(ave, log_sum_weight) = stable_sum(ave, log_sum_weight, dG_dt, H0 - h); if (H0 - h > 0) sum_metro_prob += 1; else sum_metro_prob += std::exp(H0 - h); z_propose = this->z_; return !this->divergent_; } // General recursion // Build the left subtree double ave_left = 0; double log_sum_weight_left = -std::numeric_limits::infinity(); bool valid_left = build_tree(depth - 1, z_propose, ave_left, log_sum_weight_left, H0, sign, n_leapfrog, sum_metro_prob, logger); if (!valid_left) return false; std::tie(ave, log_sum_weight) = stable_sum(ave, log_sum_weight, ave_left, log_sum_weight_left); // Build the right subtree ps_point z_propose_right(this->z_); double ave_right = 0; double log_sum_weight_right = -std::numeric_limits::infinity(); bool valid_right = build_tree(depth - 1, z_propose_right, ave_right, log_sum_weight_right, H0, sign, n_leapfrog, sum_metro_prob, logger); if (!valid_right) return false; std::tie(ave, log_sum_weight) = 
stable_sum(ave, log_sum_weight, ave_right, log_sum_weight_right); // Multinomial sample from right subtree double ave_subtree; double log_sum_weight_subtree; std::tie(ave_subtree, log_sum_weight_subtree) = stable_sum( ave_left, log_sum_weight_left, ave_right, log_sum_weight_right); double accept_prob = std::exp(log_sum_weight_right - log_sum_weight_subtree); if (this->rand_uniform_() < accept_prob) z_propose = z_propose_right; return std::abs(ave_subtree) >= x_delta_; } /** * a1 and a2 are running averages of the form * \f$ a1 = ( \sum_{n \in N1} w_{n} f_{n} ) * / ( \sum_{n \in N1} w_{n} ) \f$ * \f$ a2 = ( \sum_{n \in N2} w_{n} f_{n} ) * / ( \sum_{n \in N2} w_{n} ) \f$ * and the weights are the respective normalizing constants * \f$ w1 = \sum_{n \in N1} w_{n} \f$ * \f$ w2 = \sum_{n \in N2} w_{n}. \f$ * * This function returns the pooled average * \f$ sum_a = ( \sum_{n \in N1 \cup N2} w_{n} f_{n} ) * / ( \sum_{n \in N1 \cup N2} w_{n} ) \f$ * and the pooled weights * \f$ log_sum_w = log(w1 + w2). 
\f$ * * @param[in] a1 First running average, f1 / w1 * @param[in] log_w1 Log of first summed weight * @param[in] a2 Second running average * @param[in] log_w2 Log of second summed weight * @return Pair of average of input running averages and log of summed input * weights */ static std::pair stable_sum(double a1, double log_w1, double a2, double log_w2) { if (log_w2 > log_w1) { const double e = std::exp(log_w1 - log_w2); return std::make_pair((e * a1 + a2) / (1 + e), log_w2 + std::log1p(e)); } else { const double e = std::exp(log_w2 - log_w1); return std::make_pair((a1 + e * a2) / (1 + e), log_w1 + std::log1p(e)); } } int depth_; int max_depth_; double max_deltaH_; double x_delta_; int n_leapfrog_; bool divergent_; double energy_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/dense_e_xhmc.hpp0000644000176200001440000000143414645137105024376 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_DENSE_E_XHMC_HPP #define STAN_MCMC_HMC_NUTS_DENSE_E_XHMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling * with a Gaussian-Euclidean disintegration and dense metric */ template class dense_e_xhmc : public base_xhmc { public: dense_e_xhmc(const Model& model, BaseRNG& rng) : base_xhmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/adapt_diag_e_xhmc.hpp0000644000176200001440000000320414645137105025352 0ustar liggesusers#ifndef STAN_MCMC_HMC_XHMC_ADAPT_DIAG_E_XHMC_HPP #define STAN_MCMC_HMC_XHMC_ADAPT_DIAG_E_XHMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling * with a Gaussian-Euclidean disintegration and adaptive * diagonal metric and adaptive step size */ template class adapt_diag_e_xhmc : public diag_e_xhmc, public stepsize_var_adapter { public: adapt_diag_e_xhmc(const Model& model, 
BaseRNG& rng) : diag_e_xhmc(model, rng), stepsize_var_adapter(model.num_params_r()) {} ~adapt_diag_e_xhmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = diag_e_xhmc::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->var_adaptation_.learn_variance(this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/unit_e_xhmc.hpp0000644000176200001440000000142314645137105024255 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_UNIT_E_XHMC_HPP #define STAN_MCMC_HMC_NUTS_UNIT_E_XHMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling * with a Gaussian-Euclidean disintegration and unit metric */ template class unit_e_xhmc : public base_xhmc { public: unit_e_xhmc(const Model& model, BaseRNG& rng) : base_xhmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/diag_e_xhmc.hpp0000644000176200001440000000142714645137105024206 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_DIAG_E_XHMC_HPP #define STAN_MCMC_HMC_NUTS_DIAG_E_XHMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling * with a Gaussian-Euclidean disintegration and diagonal metric */ template class diag_e_xhmc : public base_xhmc { public: diag_e_xhmc(const Model& model, BaseRNG& rng) : base_xhmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif 
StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/adapt_softabs_xhmc.hpp0000644000176200001440000000236014645137105025605 0ustar liggesusers#ifndef STAN_MCMC_HMC_XHMC_ADAPT_SOFTABS_XHMC_HPP #define STAN_MCMC_HMC_XHMC_ADAPT_SOFTABS_XHMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling * with a Gaussian-Riemannian disintegration and SoftAbs metric * and adaptive step size */ template class adapt_softabs_xhmc : public softabs_xhmc, public stepsize_adapter { public: adapt_softabs_xhmc(const Model& model, BaseRNG& rng) : softabs_xhmc(model, rng) {} ~adapt_softabs_xhmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = softabs_xhmc::transition(init_sample, logger); if (this->adapt_flag_) this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/xhmc/softabs_xhmc.hpp0000644000176200001440000000143714645137105024440 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_SOFTABS_XHMC_HPP #define STAN_MCMC_HMC_NUTS_SOFTABS_XHMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Exhaustive Hamiltonian Monte Carlo (XHMC) with multinomial sampling * with a Gaussian-Riemannian disintegration and SoftAbs metric */ template class softabs_xhmc : public base_xhmc { public: softabs_xhmc(const Model& model, BaseRNG& rng) : base_xhmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/base_hmc.hpp0000644000176200001440000001311214645137105022553 0ustar liggesusers#ifndef STAN_MCMC_HMC_BASE_HMC_HPP #define STAN_MCMC_HMC_BASE_HMC_HPP #include #include #include #include #include #include #include #include #include #include namespace 
stan { namespace mcmc { /** * Base class for Hamiltonian samplers. * * @tparam Model The type of the Stan model. * @tparam Hamiltonian The type of Hamiltonians over the (unconstrained) * parameter space. * @tparam Integrator The type of integrator (e.g. leapfrog). * @tparam BaseRNG The type of random number generator. */ template class Hamiltonian, template class Integrator, class BaseRNG> class base_hmc : public base_mcmc { public: base_hmc(const Model& model, BaseRNG& rng) : base_mcmc(), z_(model.num_params_r()), integrator_(), hamiltonian_(model), rand_int_(rng), rand_uniform_(rand_int_), nom_epsilon_(0.1), epsilon_(nom_epsilon_), epsilon_jitter_(0.0) {} /** * format and write stepsize */ void write_sampler_stepsize(callbacks::writer& writer) { std::stringstream nominal_stepsize; nominal_stepsize << "Step size = " << get_nominal_stepsize(); writer(nominal_stepsize.str()); } /** * write elements of mass matrix */ void write_sampler_metric(callbacks::writer& writer) { z_.write_metric(writer); } /** * write stepsize and elements of mass matrix */ void write_sampler_state(callbacks::writer& writer) { write_sampler_stepsize(writer); write_sampler_metric(writer); } void get_sampler_diagnostic_names(std::vector& model_names, std::vector& names) { z_.get_param_names(model_names, names); } void get_sampler_diagnostics(std::vector& values) { z_.get_params(values); } void seed(const Eigen::VectorXd& q) { z_.q = q; } void init_hamiltonian(callbacks::logger& logger) { this->hamiltonian_.init(this->z_, logger); } void init_stepsize(callbacks::logger& logger) { ps_point z_init(this->z_); // Skip initialization for extreme step sizes if (this->nom_epsilon_ == 0 || this->nom_epsilon_ > 1e7 || std::isnan(this->nom_epsilon_)) return; this->hamiltonian_.sample_p(this->z_, this->rand_int_); this->hamiltonian_.init(this->z_, logger); // Guaranteed to be finite if randomly initialized double H0 = this->hamiltonian_.H(this->z_); this->integrator_.evolve(this->z_, this->hamiltonian_, 
this->nom_epsilon_, logger); double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); double delta_H = H0 - h; int direction = delta_H > std::log(0.8) ? 1 : -1; while (1) { this->z_.ps_point::operator=(z_init); this->hamiltonian_.sample_p(this->z_, this->rand_int_); this->hamiltonian_.init(this->z_, logger); double H0 = this->hamiltonian_.H(this->z_); this->integrator_.evolve(this->z_, this->hamiltonian_, this->nom_epsilon_, logger); double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); double delta_H = H0 - h; if ((direction == 1) && !(delta_H > std::log(0.8))) break; else if ((direction == -1) && !(delta_H < std::log(0.8))) break; else this->nom_epsilon_ = direction == 1 ? 2.0 * this->nom_epsilon_ : 0.5 * this->nom_epsilon_; if (this->nom_epsilon_ > 1e7) throw std::runtime_error( "Posterior is improper. " "Please check your model."); if (this->nom_epsilon_ == 0) throw std::runtime_error( "No acceptably small step size could " "be found. Perhaps the posterior is " "not continuous?"); } this->z_.ps_point::operator=(z_init); } /** * Gets the current point in the (unconstrained) parameter space. * * @return The current point in the (unconstrained) parameter space. */ typename Hamiltonian::PointType& z() { return z_; } /** * Gets the current point in the (unconstrained) parameters space. * * @return The current point in the (unconstrained) parameters space. 
*/ const typename Hamiltonian::PointType& z() const noexcept { return z_; } virtual void set_nominal_stepsize(double e) { if (e > 0) nom_epsilon_ = e; } double get_nominal_stepsize() const noexcept { return this->nom_epsilon_; } double get_current_stepsize() const noexcept { return this->epsilon_; } virtual void set_stepsize_jitter(double j) { if (j > 0 && j < 1) epsilon_jitter_ = j; } double get_stepsize_jitter() const noexcept { return this->epsilon_jitter_; } void sample_stepsize() { this->epsilon_ = this->nom_epsilon_; if (this->epsilon_jitter_) this->epsilon_ *= 1.0 + this->epsilon_jitter_ * (2.0 * this->rand_uniform_() - 1.0); } protected: typename Hamiltonian::PointType z_; Integrator > integrator_; Hamiltonian hamiltonian_; BaseRNG& rand_int_; // Uniform(0, 1) RNG boost::uniform_01 rand_uniform_; double nom_epsilon_; double epsilon_; double epsilon_jitter_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/0000755000176200001440000000000014645137105021274 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/nuts/adapt_softabs_nuts.hpp0000644000176200001440000000234314645137105025672 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_ADAPT_SOFTABS_NUTS_HPP #define STAN_MCMC_HMC_NUTS_ADAPT_SOFTABS_NUTS_HPP #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Riemannian disintegration and SoftAbs metric * and adaptive step size */ template class adapt_softabs_nuts : public softabs_nuts, public stepsize_adapter { public: adapt_softabs_nuts(const Model& model, BaseRNG& rng) : softabs_nuts(model, rng) {} ~adapt_softabs_nuts() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = softabs_nuts::transition(init_sample, logger); if (this->adapt_flag_) this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); return s; } void disengage_adaptation() { 
base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/softabs_nuts.hpp0000644000176200001440000000142214645137105024516 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_SOFTABS_NUTS_HPP #define STAN_MCMC_HMC_NUTS_SOFTABS_NUTS_HPP #include #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Riemannian disintegration and SoftAbs metric */ template class softabs_nuts : public base_nuts { public: softabs_nuts(const Model& model, BaseRNG& rng) : base_nuts(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/unit_e_nuts.hpp0000644000176200001440000000140614645137105024342 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_UNIT_E_NUTS_HPP #define STAN_MCMC_HMC_NUTS_UNIT_E_NUTS_HPP #include #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Euclidean disintegration and unit metric */ template class unit_e_nuts : public base_nuts { public: unit_e_nuts(const Model& model, BaseRNG& rng) : base_nuts(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/diag_e_nuts.hpp0000644000176200001440000000141214645137105024264 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_DIAG_E_NUTS_HPP #define STAN_MCMC_HMC_NUTS_DIAG_E_NUTS_HPP #include #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Euclidean disintegration and diagonal metric */ template class diag_e_nuts : public base_nuts { public: diag_e_nuts(const Model& model, BaseRNG& rng) : base_nuts(model, rng) {} }; } // namespace mcmc } // namespace stan #endif 
StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/base_nuts.hpp0000644000176200001440000002756014645137105024002 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_BASE_NUTS_HPP #define STAN_MCMC_HMC_NUTS_BASE_NUTS_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling */ template class Hamiltonian, template class Integrator, class BaseRNG> class base_nuts : public base_hmc { public: base_nuts(const Model& model, BaseRNG& rng) : base_hmc(model, rng), depth_(0), max_depth_(5), max_deltaH_(1000), n_leapfrog_(0), divergent_(false), energy_(0) {} /** * specialized constructor for specified diag mass matrix */ base_nuts(const Model& model, BaseRNG& rng, Eigen::VectorXd& inv_e_metric) : base_hmc(model, rng, inv_e_metric), depth_(0), max_depth_(5), max_deltaH_(1000), n_leapfrog_(0), divergent_(false), energy_(0) {} /** * specialized constructor for specified dense mass matrix */ base_nuts(const Model& model, BaseRNG& rng, Eigen::MatrixXd& inv_e_metric) : base_hmc(model, rng, inv_e_metric), depth_(0), max_depth_(5), max_deltaH_(1000), n_leapfrog_(0), divergent_(false), energy_(0) {} ~base_nuts() {} void set_metric(const Eigen::MatrixXd& inv_e_metric) { this->z_.set_metric(inv_e_metric); } void set_metric(const Eigen::VectorXd& inv_e_metric) { this->z_.set_metric(inv_e_metric); } void set_max_depth(int d) { if (d > 0) max_depth_ = d; } void set_max_delta(double d) { max_deltaH_ = d; } int get_max_depth() { return this->max_depth_; } double get_max_delta() { return this->max_deltaH_; } sample transition(sample& init_sample, callbacks::logger& logger) { // Initialize the algorithm this->sample_stepsize(); this->seed(init_sample.cont_params()); this->hamiltonian_.sample_p(this->z_, this->rand_int_); this->hamiltonian_.init(this->z_, logger); ps_point z_fwd(this->z_); // State at forward end of trajectory ps_point z_bck(z_fwd); // State at backward end of 
trajectory ps_point z_sample(z_fwd); ps_point z_propose(z_fwd); // Momentum and sharp momentum at forward end of forward subtree Eigen::VectorXd p_fwd_fwd = this->z_.p; Eigen::VectorXd p_sharp_fwd_fwd = this->hamiltonian_.dtau_dp(this->z_); // Momentum and sharp momentum at backward end of forward subtree Eigen::VectorXd p_fwd_bck = this->z_.p; Eigen::VectorXd p_sharp_fwd_bck = p_sharp_fwd_fwd; // Momentum and sharp momentum at forward end of backward subtree Eigen::VectorXd p_bck_fwd = this->z_.p; Eigen::VectorXd p_sharp_bck_fwd = p_sharp_fwd_fwd; // Momentum and sharp momentum at backward end of backward subtree Eigen::VectorXd p_bck_bck = this->z_.p; Eigen::VectorXd p_sharp_bck_bck = p_sharp_fwd_fwd; // Integrated momenta along trajectory Eigen::VectorXd rho = this->z_.p.transpose(); // Log sum of state weights (offset by H0) along trajectory double log_sum_weight = 0; // log(exp(H0 - H0)) double H0 = this->hamiltonian_.H(this->z_); int n_leapfrog = 0; double sum_metro_prob = 0; // Build a trajectory until the no-u-turn // criterion is no longer satisfied this->depth_ = 0; this->divergent_ = false; while (this->depth_ < this->max_depth_) { // Build a new subtree in a random direction Eigen::VectorXd rho_fwd = Eigen::VectorXd::Zero(rho.size()); Eigen::VectorXd rho_bck = Eigen::VectorXd::Zero(rho.size()); bool valid_subtree = false; double log_sum_weight_subtree = -std::numeric_limits::infinity(); if (this->rand_uniform_() > 0.5) { // Extend the current trajectory forward this->z_.ps_point::operator=(z_fwd); rho_bck = rho; p_bck_fwd = p_fwd_fwd; p_sharp_bck_fwd = p_sharp_fwd_fwd; valid_subtree = build_tree( this->depth_, z_propose, p_sharp_fwd_bck, p_sharp_fwd_fwd, rho_fwd, p_fwd_bck, p_fwd_fwd, H0, 1, n_leapfrog, log_sum_weight_subtree, sum_metro_prob, logger); z_fwd.ps_point::operator=(this->z_); } else { // Extend the current trajectory backwards this->z_.ps_point::operator=(z_bck); rho_fwd = rho; p_fwd_bck = p_bck_bck; p_sharp_fwd_bck = p_sharp_bck_bck; 
valid_subtree = build_tree( this->depth_, z_propose, p_sharp_bck_fwd, p_sharp_bck_bck, rho_bck, p_bck_fwd, p_bck_bck, H0, -1, n_leapfrog, log_sum_weight_subtree, sum_metro_prob, logger); z_bck.ps_point::operator=(this->z_); } if (!valid_subtree) break; // Sample from accepted subtree ++(this->depth_); if (log_sum_weight_subtree > log_sum_weight) { z_sample = z_propose; } else { double accept_prob = std::exp(log_sum_weight_subtree - log_sum_weight); if (this->rand_uniform_() < accept_prob) z_sample = z_propose; } log_sum_weight = math::log_sum_exp(log_sum_weight, log_sum_weight_subtree); // Break when no-u-turn criterion is no longer satisfied rho = rho_bck + rho_fwd; // Demand satisfaction around merged subtrees bool persist_criterion = compute_criterion(p_sharp_bck_bck, p_sharp_fwd_fwd, rho); // Demand satisfaction between subtrees Eigen::VectorXd rho_extended = rho_bck + p_fwd_bck; persist_criterion &= compute_criterion(p_sharp_bck_bck, p_sharp_fwd_bck, rho_extended); rho_extended = rho_fwd + p_bck_fwd; persist_criterion &= compute_criterion(p_sharp_bck_fwd, p_sharp_fwd_fwd, rho_extended); if (!persist_criterion) break; } this->n_leapfrog_ = n_leapfrog; // Compute average acceptance probabilty across entire trajectory, // even over subtrees that may have been rejected double accept_prob = sum_metro_prob / static_cast(n_leapfrog); this->z_.ps_point::operator=(z_sample); this->energy_ = this->hamiltonian_.H(this->z_); return sample(this->z_.q, -this->z_.V, accept_prob); } void get_sampler_param_names(std::vector& names) { names.push_back("stepsize__"); names.push_back("treedepth__"); names.push_back("n_leapfrog__"); names.push_back("divergent__"); names.push_back("energy__"); } void get_sampler_params(std::vector& values) { values.push_back(this->epsilon_); values.push_back(this->depth_); values.push_back(this->n_leapfrog_); values.push_back(this->divergent_); values.push_back(this->energy_); } virtual bool compute_criterion(Eigen::VectorXd& p_sharp_minus, 
Eigen::VectorXd& p_sharp_plus, Eigen::VectorXd& rho) { return p_sharp_plus.dot(rho) > 0 && p_sharp_minus.dot(rho) > 0; } /** * Recursively build a new subtree to completion or until * the subtree becomes invalid. Returns validity of the * resulting subtree. * * @param depth Depth of the desired subtree * @param z_propose State proposed from subtree * @param p_sharp_beg Sharp momentum at beginning of new tree * @param p_sharp_end Sharp momentum at end of new tree * @param rho Summed momentum across trajectory * @param p_beg Momentum at beginning of returned tree * @param p_end Momentum at end of returned tree * @param H0 Hamiltonian of initial state * @param sign Direction in time to built subtree * @param n_leapfrog Summed number of leapfrog evaluations * @param log_sum_weight Log of summed weights across trajectory * @param sum_metro_prob Summed Metropolis probabilities across trajectory * @param logger Logger for messages */ bool build_tree(int depth, ps_point& z_propose, Eigen::VectorXd& p_sharp_beg, Eigen::VectorXd& p_sharp_end, Eigen::VectorXd& rho, Eigen::VectorXd& p_beg, Eigen::VectorXd& p_end, double H0, double sign, int& n_leapfrog, double& log_sum_weight, double& sum_metro_prob, callbacks::logger& logger) { // Base case if (depth == 0) { this->integrator_.evolve(this->z_, this->hamiltonian_, sign * this->epsilon_, logger); ++n_leapfrog; double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); if ((h - H0) > this->max_deltaH_) this->divergent_ = true; log_sum_weight = math::log_sum_exp(log_sum_weight, H0 - h); if (H0 - h > 0) sum_metro_prob += 1; else sum_metro_prob += std::exp(H0 - h); z_propose = this->z_; p_sharp_beg = this->hamiltonian_.dtau_dp(this->z_); p_sharp_end = p_sharp_beg; rho += this->z_.p; p_beg = this->z_.p; p_end = p_beg; return !this->divergent_; } // General recursion // Build the initial subtree double log_sum_weight_init = -std::numeric_limits::infinity(); // Momentum and sharp momentum at end 
of the initial subtree Eigen::VectorXd p_init_end(this->z_.p.size()); Eigen::VectorXd p_sharp_init_end(this->z_.p.size()); Eigen::VectorXd rho_init = Eigen::VectorXd::Zero(rho.size()); bool valid_init = build_tree(depth - 1, z_propose, p_sharp_beg, p_sharp_init_end, rho_init, p_beg, p_init_end, H0, sign, n_leapfrog, log_sum_weight_init, sum_metro_prob, logger); if (!valid_init) return false; // Build the final subtree ps_point z_propose_final(this->z_); double log_sum_weight_final = -std::numeric_limits::infinity(); // Momentum and sharp momentum at beginning of the final subtree Eigen::VectorXd p_final_beg(this->z_.p.size()); Eigen::VectorXd p_sharp_final_beg(this->z_.p.size()); Eigen::VectorXd rho_final = Eigen::VectorXd::Zero(rho.size()); bool valid_final = build_tree(depth - 1, z_propose_final, p_sharp_final_beg, p_sharp_end, rho_final, p_final_beg, p_end, H0, sign, n_leapfrog, log_sum_weight_final, sum_metro_prob, logger); if (!valid_final) return false; // Multinomial sample from right subtree double log_sum_weight_subtree = math::log_sum_exp(log_sum_weight_init, log_sum_weight_final); log_sum_weight = math::log_sum_exp(log_sum_weight, log_sum_weight_subtree); if (log_sum_weight_final > log_sum_weight_subtree) { z_propose = z_propose_final; } else { double accept_prob = std::exp(log_sum_weight_final - log_sum_weight_subtree); if (this->rand_uniform_() < accept_prob) z_propose = z_propose_final; } Eigen::VectorXd rho_subtree = rho_init + rho_final; rho += rho_subtree; // Demand satisfaction around merged subtrees bool persist_criterion = compute_criterion(p_sharp_beg, p_sharp_end, rho_subtree); // Demand satisfaction between subtrees rho_subtree = rho_init + p_final_beg; persist_criterion &= compute_criterion(p_sharp_beg, p_sharp_final_beg, rho_subtree); rho_subtree = rho_final + p_init_end; persist_criterion &= compute_criterion(p_sharp_init_end, p_sharp_end, rho_subtree); return persist_criterion; } int depth_; int max_depth_; double max_deltaH_; int 
n_leapfrog_; bool divergent_; double energy_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/dense_e_nuts.hpp0000644000176200001440000000141714645137105024463 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_DENSE_E_NUTS_HPP #define STAN_MCMC_HMC_NUTS_DENSE_E_NUTS_HPP #include #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Euclidean disintegration and dense metric */ template class dense_e_nuts : public base_nuts { public: dense_e_nuts(const Model& model, BaseRNG& rng) : base_nuts(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/adapt_dense_e_nuts.hpp0000644000176200001440000000313214645137105025630 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_ADAPT_DENSE_E_NUTS_HPP #define STAN_MCMC_HMC_NUTS_ADAPT_DENSE_E_NUTS_HPP #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Euclidean disintegration and adaptive * dense metric and adaptive step size */ template class adapt_dense_e_nuts : public dense_e_nuts, public stepsize_covar_adapter { public: adapt_dense_e_nuts(const Model& model, BaseRNG& rng) : dense_e_nuts(model, rng), stepsize_covar_adapter(model.num_params_r()) {} ~adapt_dense_e_nuts() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = dense_e_nuts::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->covar_adaptation_.learn_covariance( this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); 
this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/adapt_diag_e_nuts.hpp0000644000176200001440000000316714645137105025446 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_ADAPT_DIAG_E_NUTS_HPP #define STAN_MCMC_HMC_NUTS_ADAPT_DIAG_E_NUTS_HPP #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Euclidean disintegration and adaptive * diagonal metric and adaptive step size */ template class adapt_diag_e_nuts : public diag_e_nuts, public stepsize_var_adapter { public: adapt_diag_e_nuts(const Model& model, BaseRNG& rng) : diag_e_nuts(model, rng), stepsize_var_adapter(model.num_params_r()) {} ~adapt_diag_e_nuts() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = diag_e_nuts::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->var_adaptation_.learn_variance(this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts/adapt_unit_e_nuts.hpp0000644000176200001440000000232514645137105025514 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_ADAPT_UNIT_E_NUTS_HPP #define STAN_MCMC_HMC_NUTS_ADAPT_UNIT_E_NUTS_HPP #include #include #include namespace stan { namespace mcmc { /** * The No-U-Turn sampler (NUTS) with multinomial sampling * with a Gaussian-Euclidean disintegration and unit metric * and adaptive step size */ template class adapt_unit_e_nuts : public unit_e_nuts, public 
stepsize_adapter { public: adapt_unit_e_nuts(const Model& model, BaseRNG& rng) : unit_e_nuts(model, rng) {} ~adapt_unit_e_nuts() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = unit_e_nuts::transition(init_sample, logger); if (this->adapt_flag_) this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/0000755000176200001440000000000014645137105022775 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/dense_e_nuts_classic.hpp0000644000176200001440000000237114645137105027665 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_CLASSIC_DENSE_E_NUTS_CLASSIC_HPP #define STAN_MCMC_HMC_NUTS_CLASSIC_DENSE_E_NUTS_CLASSIC_HPP #include #include #include #include namespace stan { namespace mcmc { // The No-U-Turn Sampler (NUTS) on a // Euclidean manifold with dense metric template class dense_e_nuts_classic : public base_nuts_classic { public: dense_e_nuts_classic(const Model& model, BaseRNG& rng) : base_nuts_classic(model, rng) {} // Note that the points don't need to be swapped // here since start.inv_e_metric_ = finish.inv_e_metric_ bool compute_criterion(ps_point& start, dense_e_point& finish, Eigen::VectorXd& rho) { return finish.p.transpose() * finish.inv_e_metric_ * (rho - finish.p) > 0 && start.p.transpose() * finish.inv_e_metric_ * (rho - start.p) > 0; } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/adapt_dense_e_nuts_classic.hpp0000644000176200001440000000321714645137105031036 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_CLASSIC_ADAPT_DENSE_E_NUTS_CLASSIC_HPP #define STAN_MCMC_HMC_NUTS_CLASSIC_ADAPT_DENSE_E_NUTS_CLASSIC_HPP #include #include #include namespace stan { namespace mcmc { // 
The No-U-Turn Sampler (NUTS) on a // Euclidean manifold with dense metric // and adaptive stepsize template class adapt_dense_e_nuts_classic : public dense_e_nuts_classic, public stepsize_covar_adapter { public: adapt_dense_e_nuts_classic(const Model& model, BaseRNG& rng) : dense_e_nuts_classic(model, rng), stepsize_covar_adapter(model.num_params_r()) {} ~adapt_dense_e_nuts_classic() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = dense_e_nuts_classic::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->covar_adaptation_.learn_covariance( this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/diag_e_nuts_classic.hpp0000644000176200001440000000236414645137105027475 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_CLASSIC_DIAG_E_NUTS_CLASSIC_HPP #define STAN_MCMC_HMC_NUTS_CLASSIC_DIAG_E_NUTS_CLASSIC_HPP #include #include #include #include namespace stan { namespace mcmc { // The No-U-Turn Sampler (NUTS) on a // Euclidean manifold with diagonal metric template class diag_e_nuts_classic : public base_nuts_classic { public: diag_e_nuts_classic(const Model& model, BaseRNG& rng) : base_nuts_classic(model, rng) {} // Note that the points don't need to be swapped here // since start.inv_e_metric_ = finish.inv_e_metric_ bool compute_criterion(ps_point& start, diag_e_point& finish, Eigen::VectorXd& rho) { return finish.inv_e_metric_.cwiseProduct(finish.p).dot(rho - finish.p) > 0 && finish.inv_e_metric_.cwiseProduct(start.p).dot(rho - start.p) > 0; } }; } // 
namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/adapt_diag_e_nuts_classic.hpp0000644000176200001440000000325514645137105030646 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_CLASSIC_ADAPT_DIAG_E_NUTS_CLASSIC_HPP #define STAN_MCMC_HMC_NUTS_CLASSIC_ADAPT_DIAG_E_NUTS_CLASSIC_HPP #include #include #include namespace stan { namespace mcmc { // The No-U-Turn Sampler (NUTS) on a // Euclidean manifold with diagonal metric // and adaptive stepsize template class adapt_diag_e_nuts_classic : public diag_e_nuts_classic, public stepsize_var_adapter { public: adapt_diag_e_nuts_classic(const Model& model, BaseRNG& rng) : diag_e_nuts_classic(model, rng), stepsize_var_adapter(model.num_params_r()) {} ~adapt_diag_e_nuts_classic() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = diag_e_nuts_classic::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->var_adaptation_.learn_variance(this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/unit_e_nuts_classic.hpp0000644000176200001440000000206114645137105027542 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_CLASSIC_UNIT_E_NUTS_CLASSIC_HPP #define STAN_MCMC_HMC_NUTS_CLASSIC_UNIT_E_NUTS_CLASSIC_HPP #include #include #include #include namespace stan { namespace mcmc { // The No-U-Turn Sampler (NUTS) on a // Euclidean manifold with unit metric template class unit_e_nuts_classic : public base_nuts_classic { public: unit_e_nuts_classic(const Model& model, BaseRNG& rng) : 
base_nuts_classic(model, rng) {} bool compute_criterion(ps_point& start, unit_e_point& finish, Eigen::VectorXd& rho) { return finish.p.dot(rho - finish.p) > 0 && start.p.dot(rho - start.p) > 0; } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/adapt_unit_e_nuts_classic.hpp0000644000176200001440000000242414645137105030716 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_CLASSIC_ADAPT_UNIT_E_NUTS_CLASSIC_HPP #define STAN_MCMC_HMC_NUTS_CLASSIC_ADAPT_UNIT_E_NUTS_CLASSIC_HPP #include #include #include namespace stan { namespace mcmc { // The No-U-Turn Sampler (NUTS) on a // Euclidean manifold with unit metric // and adaptive stepsize template class adapt_unit_e_nuts_classic : public unit_e_nuts_classic, public stepsize_adapter { public: adapt_unit_e_nuts_classic(const Model& model, BaseRNG& rng) : unit_e_nuts_classic(model, rng) {} ~adapt_unit_e_nuts_classic() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = unit_e_nuts_classic::transition(init_sample, logger); if (this->adapt_flag_) this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/nuts_classic/base_nuts_classic.hpp0000644000176200001440000001524714645137105027203 0ustar liggesusers#ifndef STAN_MCMC_HMC_NUTS_CLASSIC_BASE_NUTS_CLASSIC_HPP #define STAN_MCMC_HMC_NUTS_CLASSIC_BASE_NUTS_CLASSIC_HPP #include #include #include #include #include #include #include #include namespace stan { namespace mcmc { struct nuts_util { // Constants through each recursion double log_u; double H0; int sign; // Aggregators through each recursion int n_tree; double sum_prob; bool criterion; // just to guarantee bool initializes to valid value nuts_util() : criterion(false) {} }; // The 
No-U-Turn Sampler (NUTS) with the // original slice sampler implementation template class Hamiltonian, template class Integrator, class BaseRNG> class base_nuts_classic : public base_hmc { public: base_nuts_classic(const Model& model, BaseRNG& rng) : base_hmc(model, rng), depth_(0), max_depth_(5), max_delta_(1000), n_leapfrog_(0), divergent_(0), energy_(0) {} ~base_nuts_classic() {} void set_max_depth(int d) { if (d > 0) max_depth_ = d; } void set_max_delta(double d) { max_delta_ = d; } int get_max_depth() { return this->max_depth_; } double get_max_delta() { return this->max_delta_; } sample transition(sample& init_sample, callbacks::logger& logger) { // Initialize the algorithm this->sample_stepsize(); nuts_util util; this->seed(init_sample.cont_params()); this->hamiltonian_.sample_p(this->z_, this->rand_int_); this->hamiltonian_.init(this->z_, logger); ps_point z_plus(this->z_); ps_point z_minus(z_plus); ps_point z_sample(z_plus); ps_point z_propose(z_plus); int n_cont = init_sample.cont_params().size(); Eigen::VectorXd rho_init = this->z_.p; Eigen::VectorXd rho_plus(n_cont); rho_plus.setZero(); Eigen::VectorXd rho_minus(n_cont); rho_minus.setZero(); util.H0 = this->hamiltonian_.H(this->z_); // Sample the slice variable util.log_u = std::log(this->rand_uniform_()); // Build a balanced binary tree until the NUTS criterion fails util.criterion = true; int n_valid = 0; this->depth_ = 0; this->divergent_ = 0; util.n_tree = 0; util.sum_prob = 0; while (util.criterion && (this->depth_ <= this->max_depth_)) { // Randomly sample a direction in time ps_point* z = 0; Eigen::VectorXd* rho = 0; if (this->rand_uniform_() > 0.5) { z = &z_plus; rho = &rho_plus; util.sign = 1; } else { z = &z_minus; rho = &rho_minus; util.sign = -1; } // And build a new subtree in that direction this->z_.ps_point::operator=(*z); int n_valid_subtree = build_tree(depth_, *rho, 0, z_propose, util, logger); ++(this->depth_); *z = this->z_; // Metropolis-Hastings sample the fresh subtree if 
(!util.criterion) break; double subtree_prob = 0; if (n_valid) { subtree_prob = static_cast(n_valid_subtree) / static_cast(n_valid); } else { subtree_prob = n_valid_subtree ? 1 : 0; } if (this->rand_uniform_() < subtree_prob) z_sample = z_propose; n_valid += n_valid_subtree; // Check validity of completed tree this->z_.ps_point::operator=(z_plus); Eigen::VectorXd delta_rho = rho_minus + rho_init + rho_plus; util.criterion = compute_criterion(z_minus, this->z_, delta_rho); } this->n_leapfrog_ = util.n_tree; double accept_prob = util.sum_prob / static_cast(util.n_tree); this->z_.ps_point::operator=(z_sample); this->energy_ = this->hamiltonian_.H(this->z_); return sample(this->z_.q, -this->z_.V, accept_prob); } void get_sampler_param_names(std::vector& names) { names.push_back("stepsize__"); names.push_back("treedepth__"); names.push_back("n_leapfrog__"); names.push_back("divergent__"); names.push_back("energy__"); } void get_sampler_params(std::vector& values) { values.push_back(this->epsilon_); values.push_back(this->depth_); values.push_back(this->n_leapfrog_); values.push_back(this->divergent_); values.push_back(this->energy_); } virtual bool compute_criterion( ps_point& start, typename Hamiltonian::PointType& finish, Eigen::VectorXd& rho) = 0; // Returns number of valid points in the completed subtree int build_tree(int depth, Eigen::VectorXd& rho, ps_point* z_init_parent, ps_point& z_propose, nuts_util& util, callbacks::logger& logger) { // Base case if (depth == 0) { this->integrator_.evolve(this->z_, this->hamiltonian_, util.sign * this->epsilon_, logger); rho += this->z_.p; if (z_init_parent) *z_init_parent = this->z_; z_propose = this->z_; double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); util.criterion = util.log_u + (h - util.H0) < this->max_delta_; if (!util.criterion) ++(this->divergent_); util.sum_prob += std::min(1.0, std::exp(util.H0 - h)); util.n_tree += 1; return (util.log_u + (h - util.H0) < 0); } 
else { // General recursion Eigen::VectorXd left_subtree_rho(rho.size()); left_subtree_rho.setZero(); ps_point z_init(this->z_); int n1 = build_tree(depth - 1, left_subtree_rho, &z_init, z_propose, util, logger); if (z_init_parent) *z_init_parent = z_init; if (!util.criterion) return 0; Eigen::VectorXd right_subtree_rho(rho.size()); right_subtree_rho.setZero(); ps_point z_propose_right(z_init); int n2 = build_tree(depth - 1, right_subtree_rho, 0, z_propose_right, util, logger); double accept_prob = static_cast(n2) / static_cast(n1 + n2); if (util.criterion && (this->rand_uniform_() < accept_prob)) z_propose = z_propose_right; Eigen::VectorXd& subtree_rho = left_subtree_rho; subtree_rho += right_subtree_rho; rho += subtree_rho; util.criterion &= compute_criterion(z_init, this->z_, subtree_rho); return n1 + n2; } } int depth_; int max_depth_; double max_delta_; int n_leapfrog_; int divergent_; double energy_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/0000755000176200001440000000000014645137105021572 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/static/adapt_diag_e_static_hmc.hpp0000644000176200001440000000345214645137105027066 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_ADAPT_DIAG_E_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_ADAPT_DIAG_E_STATIC_HMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Euclidean disintegration and adaptive diagonal metric and * adaptive step size */ template class adapt_diag_e_static_hmc : public diag_e_static_hmc, public stepsize_var_adapter { public: adapt_diag_e_static_hmc(const Model& model, BaseRNG& rng) : diag_e_static_hmc(model, rng), stepsize_var_adapter(model.num_params_r()) {} ~adapt_diag_e_static_hmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = 
diag_e_static_hmc::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); this->update_L_(); bool update = this->var_adaptation_.learn_variance(this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->update_L_(); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/adapt_unit_e_static_hmc.hpp0000644000176200001440000000256514645137105027145 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_ADAPT_UNIT_E_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_ADAPT_UNIT_E_STATIC_HMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Euclidean disintegration and unit metric and * adaptive step size */ template class adapt_unit_e_static_hmc : public unit_e_static_hmc, public stepsize_adapter { public: adapt_unit_e_static_hmc(const Model& model, BaseRNG& rng) : unit_e_static_hmc(model, rng) {} ~adapt_unit_e_static_hmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = unit_e_static_hmc::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); this->update_L_(); } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/adapt_softabs_static_hmc.hpp0000644000176200001440000000260314645137105027314 0ustar liggesusers#ifndef 
STAN_MCMC_HMC_STATIC_ADAPT_SOFTABS_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_ADAPT_SOFTABS_STATIC_HMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Riemannian disintegration and SoftAbs metric and * adaptive step size */ template class adapt_softabs_static_hmc : public softabs_static_hmc, public stepsize_adapter { public: adapt_softabs_static_hmc(const Model& model, BaseRNG& rng) : softabs_static_hmc(model, rng) {} ~adapt_softabs_static_hmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = softabs_static_hmc::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); this->update_L_(); } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/base_static_hmc.hpp0000644000176200001440000000621514645137105025417 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_BASE_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_BASE_STATIC_HMC_HPP #include #include #include #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time */ template class Hamiltonian, template class Integrator, class BaseRNG> class base_static_hmc : public base_hmc { public: base_static_hmc(const Model& model, BaseRNG& rng) : base_hmc(model, rng), T_(1), energy_(0) { update_L_(); } ~base_static_hmc() {} void set_metric(const Eigen::MatrixXd& inv_e_metric) { this->z_.set_metric(inv_e_metric); } void set_metric(const Eigen::VectorXd& inv_e_metric) { this->z_.set_metric(inv_e_metric); } sample transition(sample& init_sample, 
callbacks::logger& logger) { this->sample_stepsize(); this->seed(init_sample.cont_params()); this->hamiltonian_.sample_p(this->z_, this->rand_int_); this->hamiltonian_.init(this->z_, logger); ps_point z_init(this->z_); double H0 = this->hamiltonian_.H(this->z_); for (int i = 0; i < L_; ++i) this->integrator_.evolve(this->z_, this->hamiltonian_, this->epsilon_, logger); double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); double acceptProb = std::exp(H0 - h); if (acceptProb < 1 && this->rand_uniform_() > acceptProb) this->z_.ps_point::operator=(z_init); acceptProb = acceptProb > 1 ? 1 : acceptProb; this->energy_ = this->hamiltonian_.H(this->z_); return sample(this->z_.q, -this->hamiltonian_.V(this->z_), acceptProb); } void get_sampler_param_names(std::vector& names) { names.push_back("stepsize__"); names.push_back("int_time__"); names.push_back("energy__"); } void get_sampler_params(std::vector& values) { values.push_back(this->epsilon_); values.push_back(this->T_); values.push_back(this->energy_); } void set_nominal_stepsize_and_T(const double e, const double t) { if (e > 0 && t > 0) { this->nom_epsilon_ = e; T_ = t; update_L_(); } } void set_nominal_stepsize_and_L(const double e, const int l) { if (e > 0 && l > 0) { this->nom_epsilon_ = e; L_ = l; T_ = this->nom_epsilon_ * L_; } } void set_T(const double t) { if (t > 0) { T_ = t; update_L_(); } } void set_nominal_stepsize(const double e) { if (e > 0) { this->nom_epsilon_ = e; update_L_(); } } double get_T() { return this->T_; } int get_L() { return this->L_; } protected: double T_; int L_; double energy_; void update_L_() { L_ = static_cast(T_ / this->nom_epsilon_); L_ = L_ < 1 ? 
1 : L_; } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/softabs_static_hmc.hpp0000644000176200001440000000167614645137105026154 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_SOFTABS_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_SOFTABS_STATIC_HMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Riemannian disintegration and SoftAbs metric */ template class softabs_static_hmc : public base_static_hmc { public: softabs_static_hmc(const Model& model, BaseRNG& rng) : base_static_hmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/diag_e_static_hmc.hpp0000644000176200001440000000166514645137105025721 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_DIAG_E_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_DIAG_E_STATIC_HMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Euclidean disintegration and diagonal metric */ template class diag_e_static_hmc : public base_static_hmc { public: diag_e_static_hmc(const Model& model, BaseRNG& rng) : base_static_hmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/adapt_dense_e_static_hmc.hpp0000644000176200001440000000341514645137105027257 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_ADAPT_DENSE_E_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_ADAPT_DENSE_E_STATIC_HMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Euclidean disintegration and adaptive dense metric and * adaptive step size */ template class 
adapt_dense_e_static_hmc : public dense_e_static_hmc, public stepsize_covar_adapter { public: adapt_dense_e_static_hmc(const Model& model, BaseRNG& rng) : dense_e_static_hmc(model, rng), stepsize_covar_adapter(model.num_params_r()) {} ~adapt_dense_e_static_hmc() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = dense_e_static_hmc::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); this->update_L_(); bool update = this->covar_adaptation_.learn_covariance( this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->update_L_(); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/unit_e_static_hmc.hpp0000644000176200001440000000166114645137105025770 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_UNIT_E_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_UNIT_E_STATIC_HMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Euclidean disintegration and unit metric */ template class unit_e_static_hmc : public base_static_hmc { public: unit_e_static_hmc(const Model& model, BaseRNG& rng) : base_static_hmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static/dense_e_static_hmc.hpp0000644000176200001440000000167314645137105026112 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_DENSE_E_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_DENSE_E_STATIC_HMC_HPP #include #include #include #include namespace stan { namespace mcmc { /** * 
Hamiltonian Monte Carlo implementation using the endpoint * of trajectories with a static integration time with a * Gaussian-Euclidean disintegration and dense metric */ template class dense_e_static_hmc : public base_static_hmc { public: dense_e_static_hmc(const Model& model, BaseRNG& rng) : base_static_hmc(model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/0000755000176200001440000000000014645137105023331 5ustar liggesusersStanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/base_static_uniform.hpp0000644000176200001440000000753314645137105030072 0ustar liggesusers#ifndef STAN_MCMC_HMC_UNIFORM_BASE_STATIC_UNIFORM_HPP #define STAN_MCMC_HMC_UNIFORM_BASE_STATIC_UNIFORM_HPP #include #include #include #include #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * from trajectories with a static integration time */ template class Hamiltonian, template class Integrator, class BaseRNG> class base_static_uniform : public base_hmc { public: base_static_uniform(const Model& model, BaseRNG& rng) : base_hmc(model, rng), T_(1), energy_(0) { update_L_(); } ~base_static_uniform() {} sample transition(sample& init_sample, callbacks::logger& logger) { this->sample_stepsize(); this->seed(init_sample.cont_params()); this->hamiltonian_.sample_p(this->z_, this->rand_int_); this->hamiltonian_.init(this->z_, logger); ps_point z_init(this->z_); double H0 = this->hamiltonian_.H(this->z_); ps_point z_sample(this->z_); double sum_prob = 1; double sum_metro_prob = 1; boost::random::uniform_int_distribution<> uniform(0, L_ - 1); int Lp = uniform(this->rand_int_); for (int l = 0; l < Lp; ++l) { this->integrator_.evolve(this->z_, this->hamiltonian_, -this->epsilon_, logger); double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); double prob = std::exp(H0 - h); sum_prob += prob; 
sum_metro_prob += prob > 1 ? 1 : prob; if (this->rand_uniform_() < prob / sum_prob) z_sample = this->z_; } this->z_.ps_point::operator=(z_init); for (int l = 0; l < L_ - 1 - Lp; ++l) { this->integrator_.evolve(this->z_, this->hamiltonian_, this->epsilon_, logger); double h = this->hamiltonian_.H(this->z_); if (std::isnan(h)) h = std::numeric_limits::infinity(); double prob = std::exp(H0 - h); sum_prob += prob; sum_metro_prob += prob > 1 ? 1 : prob; if (this->rand_uniform_() < prob / sum_prob) z_sample = this->z_; } double accept_prob = sum_metro_prob / static_cast(L_); this->z_.ps_point::operator=(z_sample); this->energy_ = this->hamiltonian_.H(this->z_); return sample(this->z_.q, -this->hamiltonian_.V(this->z_), accept_prob); } void get_sampler_param_names(std::vector& names) { names.push_back("stepsize__"); names.push_back("int_time__"); names.push_back("energy__"); } void get_sampler_params(std::vector& values) { values.push_back(this->epsilon_); values.push_back(this->T_); values.push_back(this->energy_); } void set_nominal_stepsize_and_T(const double e, const double t) { if (e > 0 && t > 0) { this->nom_epsilon_ = e; T_ = t; update_L_(); } } void set_nominal_stepsize_and_L(const double e, const int l) { if (e > 0 && l > 0) { this->nom_epsilon_ = e; L_ = l; T_ = this->nom_epsilon_ * L_; } } void set_T(const double t) { if (t > 0) { T_ = t; update_L_(); } } void set_nominal_stepsize(const double e) { if (e > 0) { this->nom_epsilon_ = e; update_L_(); } } double get_T() { return this->T_; } int get_L() { return this->L_; } protected: double T_; int L_; double energy_; void update_L_() { L_ = static_cast(T_ / this->nom_epsilon_); L_ = L_ < 1 ? 
1 : L_; } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/adapt_diag_e_static_uniform.hpp0000644000176200001440000000346714645137105031543 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_UNIFORM_ADAPT_DIAG_E_STATIC_UNIFORM_HPP #define STAN_MCMC_HMC_STATIC_UNIFORM_ADAPT_DIAG_E_STATIC_UNIFORM_HPP #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * from trajectories with a static integration time with a * Gaussian-Euclidean disintegration and adaptive diagonal metric and * adaptive step size */ template class adapt_diag_e_static_uniform : public diag_e_static_uniform, public stepsize_var_adapter { public: adapt_diag_e_static_uniform(const Model& model, BaseRNG& rng) : diag_e_static_uniform(model, rng), stepsize_var_adapter(model.num_params_r()) {} ~adapt_diag_e_static_uniform() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = diag_e_static_uniform::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->var_adaptation_.learn_variance(this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/adapt_unit_e_static_uniform.hpp0000644000176200001440000000270114645137105031604 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_ADAPT_UNIT_E_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_ADAPT_UNIT_E_STATIC_HMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * 
from trajectories with a static integration time with a * Gaussian-Euclidean disintegration and unit metric and * adaptive step size */ template class adapt_unit_e_static_uniform : public unit_e_static_uniform, public stepsize_adapter { public: adapt_unit_e_static_uniform(const Model& model, BaseRNG& rng) : unit_e_static_uniform(model, rng) {} ~adapt_unit_e_static_uniform() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = unit_e_static_uniform::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); this->update_L_(); } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/diag_e_static_uniform.hpp0000644000176200001440000000166714645137105030372 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_UNIFORM_DIAG_E_STATIC_UNIFORM_HPP #define STAN_MCMC_HMC_STATIC_UNIFORM_DIAG_E_STATIC_UNIFORM_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * from trajectories with a static integration time with a * Gaussian-Euclidean disintegration and diagonal metric */ template class diag_e_static_uniform : public base_static_uniform { public: diag_e_static_uniform(const Model& model, BaseRNG& rng) : base_static_uniform( model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/dense_e_static_uniform.hpp0000644000176200001440000000173514645137105030560 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_UNIFORM_DENSE_E_STATIC_UNIFORM_HPP #define STAN_MCMC_HMC_STATIC_UNIFORM_DENSE_E_STATIC_UNIFORM_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that 
uniformly samples * from trajectories with a static integration time with a * Gaussian-Euclidean disintegration and dense metric */ template class dense_e_static_uniform : public base_static_uniform { public: dense_e_static_uniform(const Model& model, BaseRNG& rng) : base_static_uniform( model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/softabs_static_uniform.hpp0000644000176200001440000000174014645137105030613 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_UNIFORM_SOFTABS_STATIC_UNIFORM_HPP #define STAN_MCMC_HMC_STATIC_UNIFORM_SOFTABS_STATIC_UNIFORM_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * from trajectories with a static integration time with a * Gaussian-Riemannian disintegration and SoftAbs metric */ template class softabs_static_uniform : public base_static_uniform { public: softabs_static_uniform(const Model& model, BaseRNG& rng) : base_static_uniform( model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/unit_e_static_uniform.hpp0000644000176200001440000000166314645137105030441 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_UNIFORM_UNIT_E_STATIC_UNIFORM_HPP #define STAN_MCMC_HMC_STATIC_UNIFORM_UNIT_E_STATIC_UNIFORM_HPP #include #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * from trajectories with a static integration time with a * Gaussian-Euclidean disintegration and unit metric */ template class unit_e_static_uniform : public base_static_uniform { public: unit_e_static_uniform(const Model& model, BaseRNG& rng) : base_static_uniform( model, rng) {} }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/adapt_dense_e_static_uniform.hpp0000644000176200001440000000343314645137105031726 
0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_UNIFORM_ADAPT_DENSE_E_STATIC_UNIFORM_HPP #define STAN_MCMC_HMC_STATIC_UNIFORM_ADAPT_DENSE_E_STATIC_UNIFORM_HPP #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * from trajectories with a static integration time with a * Gaussian-Euclidean disintegration and adaptive dense metric and * adaptive step size */ template class adapt_dense_e_static_uniform : public dense_e_static_uniform, public stepsize_covar_adapter { public: adapt_dense_e_static_uniform(const Model& model, BaseRNG& rng) : dense_e_static_uniform(model, rng), stepsize_covar_adapter(model.num_params_r()) {} ~adapt_dense_e_static_uniform() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = dense_e_static_uniform::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); bool update = this->covar_adaptation_.learn_covariance( this->z_.inv_e_metric_, this->z_.q); if (update) { this->init_stepsize(logger); this->stepsize_adaptation_.set_mu(log(10 * this->nom_epsilon_)); this->stepsize_adaptation_.restart(); } } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/hmc/static_uniform/adapt_softabs_static_uniform.hpp0000644000176200001440000000271614645137105031770 0ustar liggesusers#ifndef STAN_MCMC_HMC_STATIC_ADAPT_SOFTABS_STATIC_HMC_HPP #define STAN_MCMC_HMC_STATIC_ADAPT_SOFTABS_STATIC_HMC_HPP #include #include #include namespace stan { namespace mcmc { /** * Hamiltonian Monte Carlo implementation that uniformly samples * from trajectories with a static integration time with a * Gaussian-Riemannian disintegration and SoftAbs metric and * adaptive step size */ template class adapt_softabs_static_uniform 
: public softabs_static_uniform, public stepsize_adapter { public: adapt_softabs_static_uniform(const Model& model, BaseRNG& rng) : softabs_static_uniform(model, rng) {} ~adapt_softabs_static_uniform() {} sample transition(sample& init_sample, callbacks::logger& logger) { sample s = softabs_static_uniform::transition(init_sample, logger); if (this->adapt_flag_) { this->stepsize_adaptation_.learn_stepsize(this->nom_epsilon_, s.accept_stat()); this->update_L_(); } return s; } void disengage_adaptation() { base_adapter::disengage_adaptation(); this->stepsize_adaptation_.complete_adaptation(this->nom_epsilon_); } }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/stepsize_var_adapter.hpp0000644000176200001440000000221514645137105024463 0ustar liggesusers#ifndef STAN_MCMC_STEPSIZE_VAR_ADAPTER_HPP #define STAN_MCMC_STEPSIZE_VAR_ADAPTER_HPP #include #include #include #include namespace stan { namespace mcmc { class stepsize_var_adapter : public base_adapter { public: explicit stepsize_var_adapter(int n) : var_adaptation_(n) {} stepsize_adaptation& get_stepsize_adaptation() { return stepsize_adaptation_; } const stepsize_adaptation& get_stepsize_adaptation() const noexcept { return stepsize_adaptation_; } var_adaptation& get_var_adaptation() { return var_adaptation_; } void set_window_params(unsigned int num_warmup, unsigned int init_buffer, unsigned int term_buffer, unsigned int base_window, callbacks::logger& logger) { var_adaptation_.set_window_params(num_warmup, init_buffer, term_buffer, base_window, logger); } protected: stepsize_adaptation stepsize_adaptation_; var_adaptation var_adaptation_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/stepsize_adaptation.hpp0000644000176200001440000000374414645137105024327 0ustar liggesusers#ifndef STAN_MCMC_STEPSIZE_ADAPTATION_HPP #define STAN_MCMC_STEPSIZE_ADAPTATION_HPP #include #include namespace stan { namespace mcmc { class stepsize_adaptation : 
public base_adaptation { public: stepsize_adaptation() : mu_(0.5), delta_(0.5), gamma_(0.05), kappa_(0.75), t0_(10) { restart(); } void set_mu(double m) { mu_ = m; } void set_delta(double d) { if (d > 0 && d < 1) delta_ = d; } void set_gamma(double g) { if (g > 0) gamma_ = g; } void set_kappa(double k) { if (k > 0) kappa_ = k; } void set_t0(double t) { if (t > 0) t0_ = t; } double get_mu() const noexcept { return mu_; } double get_delta() const noexcept { return delta_; } double get_gamma() const noexcept { return gamma_; } double get_kappa() const noexcept { return kappa_; } double get_t0() const noexcept { return t0_; } void restart() { counter_ = 0; s_bar_ = 0; x_bar_ = 0; } void learn_stepsize(double& epsilon, double adapt_stat) { ++counter_; adapt_stat = adapt_stat > 1 ? 1 : adapt_stat; // Nesterov Dual-Averaging of log(epsilon) const double eta = 1.0 / (counter_ + t0_); s_bar_ = (1.0 - eta) * s_bar_ + eta * (delta_ - adapt_stat); const double x = mu_ - s_bar_ * std::sqrt(counter_) / gamma_; const double x_eta = std::pow(counter_, -kappa_); x_bar_ = (1.0 - x_eta) * x_bar_ + x_eta * x; epsilon = std::exp(x); } void complete_adaptation(double& epsilon) { epsilon = std::exp(x_bar_); } protected: double counter_; // Adaptation iteration double s_bar_; // Moving average statistic double x_bar_; // Moving average parameter double mu_; // Asymptotic mean of parameter double delta_; // Target value of statistic double gamma_; // Adaptation scaling double kappa_; // Adaptation shrinkage double t0_; // Effective starting iteration }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/mcmc/windowed_adaptation.hpp0000644000176200001440000000745314645137105024302 0ustar liggesusers#ifndef STAN_MCMC_WINDOWED_ADAPTATION_HPP #define STAN_MCMC_WINDOWED_ADAPTATION_HPP #include #include #include #include namespace stan { namespace mcmc { class windowed_adaptation : public base_adaptation { public: explicit windowed_adaptation(std::string name) : 
estimator_name_(name) { num_warmup_ = 0; adapt_init_buffer_ = 0; adapt_term_buffer_ = 0; adapt_base_window_ = 0; restart(); } void restart() { adapt_window_counter_ = 0; adapt_window_size_ = adapt_base_window_; adapt_next_window_ = adapt_init_buffer_ + adapt_window_size_ - 1; } void set_window_params(unsigned int num_warmup, unsigned int init_buffer, unsigned int term_buffer, unsigned int base_window, callbacks::logger& logger) { if (num_warmup < 20) { logger.info("WARNING: No " + estimator_name_ + " estimation is"); logger.info(" performed for num_warmup < 20"); logger.info(""); return; } if (init_buffer + base_window + term_buffer > num_warmup) { logger.info( "WARNING: There aren't enough warmup " "iterations to fit the"); logger.info(" three stages of adaptation as currently" + std::string(" configured.")); num_warmup_ = num_warmup; adapt_init_buffer_ = 0.15 * num_warmup; adapt_term_buffer_ = 0.10 * num_warmup; adapt_base_window_ = num_warmup - (adapt_init_buffer_ + adapt_term_buffer_); logger.info( " Reducing each adaptation stage to " "15%/75%/10% of"); logger.info(" the given number of warmup iterations:"); std::stringstream init_buffer_msg; init_buffer_msg << " init_buffer = " << adapt_init_buffer_; logger.info(init_buffer_msg); std::stringstream adapt_window_msg; adapt_window_msg << " adapt_window = " << adapt_base_window_; logger.info(adapt_window_msg); std::stringstream term_buffer_msg; term_buffer_msg << " term_buffer = " << adapt_term_buffer_; logger.info(term_buffer_msg); logger.info(""); return; } num_warmup_ = num_warmup; adapt_init_buffer_ = init_buffer; adapt_term_buffer_ = term_buffer; adapt_base_window_ = base_window; restart(); } bool adaptation_window() { return (adapt_window_counter_ >= adapt_init_buffer_) && (adapt_window_counter_ < num_warmup_ - adapt_term_buffer_) && (adapt_window_counter_ != num_warmup_); } bool end_adaptation_window() { return (adapt_window_counter_ == adapt_next_window_) && (adapt_window_counter_ != num_warmup_); } void 
compute_next_window() { if (adapt_next_window_ == num_warmup_ - adapt_term_buffer_ - 1) return; adapt_window_size_ *= 2; adapt_next_window_ = adapt_window_counter_ + adapt_window_size_; if (adapt_next_window_ == num_warmup_ - adapt_term_buffer_ - 1) return; // Boundary of the following window, not the window just computed unsigned int next_window_boundary = adapt_next_window_ + 2 * adapt_window_size_; // If the following window overtakes the full adaptation window, // then stretch the current window to the end of the full window if (next_window_boundary >= num_warmup_ - adapt_term_buffer_) { adapt_next_window_ = num_warmup_ - adapt_term_buffer_ - 1; } } protected: std::string estimator_name_; unsigned int num_warmup_; unsigned int adapt_init_buffer_; unsigned int adapt_term_buffer_; unsigned int adapt_base_window_; unsigned int adapt_window_counter_; unsigned int adapt_next_window_; unsigned int adapt_window_size_; }; } // namespace mcmc } // namespace stan #endif StanHeaders/inst/include/src/stan/variational/0000755000176200001440000000000014645137105021126 5ustar liggesusersStanHeaders/inst/include/src/stan/variational/print_progress.hpp0000644000176200001440000000344014645137105024720 0ustar liggesusers#ifndef STAN_VARIATIONAL_PRINT_PROGRESS_HPP #define STAN_VARIATIONAL_PRINT_PROGRESS_HPP #include #include #include #include #include #include namespace stan { namespace variational { /** * Helper function for printing progress for variational inference * * @param[in] m total number of iterations * @param[in] start starting iteration * @param[in] finish final iteration * @param[in] refresh how frequently we want to print an update * @param[in] tune boolean indicates tuning vs. 
variational inference * @param[in] prefix prefix string * @param[in] suffix suffix string * @param[in,out] logger logger */ inline void print_progress(int m, int start, int finish, int refresh, bool tune, const std::string& prefix, const std::string& suffix, callbacks::logger& logger) { static const char* function = "stan::variational::print_progress"; stan::math::check_positive(function, "Total number of iterations", m); stan::math::check_nonnegative(function, "Starting iteration", start); stan::math::check_positive(function, "Final iteration", finish); stan::math::check_positive(function, "Refresh rate", refresh); int it_print_width = std::ceil(std::log10(static_cast(finish))); if (refresh > 0 && (start + m == finish || m - 1 == 0 || m % refresh == 0)) { std::stringstream ss; ss << prefix; ss << "Iteration: "; ss << std::setw(it_print_width) << m + start << " / " << finish; ss << " [" << std::setw(3) << (100 * (start + m)) / finish << "%] "; ss << (tune ? " (Adaptation)" : " (Variational Inference)"); ss << suffix; logger.info(ss); } } } // namespace variational } // namespace stan #endif StanHeaders/inst/include/src/stan/variational/advi.hpp0000644000176200001440000005066314645137105022574 0ustar liggesusers#ifndef STAN_VARIATIONAL_ADVI_HPP #define STAN_VARIATIONAL_ADVI_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace variational { /** * Automatic Differentiation Variational Inference * * Implements "black box" variational inference using stochastic gradient * ascent to maximize the Evidence Lower Bound for a given model * and variational family. 
* * @tparam Model class of model * @tparam Q class of variational distribution * @tparam BaseRNG class of random number generator */ template class advi { public: /** * Constructor * * @param[in] m stan model * @param[in] cont_params initialization of continuous parameters * @param[in,out] rng random number generator * @param[in] n_monte_carlo_grad number of samples for gradient computation * @param[in] n_monte_carlo_elbo number of samples for ELBO computation * @param[in] eval_elbo evaluate ELBO at every "eval_elbo" iters * @param[in] n_posterior_samples number of samples to draw from posterior * @throw std::runtime_error if n_monte_carlo_grad is not positive * @throw std::runtime_error if n_monte_carlo_elbo is not positive * @throw std::runtime_error if eval_elbo is not positive * @throw std::runtime_error if n_posterior_samples is not positive */ advi(Model& m, Eigen::VectorXd& cont_params, BaseRNG& rng, int n_monte_carlo_grad, int n_monte_carlo_elbo, int eval_elbo, int n_posterior_samples) : model_(m), cont_params_(cont_params), rng_(rng), n_monte_carlo_grad_(n_monte_carlo_grad), n_monte_carlo_elbo_(n_monte_carlo_elbo), eval_elbo_(eval_elbo), n_posterior_samples_(n_posterior_samples) { static const char* function = "stan::variational::advi"; math::check_positive(function, "Number of Monte Carlo samples for gradients", n_monte_carlo_grad_); math::check_positive(function, "Number of Monte Carlo samples for ELBO", n_monte_carlo_elbo_); math::check_positive(function, "Evaluate ELBO at every eval_elbo iteration", eval_elbo_); math::check_positive(function, "Number of posterior samples for output", n_posterior_samples_); } /** * Calculates the Evidence Lower BOund (ELBO) by sampling from * the variational distribution and then evaluating the log joint, * adjusted by the entropy term of the variational distribution. * * @param[in] variational variational approximation at which to evaluate * the ELBO. 
* @param logger logger for messages * @return the evidence lower bound. * @throw std::domain_error If, after n_monte_carlo_elbo_ number of draws * from the variational distribution all give non-finite log joint * evaluations. This means that the model is severely ill conditioned or * that the variational distribution has somehow collapsed. */ double calc_ELBO(const Q& variational, callbacks::logger& logger) const { static const char* function = "stan::variational::advi::calc_ELBO"; double elbo = 0.0; int dim = variational.dimension(); Eigen::VectorXd zeta(dim); int n_dropped_evaluations = 0; for (int i = 0; i < n_monte_carlo_elbo_;) { variational.sample(rng_, zeta); try { std::stringstream ss; double log_prob = model_.template log_prob(zeta, &ss); if (ss.str().length() > 0) logger.info(ss); stan::math::check_finite(function, "log_prob", log_prob); elbo += log_prob; ++i; } catch (const std::domain_error& e) { ++n_dropped_evaluations; if (n_dropped_evaluations >= n_monte_carlo_elbo_) { const char* name = "The number of dropped evaluations"; const char* msg1 = "has reached its maximum amount ("; const char* msg2 = "). Your model may be either severely " "ill-conditioned or misspecified."; stan::math::throw_domain_error(function, name, n_monte_carlo_elbo_, msg1, msg2); } } } elbo /= n_monte_carlo_elbo_; elbo += variational.entropy(); return elbo; } /** * Calculates the "black box" gradient of the ELBO. * * @param[in] variational variational approximation at which to evaluate * the ELBO. * @param[out] elbo_grad gradient of ELBO with respect to variational * approximation. 
* @param logger logger for messages */ void calc_ELBO_grad(const Q& variational, Q& elbo_grad, callbacks::logger& logger) const { static const char* function = "stan::variational::advi::calc_ELBO_grad"; stan::math::check_size_match( function, "Dimension of elbo_grad", elbo_grad.dimension(), "Dimension of variational q", variational.dimension()); stan::math::check_size_match( function, "Dimension of variational q", variational.dimension(), "Dimension of variables in model", cont_params_.size()); variational.calc_grad(elbo_grad, model_, cont_params_, n_monte_carlo_grad_, rng_, logger); } /** * Heuristic grid search to adapt eta to the scale of the problem. * * @param[in] variational initial variational distribution. * @param[in] adapt_iterations number of iterations to spend doing stochastic * gradient ascent at each proposed eta value. * @param[in,out] logger logger for messages * @return adapted (tuned) value of eta via heuristic grid search * @throw std::domain_error If either (a) the initial ELBO cannot be * computed at the initial variational distribution, (b) all step-size * proposals in eta_sequence fail. 
*/ double adapt_eta(Q& variational, int adapt_iterations, callbacks::logger& logger) const { static const char* function = "stan::variational::advi::adapt_eta"; stan::math::check_positive(function, "Number of adaptation iterations", adapt_iterations); logger.info("Begin eta adaptation."); // Sequence of eta values to try during adaptation const int eta_sequence_size = 5; double eta_sequence[eta_sequence_size] = {100, 10, 1, 0.1, 0.01}; // Initialize ELBO tracking variables double elbo = -std::numeric_limits::max(); double elbo_best = -std::numeric_limits::max(); double elbo_init; try { elbo_init = calc_ELBO(variational, logger); } catch (const std::domain_error& e) { const char* name = "Cannot compute ELBO using the initial " "variational distribution."; const char* msg1 = "Your model may be either " "severely ill-conditioned or misspecified."; stan::math::throw_domain_error(function, name, "", msg1); } // Variational family to store gradients Q elbo_grad = Q(model_.num_params_r()); // Adaptive step-size sequence Q history_grad_squared = Q(model_.num_params_r()); double tau = 1.0; double pre_factor = 0.9; double post_factor = 0.1; double eta_best = 0.0; double eta; double eta_scaled; bool do_more_tuning = true; int eta_sequence_index = 0; while (do_more_tuning) { // Try next eta eta = eta_sequence[eta_sequence_index]; int print_progress_m; for (int iter_tune = 1; iter_tune <= adapt_iterations; ++iter_tune) { print_progress_m = eta_sequence_index * adapt_iterations + iter_tune; variational ::print_progress(print_progress_m, 0, adapt_iterations * eta_sequence_size, adapt_iterations, true, "", "", logger); // (ROBUST) Compute gradient of ELBO. It's OK if it diverges. // We'll try a smaller eta. 
try { calc_ELBO_grad(variational, elbo_grad, logger); } catch (const std::domain_error& e) { elbo_grad.set_to_zero(); } // Update step-size if (iter_tune == 1) { history_grad_squared += elbo_grad.square(); } else { history_grad_squared = pre_factor * history_grad_squared + post_factor * elbo_grad.square(); } eta_scaled = eta / sqrt(static_cast(iter_tune)); // Stochastic gradient update variational += eta_scaled * elbo_grad / (tau + history_grad_squared.sqrt()); } // (ROBUST) Compute ELBO. It's OK if it has diverged. try { elbo = calc_ELBO(variational, logger); } catch (const std::domain_error& e) { elbo = -std::numeric_limits::max(); } // Check if: // (1) ELBO at current eta is worse than the best ELBO // (2) the best ELBO hasn't gotten worse than the initial ELBO if (elbo < elbo_best && elbo_best > elbo_init) { std::stringstream ss; ss << "Success!" << " Found best value [eta = " << eta_best << "]"; if (eta_sequence_index < eta_sequence_size - 1) ss << (" earlier than expected."); else ss << "."; logger.info(ss); logger.info(""); do_more_tuning = false; } else { if (eta_sequence_index < eta_sequence_size - 1) { // Reset elbo_best = elbo; eta_best = eta; } else { // No more eta values to try, so use current eta if it // didn't diverge or fail if it did diverge if (elbo > elbo_init) { std::stringstream ss; ss << "Success!" << " Found best value [eta = " << eta_best << "]."; logger.info(ss); logger.info(""); eta_best = eta; do_more_tuning = false; } else { const char* name = "All proposed step-sizes"; const char* msg1 = "failed. Your model may be either " "severely ill-conditioned or misspecified."; stan::math::throw_domain_error(function, name, "", msg1); } } // Reset history_grad_squared.set_to_zero(); } ++eta_sequence_index; variational = Q(cont_params_); } return eta_best; } /** * Runs stochastic gradient ascent with an adaptive stepsize sequence. 
* * @param[in,out] variational initial variational distribution * @param[in] eta stepsize scaling parameter * @param[in] tol_rel_obj relative tolerance parameter for convergence * @param[in] max_iterations max number of iterations to run algorithm * @param[in,out] logger logger for messages * @param[in,out] diagnostic_writer writer for diagnostic information * @throw std::domain_error If the ELBO or its gradient is ever * non-finite, at any iteration */ void stochastic_gradient_ascent(Q& variational, double eta, double tol_rel_obj, int max_iterations, callbacks::logger& logger, callbacks::writer& diagnostic_writer) const { static const char* function = "stan::variational::advi::stochastic_gradient_ascent"; stan::math::check_positive(function, "Eta stepsize", eta); stan::math::check_positive( function, "Relative objective function tolerance", tol_rel_obj); stan::math::check_positive(function, "Maximum iterations", max_iterations); // Gradient parameters Q elbo_grad = Q(model_.num_params_r()); // Stepsize sequence parameters Q history_grad_squared = Q(model_.num_params_r()); double tau = 1.0; double pre_factor = 0.9; double post_factor = 0.1; double eta_scaled; // Initialize ELBO and convergence tracking variables double elbo(0.0); double elbo_best = -std::numeric_limits::max(); double elbo_prev = -std::numeric_limits::max(); double delta_elbo = std::numeric_limits::max(); double delta_elbo_ave = std::numeric_limits::max(); double delta_elbo_med = std::numeric_limits::max(); // Heuristic to estimate how far to look back in rolling window int cb_size = static_cast(std::max(0.1 * max_iterations / eval_elbo_, 2.0)); boost::circular_buffer elbo_diff(cb_size); logger.info("Begin stochastic gradient ascent."); logger.info( " iter" " ELBO" " delta_ELBO_mean" " delta_ELBO_med" " notes "); // Timing variables auto start = std::chrono::steady_clock::now(); // Main loop bool do_more_iterations = true; for (int iter_counter = 1; do_more_iterations; ++iter_counter) { // Compute 
gradient using Monte Carlo integration calc_ELBO_grad(variational, elbo_grad, logger); // Update step-size if (iter_counter == 1) { history_grad_squared += elbo_grad.square(); } else { history_grad_squared = pre_factor * history_grad_squared + post_factor * elbo_grad.square(); } eta_scaled = eta / sqrt(static_cast(iter_counter)); // Stochastic gradient update variational += eta_scaled * elbo_grad / (tau + history_grad_squared.sqrt()); // Check for convergence every "eval_elbo_"th iteration if (iter_counter % eval_elbo_ == 0) { elbo_prev = elbo; elbo = calc_ELBO(variational, logger); if (elbo > elbo_best) elbo_best = elbo; delta_elbo = rel_difference(elbo, elbo_prev); elbo_diff.push_back(delta_elbo); delta_elbo_ave = std::accumulate(elbo_diff.begin(), elbo_diff.end(), 0.0) / static_cast(elbo_diff.size()); delta_elbo_med = circ_buff_median(elbo_diff); std::stringstream ss; ss << " " << std::setw(4) << iter_counter << " " << std::setw(15) << std::fixed << std::setprecision(3) << elbo << " " << std::setw(16) << std::fixed << std::setprecision(3) << delta_elbo_ave << " " << std::setw(15) << std::fixed << std::setprecision(3) << delta_elbo_med; auto end = std::chrono::steady_clock::now(); double delta_t = std::chrono::duration_cast(end - start) .count() / 1000.0; std::vector print_vector; print_vector.clear(); print_vector.push_back(iter_counter); print_vector.push_back(delta_t); print_vector.push_back(elbo); diagnostic_writer(print_vector); if (delta_elbo_ave < tol_rel_obj) { ss << " MEAN ELBO CONVERGED"; do_more_iterations = false; } if (delta_elbo_med < tol_rel_obj) { ss << " MEDIAN ELBO CONVERGED"; do_more_iterations = false; } if (iter_counter > 10 * eval_elbo_) { if (delta_elbo_med > 0.5 || delta_elbo_ave > 0.5) { ss << " MAY BE DIVERGING... 
INSPECT ELBO"; } } logger.info(ss); if (do_more_iterations == false && rel_difference(elbo, elbo_best) > 0.05) { logger.info( "Informational Message: The ELBO at a previous " "iteration is larger than the ELBO upon " "convergence!"); logger.info( "This variational approximation may not " "have converged to a good optimum."); } } if (iter_counter == max_iterations) { logger.info( "Informational Message: The maximum number of " "iterations is reached! The algorithm may not have " "converged."); logger.info( "This variational approximation is not " "guaranteed to be meaningful."); do_more_iterations = false; } } } /** * Runs ADVI and writes to output. * * @param[in] eta eta parameter of stepsize sequence * @param[in] adapt_engaged boolean flag for eta adaptation * @param[in] adapt_iterations number of iterations for eta adaptation * @param[in] tol_rel_obj relative tolerance parameter for convergence * @param[in] max_iterations max number of iterations to run algorithm * @param[in,out] logger logger for messages * @param[in,out] parameter_writer writer for parameters * (typically to file) * @param[in,out] diagnostic_writer writer for diagnostic information */ int run(double eta, bool adapt_engaged, int adapt_iterations, double tol_rel_obj, int max_iterations, callbacks::logger& logger, callbacks::writer& parameter_writer, callbacks::writer& diagnostic_writer) const { diagnostic_writer("iter,time_in_seconds,ELBO"); // Initialize variational approximation Q variational = Q(cont_params_); if (adapt_engaged) { eta = adapt_eta(variational, adapt_iterations, logger); parameter_writer("Stepsize adaptation complete."); std::stringstream ss; ss << "eta = " << eta; parameter_writer(ss.str()); } stochastic_gradient_ascent(variational, eta, tol_rel_obj, max_iterations, logger, diagnostic_writer); // Write posterior mean of variational approximations. 
cont_params_ = variational.mean(); std::vector cont_vector(cont_params_.size()); for (int i = 0; i < cont_params_.size(); ++i) cont_vector.at(i) = cont_params_(i); std::vector disc_vector; std::vector values; std::stringstream msg; model_.write_array(rng_, cont_vector, disc_vector, values, true, true, &msg); if (msg.str().length() > 0) logger.info(msg); // The first row of lp_, log_p, and log_g. values.insert(values.begin(), {0, 0, 0}); parameter_writer(values); // Draw more from posterior and write on subsequent lines logger.info(""); std::stringstream ss; ss << "Drawing a sample of size " << n_posterior_samples_ << " from the approximate posterior... "; logger.info(ss); double log_p = 0; double log_g = 0; // Draw posterior sample. log_g is the log normal densities. for (int n = 0; n < n_posterior_samples_; ++n) { variational.sample_log_g(rng_, cont_params_, log_g); for (int i = 0; i < cont_params_.size(); ++i) { cont_vector.at(i) = cont_params_(i); } std::stringstream msg2; model_.write_array(rng_, cont_vector, disc_vector, values, true, true, &msg2); // log_p: Log probability in the unconstrained space log_p = model_.template log_prob(cont_params_, &msg2); if (msg2.str().length() > 0) logger.info(msg2); // Write lp__, log_p, and log_g. values.insert(values.begin(), {0, log_p, log_g}); parameter_writer(values); } logger.info("COMPLETED."); return stan::services::error_codes::OK; } // TODO(akucukelbir): move these things to stan math and test there /** * Compute the median of a circular buffer. * * @param[in] cb circular buffer with some number of values in it. * @return median of values in circular buffer. 
*/ double circ_buff_median(const boost::circular_buffer& cb) const { // FIXME: naive implementation; creates a copy as a vector std::vector v; for (boost::circular_buffer::const_iterator i = cb.begin(); i != cb.end(); ++i) { v.push_back(*i); } size_t n = v.size() / 2; std::nth_element(v.begin(), v.begin() + n, v.end()); return v[n]; } /** * Compute the relative difference between two double values. * * @param[in] prev previous value * @param[in] curr current value * @return absolutely value of relative difference */ double rel_difference(double prev, double curr) const { return std::fabs((curr - prev) / prev); } protected: Model& model_; Eigen::VectorXd& cont_params_; BaseRNG& rng_; int n_monte_carlo_grad_; int n_monte_carlo_elbo_; int eval_elbo_; int n_posterior_samples_; }; } // namespace variational } // namespace stan #endif StanHeaders/inst/include/src/stan/variational/families/0000755000176200001440000000000014645137105022717 5ustar liggesusersStanHeaders/inst/include/src/stan/variational/families/normal_fullrank.hpp0000644000176200001440000004314214645137105026622 0ustar liggesusers#ifndef STAN_VARIATIONAL_NORMAL_FULLRANK_HPP #define STAN_VARIATIONAL_NORMAL_FULLRANK_HPP #include #include #include #include #include #include #include namespace stan { namespace variational { /** * Variational family approximation with full-rank multivariate * normal distribution. */ class normal_fullrank : public base_family { private: /** * Mean vector. */ Eigen::VectorXd mu_; /** * Cholesky factor of covariance: * Sigma = L_chol * L_chol.transpose() */ Eigen::MatrixXd L_chol_; /** * Dimensionality of distribution. */ const int dimension_; /** * Raise a domain exception if the specified vector contains * not-a-number values. * * @param[in] mu Mean vector. * @throw std::domain_error If the mean vector contains NaN * values or does not match this distribution's dimensionality. 
*/ void validate_mean(const char* function, const Eigen::VectorXd& mu) { stan::math::check_not_nan(function, "Mean vector", mu); stan::math::check_size_match(function, "Dimension of input vector", mu.size(), "Dimension of current vector", dimension()); } /** * Raise a domain exception if the specified matrix is not * square, not lower triangular, or contains not-a-number * values. * * Warning: This function does not check that the * Cholesky factor is positive definite. * * @param[in] L_chol Cholesky factor for covariance matrix. * @throw std::domain_error If the specified matrix is not * square, is not lower triangular, if its size does not match * the dimensionality of this approximation, or if it contains * not-a-number values. */ void validate_cholesky_factor(const char* function, const Eigen::MatrixXd& L_chol) { stan::math::check_square(function, "Cholesky factor", L_chol); stan::math::check_lower_triangular(function, "Cholesky factor", L_chol); stan::math::check_size_match(function, "Dimension of mean vector", dimension(), "Dimension of Cholesky factor", L_chol.rows()); stan::math::check_not_nan(function, "Cholesky factor", L_chol); } public: /** * Construct a variational distribution of the specified * dimensionality with a zero mean and Cholesky factor of a zero * covariance matrix. * * @param[in] dimension Dimensionality of distribution. */ explicit normal_fullrank(size_t dimension) : mu_(Eigen::VectorXd::Zero(dimension)), L_chol_(Eigen::MatrixXd::Zero(dimension, dimension)), dimension_(dimension) {} /** * Construct a variational distribution with specified mean vector * and Cholesky factor for identity covariance. * * @param[in] cont_params Mean vector. */ explicit normal_fullrank(const Eigen::VectorXd& cont_params) : mu_(cont_params), L_chol_( Eigen::MatrixXd::Identity(cont_params.size(), cont_params.size())), dimension_(cont_params.size()) {} /** * Construct a variational distribution with specified mean and * Cholesky factor for covariance. 
* * Warning: Positive-definiteness is not enforced for the * Cholesky factor. * * @param[in] mu Mean vector. * @param[in] L_chol Cholesky factor of covariance. * @throws std::domain_error If the Cholesky factor is not * square or not lower triangular, if the mean and Cholesky factor * have different dimensionality, or if any of the elements is * not-a-number. */ normal_fullrank(const Eigen::VectorXd& mu, const Eigen::MatrixXd& L_chol) : mu_(mu), L_chol_(L_chol), dimension_(mu.size()) { static const char* function = "stan::variational::normal_fullrank"; validate_mean(function, mu); validate_cholesky_factor(function, L_chol); } /** * Return the dimensionality of the approximation. */ int dimension() const { return dimension_; } /** * Return the mean vector. */ const Eigen::VectorXd& mu() const { return mu_; } /** * Return the Cholesky factor of the covariance matrix. */ const Eigen::MatrixXd& L_chol() const { return L_chol_; } /** * Set the mean vector to the specified value. * * @param[in] mu Mean vector. * @throw std::domain_error If the size of the specified mean * vector does not match the stored dimension of this approximation. */ void set_mu(const Eigen::VectorXd& mu) { static const char* function = "stan::variational::set_mu"; validate_mean(function, mu); mu_ = mu; } /** * Set the Cholesky factor to the specified value. * * @param[in] L_chol Cholesky factor of covariance matrix. * @throw std::domain_error If the specified matrix is not * square, is not lower triangular, if its size does not match * the dimensionality of this approximation, or if it contains * not-a-number values. */ void set_L_chol(const Eigen::MatrixXd& L_chol) { static const char* function = "stan::variational::set_L_chol"; validate_cholesky_factor(function, L_chol); L_chol_ = L_chol; } /** * Set the mean vector and Cholesky factor for the covariance * matrix to zero. 
*/ void set_to_zero() { mu_ = Eigen::VectorXd::Zero(dimension()); L_chol_ = Eigen::MatrixXd::Zero(dimension(), dimension()); } /** * Return a new full rank approximation resulting from squaring * the entries in the mean and Cholesky factor for the * covariance matrix. The new approximation does not hold * any references to this approximation. */ normal_fullrank square() const { return normal_fullrank(Eigen::VectorXd(mu_.array().square()), Eigen::MatrixXd(L_chol_.array().square())); } /** * Return a new full rank approximation resulting from taking * the square root of the entries in the mean and Cholesky * factor for the covariance matrix. The new approximation does * not hold any references to this approximation. * * Warning: No checks are carried out to ensure the * entries are non-negative before taking square roots, so * not-a-number values may result. */ normal_fullrank sqrt() const { return normal_fullrank(Eigen::VectorXd(mu_.array().sqrt()), Eigen::MatrixXd(L_chol_.array().sqrt())); } /** * Return this approximation after setting its mean vector and * Cholesky factor for covariance to the values given by the * specified approximation. * * @param[in] rhs Approximation from which to gather the mean and * covariance. * @return This approximation after assignment. * @throw std::domain_error If the dimensionality of the specified * approximation does not match this approximation's dimensionality. */ normal_fullrank& operator=(const normal_fullrank& rhs) { static const char* function = "stan::variational::normal_fullrank::operator="; stan::math::check_size_match(function, "Dimension of lhs", dimension(), "Dimension of rhs", rhs.dimension()); mu_ = rhs.mu(); L_chol_ = rhs.L_chol(); return *this; } /** * Add the mean and Cholesky factor of the covariance matrix of * the specified approximation to this approximation. * * @param[in] rhs Approximation from which to gather the mean and * covariance. 
* @return This approximation after adding the specified * approximation. * @throw std::domain_error If the dimensionality of the specified * approximation does not match this approximation's dimensionality. */ normal_fullrank& operator+=(const normal_fullrank& rhs) { static const char* function = "stan::variational::normal_fullrank::operator+="; stan::math::check_size_match(function, "Dimension of lhs", dimension(), "Dimension of rhs", rhs.dimension()); mu_ += rhs.mu(); L_chol_ += rhs.L_chol(); return *this; } /** * Return this approximation after elementwise division by the * specified approximation's mean and Cholesky factor for * covariance. * * @param[in] rhs Approximation from which to gather the mean and * covariance. * @return This approximation after elementwise division by the * specified approximation. * @throw std::domain_error If the dimensionality of the specified * approximation does not match this approximation's dimensionality. */ inline normal_fullrank& operator/=(const normal_fullrank& rhs) { static const char* function = "stan::variational::normal_fullrank::operator/="; stan::math::check_size_match(function, "Dimension of lhs", dimension(), "Dimension of rhs", rhs.dimension()); mu_.array() /= rhs.mu().array(); L_chol_.array() /= rhs.L_chol().array(); return *this; } /** * Return this approximation after adding the specified scalar * to each entry in the mean and cholesky factor for covariance. * * Warning: No finiteness check is made on the scalar, so * it may introduce NaNs. * * @param[in] scalar Scalar to add. * @return This approximation after elementwise addition of the * specified scalar. */ normal_fullrank& operator+=(double scalar) { mu_.array() += scalar; L_chol_.array() += scalar; return *this; } /** * Return this approximation after multiplying by the specified * scalar to each entry in the mean and cholesky factor for * covariance. * * Warning: No finiteness check is made on the scalar, so * it may introduce NaNs. 
* * @param[in] scalar Scalar to add. * @return This approximation after elementwise addition of the * specified scalar. */ normal_fullrank& operator*=(double scalar) { mu_ *= scalar; L_chol_ *= scalar; return *this; } /** * Returns the mean vector for this approximation. * * See: mu(). * * @return Mean vector for this approximation. */ const Eigen::VectorXd& mean() const { return mu(); } /** * Return the entropy of this approximation. * *

The entropy is defined by * 0.5 * dim * (1+log2pi) + 0.5 * log det (L^T L) * = 0.5 * dim * (1+log2pi) + sum(log(abs(diag(L)))). * * @return Entropy of this approximation */ double entropy() const { static double mult = 0.5 * (1.0 + stan::math::LOG_TWO_PI); double result = mult * dimension(); for (int d = 0; d < dimension(); ++d) { double tmp = fabs(L_chol_(d, d)); if (tmp != 0.0) result += log(tmp); } return result; } /** * Return the transform of the specified vector using the * Cholesky factor and mean vector. * * The transform is defined by * S^{-1}(eta) = L_chol * eta + mu. * * @param[in] eta Vector to transform. * @throw std::domain_error If the specified vector's size does * not match the dimensionality of this approximation. * @return Transformed vector. */ Eigen::VectorXd transform(const Eigen::VectorXd& eta) const { static const char* function = "stan::variational::normal_fullrank::transform"; stan::math::check_size_match(function, "Dimension of input vector", eta.size(), "Dimension of mean vector", dimension()); stan::math::check_not_nan(function, "Input vector", eta); return (L_chol_ * eta) + mu_; } template void sample(BaseRNG& rng, Eigen::VectorXd& eta) const { // Draw from standard normal and transform to real-coordinate space for (int d = 0; d < dimension(); ++d) eta(d) = stan::math::normal_rng(0, 1, rng); eta = transform(eta); } template void sample_log_g(BaseRNG& rng, Eigen::VectorXd& eta, double& log_g) const { // Draw from the approximation for (int d = 0; d < dimension(); ++d) { eta(d) = stan::math::normal_rng(0, 1, rng); } // Compute the log density before transformation log_g = calc_log_g(eta); // Transform to real-coordinate space eta = transform(eta); } double calc_log_g(const Eigen::VectorXd& eta) const { // Compute the log density wrt normal distribution dropping constants double log_g = 0; for (int d = 0; d < dimension(); ++d) { log_g += -stan::math::square(eta(d)) * 0.5; } return log_g; } /** * Calculates the "blackbox" gradient with 
respect to BOTH the * location vector (mu) and the cholesky factor of the scale * matrix (L_chol) in parallel. It uses the same gradient * computed from a set of Monte Carlo samples * * @tparam M Model class. * @tparam BaseRNG Class of base random number generator. * @param[in] elbo_grad Approximation to store "blackbox" gradient. * @param[in] m Model. * @param[in] cont_params Continuous parameters. * @param[in] n_monte_carlo_grad Sample size for gradient computation. * @param[in,out] rng Random number generator. * @param[in,out] logger logger for messages * @throw std::domain_error If the number of divergent * iterations exceeds its specified bounds. */ template void calc_grad(normal_fullrank& elbo_grad, M& m, Eigen::VectorXd& cont_params, int n_monte_carlo_grad, BaseRNG& rng, callbacks::logger& logger) const { static const char* function = "stan::variational::normal_fullrank::calc_grad"; stan::math::check_size_match(function, "Dimension of elbo_grad", elbo_grad.dimension(), "Dimension of variational q", dimension()); stan::math::check_size_match(function, "Dimension of variational q", dimension(), "Dimension of variables in model", cont_params.size()); Eigen::VectorXd mu_grad = Eigen::VectorXd::Zero(dimension()); Eigen::MatrixXd L_grad = Eigen::MatrixXd::Zero(dimension(), dimension()); double tmp_lp = 0.0; Eigen::VectorXd tmp_mu_grad = Eigen::VectorXd::Zero(dimension()); Eigen::VectorXd eta = Eigen::VectorXd::Zero(dimension()); Eigen::VectorXd zeta = Eigen::VectorXd::Zero(dimension()); // Naive Monte Carlo integration static const int n_retries = 10; for (int i = 0, n_monte_carlo_drop = 0; i < n_monte_carlo_grad;) { // Draw from standard normal and transform to real-coordinate space for (int d = 0; d < dimension(); ++d) { eta(d) = stan::math::normal_rng(0, 1, rng); } zeta = transform(eta); try { std::stringstream ss; stan::model::gradient(m, zeta, tmp_lp, tmp_mu_grad, &ss); if (ss.str().length() > 0) logger.info(ss); stan::math::check_finite(function, "Gradient 
of mu", tmp_mu_grad); mu_grad += tmp_mu_grad; for (int ii = 0; ii < dimension(); ++ii) { for (int jj = 0; jj <= ii; ++jj) { L_grad(ii, jj) += tmp_mu_grad(ii) * eta(jj); } } ++i; } catch (const std::exception& e) { ++n_monte_carlo_drop; if (n_monte_carlo_drop >= n_retries * n_monte_carlo_grad) { const char* name = "The number of dropped evaluations"; const char* msg1 = "has reached its maximum amount ("; int y = n_retries * n_monte_carlo_grad; const char* msg2 = "). Your model may be either severely " "ill-conditioned or misspecified."; stan::math::throw_domain_error(function, name, y, msg1, msg2); } } } mu_grad /= static_cast(n_monte_carlo_grad); L_grad /= static_cast(n_monte_carlo_grad); // Add gradient of entropy term L_grad.diagonal().array() += L_chol_.diagonal().array().inverse(); elbo_grad.set_mu(mu_grad); elbo_grad.set_L_chol(L_grad); } }; /** * Return a new approximation resulting from adding the mean and * covariance matrix Cholesky factor of the specified * approximations. * * @param[in] lhs First approximation. * @param[in] rhs Second approximation. * @return Sum of the specified approximations. * @throw std::domain_error If the dimensionalities do not match. */ inline normal_fullrank operator+(normal_fullrank lhs, const normal_fullrank& rhs) { return lhs += rhs; } /** * Return a new approximation resulting from elementwise division of * of the first specified approximation by the second. * * @param[in] lhs First approximation. * @param[in] rhs Second approximation. * @return Elementwise division of the specified approximations. * @throw std::domain_error If the dimensionalities do not match. */ inline normal_fullrank operator/(normal_fullrank lhs, const normal_fullrank& rhs) { return lhs /= rhs; } /** * Return a new approximation resulting from elementwise addition * of the specified scalar to the mean and Cholesky factor of * covariance entries for the specified approximation. * * @param[in] scalar Scalar value * @param[in] rhs Approximation. 
* @return Addition of scalar to specified approximation. */ inline normal_fullrank operator+(double scalar, normal_fullrank rhs) { return rhs += scalar; } /** * Return a new approximation resulting from elementwise * multiplication of the specified scalar to the mean and Cholesky * factor of covariance entries for the specified approximation. * * @param[in] scalar Scalar value * @param[in] rhs Approximation. * @return Multiplication of scalar by the specified approximation. */ inline normal_fullrank operator*(double scalar, normal_fullrank rhs) { return rhs *= scalar; } } // namespace variational } // namespace stan #endif StanHeaders/inst/include/src/stan/variational/families/normal_meanfield.hpp0000644000176200001440000003672014645137105026734 0ustar liggesusers#ifndef STAN_VARIATIONAL_NORMAL_MEANFIELD_HPP #define STAN_VARIATIONAL_NORMAL_MEANFIELD_HPP #include #include #include #include #include #include #include namespace stan { namespace variational { /** * Variational family approximation with mean-field (diagonal * covariance) multivariate normal distribution. */ class normal_meanfield : public base_family { private: /** * Mean vector. */ Eigen::VectorXd mu_; /** * Log standard deviation (log scale) vector. */ Eigen::VectorXd omega_; /** * Dimensionality of distribution. */ const int dimension_; public: /** * Construct a variational distribution of the specified * dimensionality with a zero mean and zero log standard * deviation (unit standard deviation). * * @param[in] dimension Dimensionality of distribution. */ explicit normal_meanfield(size_t dimension) : mu_(Eigen::VectorXd::Zero(dimension)), omega_(Eigen::VectorXd::Zero(dimension)), dimension_(dimension) {} /** * Construct a variational distribution with the specified mean * vector and zero log standard deviation (unit standard * deviation). * * @param[in] cont_params Mean vector. 
*/ explicit normal_meanfield(const Eigen::VectorXd& cont_params) : mu_(cont_params), omega_(Eigen::VectorXd::Zero(cont_params.size())), dimension_(cont_params.size()) {} /** * Construct a variational distribution with the specified mean * and log standard deviation vectors. * * @param[in] mu Mean vector. * @param[in] omega Log standard deviation vector. * @throw std::domain_error If the sizes of mean and log * standard deviation vectors are different, or if either * contains a not-a-number value. */ normal_meanfield(const Eigen::VectorXd& mu, const Eigen::VectorXd& omega) : mu_(mu), omega_(omega), dimension_(mu.size()) { static const char* function = "stan::variational::normal_meanfield"; stan::math::check_size_match(function, "Dimension of mean vector", mu_.size(), "Dimension of log std vector", omega_.size()); stan::math::check_not_nan(function, "Mean vector", mu_); stan::math::check_not_nan(function, "Log std vector", omega_); } /** * Return the dimensionality of the approximation. */ int dimension() const { return dimension_; } /** * Return the mean vector. */ const Eigen::VectorXd& mu() const { return mu_; } /** * Return the log standard deviation vector. */ const Eigen::VectorXd& omega() const { return omega_; } /** * Set the mean vector to the specified value. * * @param[in] mu Mean vector. * @throw std::domain_error If the mean vector's size does not * match this approximation's dimensionality, or if it contains * not-a-number values. */ void set_mu(const Eigen::VectorXd& mu) { static const char* function = "stan::variational::normal_meanfield::set_mu"; stan::math::check_size_match(function, "Dimension of input vector", mu.size(), "Dimension of current vector", dimension()); stan::math::check_not_nan(function, "Input vector", mu); mu_ = mu; } /** * Set the log standard deviation vector to the specified * value. * * @param[in] omega Log standard deviation vector. 
* @throw std::domain_error If the log standard deviation * vector's size does not match this approximation's * dimensionality, or if it contains not-a-number values. */ void set_omega(const Eigen::VectorXd& omega) { static const char* function = "stan::variational::normal_meanfield::set_omega"; stan::math::check_size_match(function, "Dimension of input vector", omega.size(), "Dimension of current vector", dimension()); stan::math::check_not_nan(function, "Input vector", omega); omega_ = omega; } /** * Sets the mean and log standard deviation vector for this * approximation to zero. */ void set_to_zero() { mu_ = Eigen::VectorXd::Zero(dimension()); omega_ = Eigen::VectorXd::Zero(dimension()); } /** * Return a new mean field approximation resulting from squaring * the entries in the mean and log standard deviation. The new * approximation does not hold any references to this * approximation. */ normal_meanfield square() const { return normal_meanfield(Eigen::VectorXd(mu_.array().square()), Eigen::VectorXd(omega_.array().square())); } /** * Return a new mean field approximation resulting from taking * the square root of the entries in the mean and log standard * deviation. The new approximation does not hold any * references to this approximation. * * Warning: No checks are carried out to ensure the * entries are non-negative before taking square roots, so * not-a-number values may result. */ normal_meanfield sqrt() const { return normal_meanfield(Eigen::VectorXd(mu_.array().sqrt()), Eigen::VectorXd(omega_.array().sqrt())); } /** * Return this approximation after setting its mean vector and * Cholesky factor for covariance to the values given by the * specified approximation. * * @param[in] rhs Approximation from which to gather the mean * and log standard deviation vectors. * @return This approximation after assignment. * @throw std::domain_error If the dimensionality of the specified * approximation does not match this approximation's dimensionality. 
*/ normal_meanfield& operator=(const normal_meanfield& rhs) { static const char* function = "stan::variational::normal_meanfield::operator="; stan::math::check_size_match(function, "Dimension of lhs", dimension(), "Dimension of rhs", rhs.dimension()); mu_ = rhs.mu(); omega_ = rhs.omega(); return *this; } /** * Add the mean and Cholesky factor of the covariance matrix of * the specified approximation to this approximation. * * @param[in] rhs Approximation from which to gather the mean * and log standard deviation vectors. * @return This approximation after adding the specified * approximation. * @throw std::domain_error If the size of the specified * approximation does not match the size of this approximation. */ normal_meanfield& operator+=(const normal_meanfield& rhs) { static const char* function = "stan::variational::normal_meanfield::operator+="; stan::math::check_size_match(function, "Dimension of lhs", dimension(), "Dimension of rhs", rhs.dimension()); mu_ += rhs.mu(); omega_ += rhs.omega(); return *this; } /** * Return this approximation after elementwise division by the * specified approximation's mean and log standard deviation * vectors. * * @param[in] rhs Approximation from which to gather the mean * and log standard deviation vectors. * @return This approximation after elementwise division by the * specified approximation. * @throw std::domain_error If the dimensionality of the specified * approximation does not match this approximation's dimensionality. */ inline normal_meanfield& operator/=(const normal_meanfield& rhs) { static const char* function = "stan::variational::normal_meanfield::operator/="; stan::math::check_size_match(function, "Dimension of lhs", dimension(), "Dimension of rhs", rhs.dimension()); mu_.array() /= rhs.mu().array(); omega_.array() /= rhs.omega().array(); return *this; } /** * Return this approximation after adding the specified scalar * to each entry in the mean and log standard deviation vectors. 
* * Warning: No finiteness check is made on the scalar, so * it may introduce NaNs. * * @param[in] scalar Scalar to add. * @return This approximation after elementwise addition of the * specified scalar. */ normal_meanfield& operator+=(double scalar) { mu_.array() += scalar; omega_.array() += scalar; return *this; } /** * Return this approximation after multiplying by the specified * scalar to each entry in the mean and log standard deviation * vectors. * * Warning: No finiteness check is made on the scalar, so * it may introduce NaNs. * * @param[in] scalar Scalar to add. * @return This approximation after elementwise addition of the * specified scalar. */ normal_meanfield& operator*=(double scalar) { mu_ *= scalar; omega_ *= scalar; return *this; } /** * Returns the mean vector for this approximation. * * See: mu(). * * @return Mean vector for this approximation. */ const Eigen::VectorXd& mean() const { return mu(); } /** * Return the entropy of the approximation. * *

The entropy is defined by * 0.5 * dim * (1+log2pi) + 0.5 * log det diag(sigma^2) * = 0.5 * dim * (1+log2pi) + sum(log(sigma)) * = 0.5 * dim * (1+log2pi) + sum(omega) * * @return Entropy of this approximation. */ double entropy() const { return 0.5 * static_cast(dimension()) * (1.0 + stan::math::LOG_TWO_PI) + omega_.sum(); } /** * Return the transform of the specified vector using the * Cholesky factor and mean vector. * * The transform is defined by * S^{-1}(eta) = sigma * eta + mu = exp(omega) * eta + mu. * * @param[in] eta Vector to transform. * @throw std::domain_error If the specified vector's size does * not match the dimensionality of this approximation. * @return Transformed vector. */ Eigen::VectorXd transform(const Eigen::VectorXd& eta) const { static const char* function = "stan::variational::normal_meanfield::transform"; stan::math::check_size_match(function, "Dimension of mean vector", dimension(), "Dimension of input vector", eta.size()); stan::math::check_not_nan(function, "Input vector", eta); // exp(omega) * eta + mu return eta.array().cwiseProduct(omega_.array().exp()) + mu_.array(); } /** * Calculates the "blackbox" gradient with respect to both the * location vector (mu) and the log-std vector (omega) in * parallel. It uses the same gradient computed from a set of * Monte Carlo samples. * * @tparam M Model class. * @tparam BaseRNG Class of base random number generator. * @param[in] elbo_grad Parameters to store "blackbox" gradient * @param[in] m Model. * @param[in] cont_params Continuous parameters. * @param[in] n_monte_carlo_grad Number of samples for gradient * computation. * @param[in,out] rng Random number generator. * @param[in,out] logger logger for messages * @throw std::domain_error If the number of divergent * iterations exceeds its specified bounds. 
*/ template void calc_grad(normal_meanfield& elbo_grad, M& m, Eigen::VectorXd& cont_params, int n_monte_carlo_grad, BaseRNG& rng, callbacks::logger& logger) const { static const char* function = "stan::variational::normal_meanfield::calc_grad"; stan::math::check_size_match(function, "Dimension of elbo_grad", elbo_grad.dimension(), "Dimension of variational q", dimension()); stan::math::check_size_match(function, "Dimension of variational q", dimension(), "Dimension of variables in model", cont_params.size()); Eigen::VectorXd mu_grad = Eigen::VectorXd::Zero(dimension()); Eigen::VectorXd omega_grad = Eigen::VectorXd::Zero(dimension()); double tmp_lp = 0.0; Eigen::VectorXd tmp_mu_grad = Eigen::VectorXd::Zero(dimension()); Eigen::VectorXd eta = Eigen::VectorXd::Zero(dimension()); Eigen::VectorXd zeta = Eigen::VectorXd::Zero(dimension()); // Naive Monte Carlo integration static const int n_retries = 10; for (int i = 0, n_monte_carlo_drop = 0; i < n_monte_carlo_grad;) { // Draw from standard normal and transform to real-coordinate space for (int d = 0; d < dimension(); ++d) eta(d) = stan::math::normal_rng(0, 1, rng); zeta = transform(eta); try { std::stringstream ss; stan::model::gradient(m, zeta, tmp_lp, tmp_mu_grad, &ss); if (ss.str().length() > 0) logger.info(ss); stan::math::check_finite(function, "Gradient of mu", tmp_mu_grad); mu_grad += tmp_mu_grad; omega_grad.array() += tmp_mu_grad.array().cwiseProduct(eta.array()); ++i; } catch (const std::exception& e) { ++n_monte_carlo_drop; if (n_monte_carlo_drop >= n_retries * n_monte_carlo_grad) { const char* name = "The number of dropped evaluations"; const char* msg1 = "has reached its maximum amount ("; int y = n_retries * n_monte_carlo_grad; const char* msg2 = "). 
Your model may be either severely " "ill-conditioned or misspecified."; stan::math::throw_domain_error(function, name, y, msg1, msg2); } } } mu_grad /= static_cast(n_monte_carlo_grad); omega_grad /= static_cast(n_monte_carlo_grad); omega_grad.array() = omega_grad.array().cwiseProduct(omega_.array().exp()); omega_grad.array() += 1.0; // add entropy gradient (unit) elbo_grad.set_mu(mu_grad); elbo_grad.set_omega(omega_grad); } }; /** * Return a new approximation resulting from adding the mean and * log standard deviation of the specified approximations. * * @param[in] lhs First approximation. * @param[in] rhs Second approximation. * @return Sum of the specified approximations. * @throw std::domain_error If the dimensionalities do not match. */ inline normal_meanfield operator+(normal_meanfield lhs, const normal_meanfield& rhs) { return lhs += rhs; } /** * Return a new approximation resulting from elementwise division of * of the first specified approximation by the second. * * @param[in] lhs First approximation. * @param[in] rhs Second approximation. * @return Elementwise division of the specified approximations. * @throw std::domain_error If the dimensionalities do not match. */ inline normal_meanfield operator/(normal_meanfield lhs, const normal_meanfield& rhs) { return lhs /= rhs; } /** * Return a new approximation resulting from elementwise addition * of the specified scalar to the mean and log standard deviation * entries of the specified approximation. * * @param[in] scalar Scalar value * @param[in] rhs Approximation. * @return Addition of scalar to specified approximation. */ inline normal_meanfield operator+(double scalar, normal_meanfield rhs) { return rhs += scalar; } /** * Return a new approximation resulting from elementwise * multiplication of the specified scalar to the mean and log * standard deviation vectors of the specified approximation. * * @param[in] scalar Scalar value * @param[in] rhs Approximation. 
* @return Multiplication of scalar by the specified approximation. */ inline normal_meanfield operator*(double scalar, normal_meanfield rhs) { return rhs *= scalar; } } // namespace variational } // namespace stan #endif StanHeaders/inst/include/src/stan/variational/base_family.hpp0000644000176200001440000000714014645137105024114 0ustar liggesusers#ifndef STAN_VARIATIONAL_BASE_FAMILY_HPP #define STAN_VARIATIONAL_BASE_FAMILY_HPP #include #include #include #include namespace stan { namespace variational { class base_family { public: // Constructors base_family() {} /** * Return the dimensionality of the approximation. */ virtual int dimension() const = 0; // Distribution-based operations virtual const Eigen::VectorXd& mean() const = 0; virtual double entropy() const = 0; virtual Eigen::VectorXd transform(const Eigen::VectorXd& eta) const = 0; /** * Assign a draw from this mean field approximation to the * specified vector using the specified random number generator. * * @tparam BaseRNG Class of random number generator. * @param[in] rng Base random number generator. * @param[out] eta Vector to which the draw is assigned; dimension has to be * the same as the dimension of variational q. * @throws std::range_error If the index is out of range. */ template void sample(BaseRNG& rng, Eigen::VectorXd& eta) const { // Draw from standard normal and transform to real-coordinate space for (int d = 0; d < dimension(); ++d) eta(d) = stan::math::normal_rng(0, 1, rng); eta = transform(eta); } /** * Draw a posterior sample from the normal distribution, * and return its log normal density. The constants are dropped. * * @param[in] rng Base random number generator. * @param[out] eta Vector to which the draw is assigned; dimension has to be * the same as the dimension of variational q. eta will be transformed into * variational posteriors. * @param[out] log_g The log density in the variational approximation; * The constant term is dropped. 
* @throws std::range_error If the index is out of range. */ template void sample_log_g(BaseRNG& rng, Eigen::VectorXd& eta, double& log_g) const { // Draw from the approximation for (int d = 0; d < dimension(); ++d) { eta(d) = stan::math::normal_rng(0, 1, rng); } // Compute the log density before transformation log_g = calc_log_g(eta); // Transform to real-coordinate space eta = transform(eta); } /** * Compute the unnormalized log unit normal density wrt eta. All constants are * dropped. * * @param[in] eta Vector; dimension has to be the same as the dimension * of variational q. * @return The unnormalized log density in the variational approximation; * @throws std::range_error If the index is out of range. */ double calc_log_g(const Eigen::VectorXd& eta) const { double log_g = 0; for (int d = 0; d < dimension(); ++d) { log_g += -stan::math::square(eta(d)) * 0.5; } return log_g; } template void calc_grad(base_family& elbo_grad, M& m, Eigen::VectorXd& cont_params, int n_monte_carlo_grad, BaseRNG& rng, callbacks::logger& logger) const; protected: void write_error_msg_(std::ostream* error_msgs, const std::exception& e) const { if (!error_msgs) { return; } *error_msgs << std::endl << "Informational Message: The current gradient evaluation " << "of the ELBO is ignored because of the following issue:" << std::endl << e.what() << std::endl << "If this warning occurs often then your model may be " << "either severely ill-conditioned or misspecified." 
<< std::endl; } }; } // namespace variational } // namespace stan #endif StanHeaders/inst/include/src/stan/optimization/0000755000176200001440000000000014645137105021343 5ustar liggesusersStanHeaders/inst/include/src/stan/optimization/bfgs_update.hpp0000644000176200001440000000356114645137105024344 0ustar liggesusers#ifndef STAN_OPTIMIZATION_BFGS_UPDATE_HPP #define STAN_OPTIMIZATION_BFGS_UPDATE_HPP #include namespace stan { namespace optimization { template class BFGSUpdate_HInv { public: typedef Eigen::Matrix VectorT; typedef Eigen::Matrix HessianT; /** * Update the inverse Hessian approximation. * * @param yk Difference between the current and previous gradient vector. * @param sk Difference between the current and previous state vector. * @param reset Whether to reset the approximation, forgetting about * previous values. * @return In the case of a reset, returns the optimal scaling of the * initial Hessian approximation which is useful for predicting * step-sizes. **/ inline Scalar update(const VectorT &yk, const VectorT &sk, bool reset = false) { Scalar rhok, skyk, B0fact; HessianT Hupd; skyk = yk.dot(sk); rhok = 1.0 / skyk; Hupd.noalias() = HessianT::Identity(yk.size(), yk.size()) - rhok * sk * yk.transpose(); if (reset) { B0fact = yk.squaredNorm() / skyk; _Hk.noalias() = ((1.0 / B0fact) * Hupd) * Hupd.transpose(); } else { B0fact = 1.0; _Hk = Hupd * _Hk * Hupd.transpose(); } _Hk.noalias() += rhok * sk * sk.transpose(); return B0fact; } /** * Compute the search direction based on the current (inverse) Hessian * approximation and given gradient. * * @param[out] pk The negative product of the inverse Hessian and gradient * direction gk. * @param[in] gk Gradient direction. 
**/ inline void search_direction(VectorT &pk, const VectorT &gk) const { pk.noalias() = -(_Hk * gk); } private: HessianT _Hk; }; } // namespace optimization } // namespace stan #endif StanHeaders/inst/include/src/stan/optimization/lbfgs_update.hpp0000644000176200001440000000667714645137105024533 0ustar liggesusers#ifndef STAN_OPTIMIZATION_LBFGS_UPDATE_HPP #define STAN_OPTIMIZATION_LBFGS_UPDATE_HPP #include #include #include #include namespace stan { namespace optimization { /** * Implement a limited memory version of the BFGS update. This * class maintains a circular buffer of inverse Hessian updates * which can be applied to compute the search direction. **/ template class LBFGSUpdate { public: typedef Eigen::Matrix VectorT; typedef Eigen::Matrix HessianT; // NOLINTNEXTLINE(build/include_what_you_use) typedef std::tuple UpdateT; explicit LBFGSUpdate(size_t L = 5) : _buf(L) {} /** * Set the number of inverse Hessian updates to keep. * * @param L New size of buffer. **/ void set_history_size(size_t L) { _buf.rset_capacity(L); } /** * Add a new set of update vectors to the history. * * @param yk Difference between the current and previous gradient vector. * @param sk Difference between the current and previous state vector. * @param reset Whether to reset the approximation, forgetting about * previous values. * @return In the case of a reset, returns the optimal scaling of the * initial Hessian * approximation which is useful for predicting step-sizes. **/ inline Scalar update(const VectorT &yk, const VectorT &sk, bool reset = false) { Scalar skyk = yk.dot(sk); Scalar B0fact; if (reset) { B0fact = yk.squaredNorm() / skyk; _buf.clear(); } else { B0fact = 1.0; } // New updates are pushed to the "back" of the circular buffer Scalar invskyk = 1.0 / skyk; _gammak = skyk / yk.squaredNorm(); _buf.push_back(); _buf.back() = std::tie(invskyk, yk, sk); return B0fact; } /** * Compute the search direction based on the current (inverse) Hessian * approximation and given gradient. 
* * @param[out] pk The negative product of the inverse Hessian and gradient * direction gk. * @param[in] gk Gradient direction. **/ inline void search_direction(VectorT &pk, const VectorT &gk) const { std::vector alphas(_buf.size()); typename boost::circular_buffer::const_reverse_iterator buf_rit; typename boost::circular_buffer::const_iterator buf_it; typename std::vector::const_iterator alpha_it; typename std::vector::reverse_iterator alpha_rit; pk.noalias() = -gk; for (buf_rit = _buf.rbegin(), alpha_rit = alphas.rbegin(); buf_rit != _buf.rend(); buf_rit++, alpha_rit++) { Scalar alpha; const Scalar &rhoi(std::get<0>(*buf_rit)); const VectorT &yi(std::get<1>(*buf_rit)); const VectorT &si(std::get<2>(*buf_rit)); alpha = rhoi * si.dot(pk); pk -= alpha * yi; *alpha_rit = alpha; } pk *= _gammak; for (buf_it = _buf.begin(), alpha_it = alphas.begin(); buf_it != _buf.end(); buf_it++, alpha_it++) { Scalar beta; const Scalar &rhoi(std::get<0>(*buf_it)); const VectorT &yi(std::get<1>(*buf_it)); const VectorT &si(std::get<2>(*buf_it)); beta = rhoi * yi.dot(pk); pk += (*alpha_it - beta) * si; } } protected: boost::circular_buffer _buf; Scalar _gammak; }; } // namespace optimization } // namespace stan #endif StanHeaders/inst/include/src/stan/optimization/newton.hpp0000644000176200001440000000467414645137105023401 0ustar liggesusers#ifndef STAN_OPTIMIZATION_NEWTON_HPP #define STAN_OPTIMIZATION_NEWTON_HPP #include #include #include #include namespace stan { namespace optimization { typedef Eigen::Matrix matrix_d; typedef Eigen::Matrix vector_d; // Negates any positive eigenvalues in H so that H is negative // definite, and then solves Hu = g and stores the result into // g. Avoids problems due to non-log-concave distributions. 
inline void make_negative_definite_and_solve(matrix_d& H, vector_d& g) { Eigen::SelfAdjointEigenSolver solver(H); matrix_d eigenvectors = solver.eigenvectors(); vector_d eigenvalues = solver.eigenvalues(); vector_d eigenprojections = eigenvectors.transpose() * g; for (int i = 0; i < g.size(); i++) { eigenprojections[i] = -eigenprojections[i] / fabs(eigenvalues[i]); } g = eigenvectors * eigenprojections; } template double newton_step(M& model, std::vector& params_r, std::vector& params_i, std::ostream* output_stream = 0) { std::vector gradient; std::vector hessian; double f0 = stan::model::grad_hess_log_prob( model, params_r, params_i, gradient, hessian); matrix_d H(params_r.size(), params_r.size()); for (size_t i = 0; i < hessian.size(); i++) { H(i) = hessian[i]; } vector_d g(params_r.size()); for (size_t i = 0; i < gradient.size(); i++) g(i) = gradient[i]; make_negative_definite_and_solve(H, g); // H.ldlt().solveInPlace(g); std::vector new_params_r(params_r.size()); double step_size = 2; double min_step_size = 1e-50; double f1 = -1e100; while (f1 < f0) { step_size *= 0.5; if (step_size < min_step_size) return f0; for (size_t i = 0; i < params_r.size(); i++) new_params_r[i] = params_r[i] - step_size * g[i]; try { f1 = stan::model::log_prob_grad(model, new_params_r, params_i, gradient); } catch (std::exception& e) { // FIXME: this is not a good way to handle a general exception f1 = -1e100; } } for (size_t i = 0; i < params_r.size(); i++) params_r[i] = new_params_r[i]; return f1; } } // namespace optimization } // namespace stan #endif StanHeaders/inst/include/src/stan/optimization/bfgs_linesearch.hpp0000644000176200001440000002147014645137105025176 0ustar liggesusers#ifndef STAN_OPTIMIZATION_BFGS_LINESEARCH_HPP #define STAN_OPTIMIZATION_BFGS_LINESEARCH_HPP #include #include #include #include #include namespace stan { namespace optimization { /** * Find the minima in an interval [loX, hiX] of a cubic function which * interpolates the points, function values and 
gradients provided. * * Implicitly, this function constructs an interpolating polynomial * g(x) = a_3 x^3 + a_2 x^2 + a_1 x + a_0 * such that g(0) = 0, g(x1) = f1, g'(0) = df0, g'(x1) = df1 where * g'(x) = 3 a_3 x^2 + 2 a_2 x + a_1 * is the derivative of g(x). It then computes the roots of g'(x) and * finds the minimal value of g(x) on the interval [loX,hiX] including * the end points. * * This function implements the full parameter version of CubicInterp(). * * @param df0 First derivative value, f'(x0) * @param x1 Second point * @param f1 Second function value, f(x1) * @param df1 Second derivative value, f'(x1) * @param loX Lower bound on the interval of solutions * @param hiX Upper bound on the interval of solutions **/ template Scalar CubicInterp(const Scalar &df0, const Scalar &x1, const Scalar &f1, const Scalar &df1, const Scalar &loX, const Scalar &hiX) { const Scalar c3((-12 * f1 + 6 * x1 * (df0 + df1)) / (x1 * x1 * x1)); const Scalar c2(-(4 * df0 + 2 * df1) / x1 + 6 * f1 / (x1 * x1)); const Scalar &c1(df0); const Scalar t_s = std::sqrt(c2 * c2 - 2.0 * c1 * c3); const Scalar s1 = -(c2 + t_s) / c3; const Scalar s2 = -(c2 - t_s) / c3; Scalar tmpF; Scalar minF, minX; // Check value at lower bound minF = loX * (loX * (loX * c3 / 3.0 + c2) / 2.0 + c1); minX = loX; // Check value at upper bound tmpF = hiX * (hiX * (hiX * c3 / 3.0 + c2) / 2.0 + c1); if (tmpF < minF) { minF = tmpF; minX = hiX; } // Check value of first root if (loX < s1 && s1 < hiX) { tmpF = s1 * (s1 * (s1 * c3 / 3.0 + c2) / 2.0 + c1); if (tmpF < minF) { minF = tmpF; minX = s1; } } // Check value of second root if (loX < s2 && s2 < hiX) { tmpF = s2 * (s2 * (s2 * c3 / 3.0 + c2) / 2.0 + c1); if (tmpF < minF) { minF = tmpF; minX = s2; } } return minX; } /** * Find the minima in an interval [loX, hiX] of a cubic function which * interpolates the points, function values and gradients provided. 
* * Implicitly, this function constructs an interpolating polynomial * g(x) = a_3 x^3 + a_2 x^2 + a_1 x + a_0 * such that g(x0) = f0, g(x1) = f1, g'(x0) = df0, g'(x1) = df1 where * g'(x) = 3 a_3 x^2 + 2 a_2 x + a_1 * is the derivative of g(x). It then computes the roots of g'(x) and * finds the minimal value of g(x) on the interval [loX,hiX] including * the end points. * * @param x0 First point * @param f0 First function value, f(x0) * @param df0 First derivative value, f'(x0) * @param x1 Second point * @param f1 Second function value, f(x1) * @param df1 Second derivative value, f'(x1) * @param loX Lower bound on the interval of solutions * @param hiX Upper bound on the interval of solutions **/ template Scalar CubicInterp(const Scalar &x0, const Scalar &f0, const Scalar &df0, const Scalar &x1, const Scalar &f1, const Scalar &df1, const Scalar &loX, const Scalar &hiX) { return x0 + CubicInterp(df0, x1 - x0, f1 - f0, df1, loX - x0, hiX - x0); } /** * An internal utility function for implementing WolfeLineSearch() **/ template int WolfLSZoom(Scalar &alpha, XType &newX, Scalar &newF, XType &newDF, FunctorType &func, const XType &x, const Scalar &f, const Scalar &dfp, const Scalar &c1dfp, const Scalar &c2dfp, const XType &p, Scalar alo, Scalar aloF, Scalar aloDFp, Scalar ahi, Scalar ahiF, Scalar ahiDFp, const Scalar &min_range) { Scalar d1, d2, newDFp; int itNum(0); while (1) { itNum++; if (std::fabs(alo - ahi) < min_range) return 1; if (itNum % 5 == 0) { alpha = 0.5 * (alo + ahi); } else { // Perform cubic interpolation to determine next point to try d1 = aloDFp + ahiDFp - 3 * (aloF - ahiF) / (alo - ahi); d2 = std::sqrt(d1 * d1 - aloDFp * ahiDFp); if (ahi < alo) d2 = -d2; alpha = ahi - (ahi - alo) * (ahiDFp + d2 - d1) / (ahiDFp - aloDFp + 2 * d2); if (!std::isfinite(alpha) || alpha < std::min(alo, ahi) + 0.01 * std::fabs(alo - ahi) || alpha > std::max(alo, ahi) - 0.01 * std::fabs(alo - ahi)) alpha = 0.5 * (alo + ahi); } newX = x + alpha * p; while (func(newX, newF, 
newDF)) { alpha = 0.5 * (alpha + std::min(alo, ahi)); if (std::fabs(std::min(alo, ahi) - alpha) < min_range) return 1; newX = x + alpha * p; } newDFp = newDF.dot(p); if (newF > (f + alpha * c1dfp) || newF >= aloF) { ahi = alpha; ahiF = newF; ahiDFp = newDFp; } else { if (std::fabs(newDFp) <= -c2dfp) break; if (newDFp * (ahi - alo) >= 0) { ahi = alo; ahiF = aloF; ahiDFp = aloDFp; } alo = alpha; aloF = newF; aloDFp = newDFp; } } return 0; } /** * Perform a line search which finds an approximate solution to: * \f[ * \min_\alpha f(x_0 + \alpha p) * \f] * satisfying the strong Wolfe conditions: * 1) \f$ f(x_0 + \alpha p) \leq f(x_0) + c_1 \alpha p^T g(x_0) \f$ * 2) \f$ \vert p^T g(x_0 + \alpha p) \vert \leq c_2 \vert p^T g(x_0) \vert \f$ * where \f$g(x) = \frac{\partial f}{\partial x}\f$ is the gradient of f(x). * * @tparam FunctorType A type which supports being called as * ret = func(x,f,g) * where x is the input point, f and g are the function value and * gradient at x and ret is non-zero if function evaluation fails. * * @param func Function which is being minimized. * * @param alpha First value of \f$ \alpha \f$ to try. Upon return this * contains the final value of the \f$ \alpha \f$. * * @param x1 Final point, equal to \f$ x_0 + \alpha p \f$. * * @param f1 Final point function value, equal to \f$ f(x_0 + \alpha p) \f$. * * @param gradx1 Final point gradient, equal to \f$ g(x_0 + \alpha p) \f$. * * @param p Search direction. It is assumed to be a descent direction such * that \f$ p^T g(x_0) < 0 \f$. * * @param x0 Value of starting point, \f$ x_0 \f$. * * @param f0 Value of function at starting point, \f$ f(x_0) \f$. * * @param gradx0 Value of function gradient at starting point, * \f$ g(x_0) \f$. * * @param c1 Parameter of the Wolfe conditions. \f$ 0 < c_1 < c_2 < 1 \f$ * Typically c1 = 1e-4. * * @param c2 Parameter of the Wolfe conditions. \f$ 0 < c_1 < c_2 < 1 \f$ * Typically c2 = 0.9. * * @param minAlpha Smallest allowable step-size. 
* * @param maxLSIts Maximum number line search iterations. * * @param maxLSRestarts Maximum number of times line search will * restart with \f$ f() \f$ failing. * * @return Returns zero on success, non-zero otherwise. **/ template int WolfeLineSearch(FunctorType &func, Scalar &alpha, XType &x1, Scalar &f1, XType &gradx1, const XType &p, const XType &x0, const Scalar &f0, const XType &gradx0, const Scalar &c1, const Scalar &c2, const Scalar &minAlpha, const Scalar &maxLSIts, const Scalar &maxLSRestarts) { const Scalar dfp(gradx0.dot(p)); const Scalar c1dfp(c1 * dfp); const Scalar c2dfp(c2 * dfp); Scalar alpha0(minAlpha); Scalar alpha1(alpha); Scalar prevF(f0); XType prevDF(gradx0); Scalar prevDFp(dfp); Scalar newDFp; int retCode = 0, nits = 0, lsRestarts = 0, ret; while (1) { if (nits >= maxLSIts) { retCode = 1; break; } x1.noalias() = x0 + alpha1 * p; ret = func(x1, f1, gradx1); if (ret != 0) { if (lsRestarts >= maxLSRestarts) { retCode = 1; break; } alpha1 = 0.5 * (alpha0 + alpha1); lsRestarts++; continue; } lsRestarts = 0; newDFp = gradx1.dot(p); if ((f1 > f0 + alpha * c1dfp) || (f1 >= prevF && nits > 0)) { retCode = WolfLSZoom(alpha, x1, f1, gradx1, func, x0, f0, dfp, c1dfp, c2dfp, p, alpha0, prevF, prevDFp, alpha1, f1, newDFp, 1e-16); break; } if (std::fabs(newDFp) <= -c2dfp) { alpha = alpha1; break; } if (newDFp >= 0) { retCode = WolfLSZoom(alpha, x1, f1, gradx1, func, x0, f0, dfp, c1dfp, c2dfp, p, alpha1, f1, newDFp, alpha0, prevF, prevDFp, 1e-16); break; } alpha0 = alpha1; prevF = f1; std::swap(prevDF, gradx1); prevDFp = newDFp; alpha1 *= 10.0; nits++; } return retCode; } } // namespace optimization } // namespace stan #endif StanHeaders/inst/include/src/stan/optimization/bfgs.hpp0000644000176200001440000003022414645137105022776 0ustar liggesusers#ifndef STAN_OPTIMIZATION_BFGS_HPP #define STAN_OPTIMIZATION_BFGS_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace 
optimization { typedef enum { TERM_SUCCESS = 0, TERM_ABSX = 10, TERM_ABSF = 20, TERM_RELF = 21, TERM_ABSGRAD = 30, TERM_RELGRAD = 31, TERM_MAXIT = 40, TERM_LSFAIL = -1 } TerminationCondition; template class ConvergenceOptions { public: ConvergenceOptions() { maxIts = 10000; fScale = 1.0; tolAbsX = 1e-8; tolAbsF = 1e-12; tolAbsGrad = 1e-8; tolRelF = 1e+4; tolRelGrad = 1e+3; } size_t maxIts; Scalar tolAbsX; Scalar tolAbsF; Scalar tolRelF; Scalar fScale; Scalar tolAbsGrad; Scalar tolRelGrad; }; template class LSOptions { public: LSOptions() { c1 = 1e-4; c2 = 0.9; alpha0 = 1e-3; minAlpha = 1e-12; maxLSIts = 20; maxLSRestarts = 10; } Scalar c1; Scalar c2; Scalar alpha0; Scalar minAlpha; Scalar maxLSIts; Scalar maxLSRestarts; }; template class BFGSMinimizer { public: typedef Eigen::Matrix VectorT; typedef Eigen::Matrix HessianT; protected: FunctorType &_func; VectorT _gk, _gk_1, _xk_1, _xk, _pk, _pk_1; Scalar _fk, _fk_1, _alphak_1; Scalar _alpha, _alpha0; size_t _itNum; std::string _note; QNUpdateType _qn; public: LSOptions _ls_opts; ConvergenceOptions _conv_opts; QNUpdateType &get_qnupdate() { return _qn; } const QNUpdateType &get_qnupdate() const { return _qn; } const Scalar &curr_f() const { return _fk; } const VectorT &curr_x() const { return _xk; } const VectorT &curr_g() const { return _gk; } const VectorT &curr_p() const { return _pk; } const Scalar &prev_f() const { return _fk_1; } const VectorT &prev_x() const { return _xk_1; } const VectorT &prev_g() const { return _gk_1; } const VectorT &prev_p() const { return _pk_1; } Scalar prev_step_size() const { return _pk_1.norm() * _alphak_1; } inline Scalar rel_grad_norm() const { return -_pk.dot(_gk) / std::max(std::fabs(_fk), _conv_opts.fScale); } inline Scalar rel_obj_decrease() const { return std::fabs(_fk_1 - _fk) / std::max(std::fabs(_fk_1), std::max(std::fabs(_fk), _conv_opts.fScale)); } const Scalar &alpha0() const { return _alpha0; } const Scalar &alpha() const { return _alpha; } const size_t iter_num() const 
{ return _itNum; } const std::string ¬e() const { return _note; } std::string get_code_string(int retCode) { switch (retCode) { case TERM_SUCCESS: return std::string("Successful step completed"); case TERM_ABSF: return std::string( "Convergence detected: absolute change " "in objective function was below tolerance"); case TERM_RELF: return std::string( "Convergence detected: relative change " "in objective function was below tolerance"); case TERM_ABSGRAD: return std::string( "Convergence detected: " "gradient norm is below tolerance"); case TERM_RELGRAD: return std::string( "Convergence detected: relative " "gradient magnitude is below tolerance"); case TERM_ABSX: return std::string( "Convergence detected: " "absolute parameter change was below tolerance"); case TERM_MAXIT: return std::string( "Maximum number of iterations hit, " "may not be at an optima"); case TERM_LSFAIL: return std::string( "Line search failed to achieve a sufficient " "decrease, no more progress can be made"); default: return std::string("Unknown termination code"); } } explicit BFGSMinimizer(FunctorType &f) : _func(f) {} void initialize(const VectorT &x0) { int ret; _xk = x0; ret = _func(_xk, _fk, _gk); if (ret) { throw std::runtime_error("Error evaluating initial BFGS point."); } _pk = -_gk; _itNum = 0; _note = ""; } int step() { Scalar gradNorm, stepNorm; VectorT sk, yk; int retCode(0); int resetB(0); _itNum++; if (_itNum == 1) { resetB = 1; _note = ""; } else { resetB = 0; _note = ""; } while (true) { if (resetB) { // Reset the Hessian approximation _pk.noalias() = -_gk; } // Get an initial guess for the step size (alpha) if (_itNum > 1 && resetB != 2) { // use cubic interpolation based on the previous step _alpha0 = _alpha = std::min( 1.0, 1.01 * CubicInterp(_gk_1.dot(_pk_1), _alphak_1, _fk - _fk_1, _gk.dot(_pk_1), _ls_opts.minAlpha, 1.0)); } else { // On the first step (or, after a reset) use the default step size _alpha0 = _alpha = _ls_opts.alpha0; } // Perform the line search. 
If successful, the results are in the // variables: _xk_1, _fk_1 and _gk_1. retCode = WolfeLineSearch(_func, _alpha, _xk_1, _fk_1, _gk_1, _pk, _xk, _fk, _gk, _ls_opts.c1, _ls_opts.c2, _ls_opts.minAlpha, _ls_opts.maxLSIts, _ls_opts.maxLSRestarts); if (retCode) { // Line search failed... if (resetB) { // did a Hessian reset and it still failed, // and nothing left to try retCode = TERM_LSFAIL; return retCode; } else { // try resetting the Hessian approximation resetB = 2; _note += "LS failed, Hessian reset"; continue; } } else { break; } } // Swap things so that k is the most recent iterate std::swap(_fk, _fk_1); _xk.swap(_xk_1); _gk.swap(_gk_1); _pk.swap(_pk_1); sk.noalias() = _xk - _xk_1; yk.noalias() = _gk - _gk_1; gradNorm = _gk.norm(); stepNorm = sk.norm(); // Update QN approximation if (resetB) { // If the QN approximation was reset, automatically scale it // and update the step-size accordingly Scalar B0fact = _qn.update(yk, sk, true); _pk_1 /= B0fact; _alphak_1 = _alpha * B0fact; } else { _qn.update(yk, sk); _alphak_1 = _alpha; } // Compute search direction for next step _qn.search_direction(_pk, _gk); // Check for convergence if (std::fabs(_fk_1 - _fk) < _conv_opts.tolAbsF) { // Objective function improvement wasn't sufficient retCode = TERM_ABSF; } else if (gradNorm < _conv_opts.tolAbsGrad) { retCode = TERM_ABSGRAD; // Gradient norm was below threshold } else if (stepNorm < _conv_opts.tolAbsX) { retCode = TERM_ABSX; // Change in x was too small } else if (_itNum >= _conv_opts.maxIts) { retCode = TERM_MAXIT; // Max number of iterations hit } else if (rel_obj_decrease() < _conv_opts.tolRelF * std::numeric_limits::epsilon()) { // Relative improvement in objective function wasn't sufficient retCode = TERM_RELF; } else if (rel_grad_norm() < _conv_opts.tolRelGrad * std::numeric_limits::epsilon()) { // Relative gradient norm was below threshold retCode = TERM_RELGRAD; } else { // Step was successful more progress to be made retCode = TERM_SUCCESS; } return 
retCode; } int minimize(VectorT &x0) { int retcode; initialize(x0); while (!(retcode = step())) continue; x0 = _xk; return retcode; } }; template class ModelAdaptor { private: M &_model; std::vector _params_i; std::ostream *_msgs; std::vector _x, _g; size_t _fevals; public: ModelAdaptor(M &model, const std::vector ¶ms_i, std::ostream *msgs) : _model(model), _params_i(params_i), _msgs(msgs), _fevals(0) {} size_t fevals() const { return _fevals; } int operator()(const Eigen::Matrix &x, double &f) { using Eigen::Dynamic; using Eigen::Matrix; using stan::math::index_type; using stan::model::log_prob_propto; typedef typename index_type >::type idx_t; _x.resize(x.size()); for (idx_t i = 0; i < x.size(); i++) _x[i] = x[i]; try { f = -log_prob_propto(_model, _x, _params_i, _msgs); } catch (const std::exception &e) { if (_msgs) (*_msgs) << e.what() << std::endl; return 1; } if (std::isfinite(f)) { return 0; } else { if (_msgs) *_msgs << "Error evaluating model log probability: " "Non-finite function evaluation." << std::endl; return 2; } } int operator()(const Eigen::Matrix &x, double &f, Eigen::Matrix &g) { using Eigen::Dynamic; using Eigen::Matrix; using stan::math::index_type; using stan::model::log_prob_grad; typedef typename index_type >::type idx_t; _x.resize(x.size()); for (idx_t i = 0; i < x.size(); i++) _x[i] = x[i]; _fevals++; try { f = -log_prob_grad(_model, _x, _params_i, _g, _msgs); } catch (const std::exception &e) { if (_msgs) (*_msgs) << e.what() << std::endl; return 1; } g.resize(_g.size()); for (size_t i = 0; i < _g.size(); i++) { if (!std::isfinite(_g[i])) { if (_msgs) *_msgs << "Error evaluating model log probability: " "Non-finite gradient." << std::endl; return 3; } g[i] = -_g[i]; } if (std::isfinite(f)) { return 0; } else { if (_msgs) *_msgs << "Error evaluating model log probability: " << "Non-finite function evaluation." 
<< std::endl; return 2; } } int df(const Eigen::Matrix &x, Eigen::Matrix &g) { double f; return (*this)(x, f, g); } }; /** * @tparam jacobian `true` to include Jacobian adjustment (default `false`) */ template class BFGSLineSearch : public BFGSMinimizer, QNUpdateType, Scalar, DimAtCompile> { private: ModelAdaptor _adaptor; public: typedef BFGSMinimizer, QNUpdateType, Scalar, DimAtCompile> BFGSBase; typedef typename BFGSBase::VectorT vector_t; typedef typename stan::math::index_type::type idx_t; BFGSLineSearch(M &model, const std::vector ¶ms_r, const std::vector ¶ms_i, std::ostream *msgs = 0) : BFGSBase(_adaptor), _adaptor(model, params_i, msgs) { initialize(params_r); } void initialize(const std::vector ¶ms_r) { Eigen::Matrix x; x.resize(params_r.size()); for (size_t i = 0; i < params_r.size(); i++) x[i] = params_r[i]; BFGSBase::initialize(x); } size_t grad_evals() { return _adaptor.fevals(); } double logp() { return -(this->curr_f()); } double grad_norm() { return this->curr_g().norm(); } void grad(std::vector &g) { const vector_t &cg(this->curr_g()); g.resize(cg.size()); for (idx_t i = 0; i < cg.size(); i++) g[i] = -cg[i]; } void params_r(std::vector &x) { const vector_t &cx(this->curr_x()); x.resize(cx.size()); for (idx_t i = 0; i < cx.size(); i++) x[i] = cx[i]; } }; } // namespace optimization } // namespace stan #endif StanHeaders/inst/include/src/stan/io/0000755000176200001440000000000014645137105017224 5ustar liggesusersStanHeaders/inst/include/src/stan/io/dump.hpp0000644000176200001440000005405614645137105020714 0ustar liggesusers#ifndef STAN_IO_DUMP_HPP #define STAN_IO_DUMP_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace io { /** * Reads data from S-plus dump format. * * A dump_reader parses data from the S-plus dump * format, a human-readable ASCII representation of arbitrarily * dimensioned arrays of integers and arrays of floating point * values. 
* *

Stan's dump reader is limited to reading * integers, scalars and arrays of arbitrary dimensionality of * integers and scalars. It is able to read from a file * consisting of a sequence of dumped variables. * *

There cannot be any NA * (i.e., undefined) values, because these cannot be * represented as double values. * *

The dump reader class follows a standard scanner pattern. * The method next() is called to scan the next * input. The type, dimensions, and values of the input is then * available through method calls. Here, the type is either * double or integer, and the values are the name of the variable * and its array of values. If there is a single value, the * dimension array is empty. For a list, the dimension * array contains a single entry for the number of values. * For an array, the dimensions are the dimensions of the array. * *

Reads are performed in an "S-compatible" mode whereby * a string such as "1" or "-127" denotes and integer, whereas * a string such as "1." or "0.9e-5" represents a floating * point value. * *

The dump reader treats "integer(x)" as an array of zeros * (type being integer and length x), where x any non-negative * integers and x can be omitted to indicate zero-length. * So the following are all legitimate: * "x <- integer()", * "x <- integer(0) ", and "x <- integer(3)". For array of zeros * of type double, we can replace the above "integer" with "double". * This is mainly for the purpose of supporting zero-size arrays * such as "x <- structure(integer(0), .Dim = c(2, 0))". * *

For dumping, arrays are indexed in last-index major fashion, * which corresponds to column-major order for matrices * represented as two-dimensional arrays. As a result, the first * indices change fastest. For instance, if there is an * three-dimensional array x with dimensions * [2,2,2], then there are 8 values, provided in the * order * *

[0,0,0], * [1,0,0], * [0,1,0], * [1,1,0], * [0,0,1], * [1,0,1], * [0,1,1], * [1,1,1]. * * definitions ::= definition+ * * definition ::= name ("<-" | '=') value optional_semicolon * * name ::= char* * | ''' char* ''' * | '"' char* '"' * * value ::= value | value * * value ::= T * | seq * | zero_array * | "structure" '(' seq ',' ".Dim" '=' seq ')' * | "structure" '(' zero_array ',' ".Dim" '=' seq ')' * * seq ::= int ':' int * | cseq * * seq ::= cseq * * cseq ::= 'c' '(' vseq ')' * * vseq ::= T * | T ',' vseq * * zero_array ::= "integer" * * zero_array ::= "double" * */ class dump_reader { private: std::string buf_; std::string name_; std::vector stack_i_; std::vector stack_r_; std::vector dims_; std::istream& in_; bool scan_single_char(char c_expected) { int c = in_.peek(); if (in_.fail()) return false; if (c != c_expected) return false; char c_skip; in_.get(c_skip); return true; } bool scan_optional_long() { if (scan_single_char('l')) return true; else if (scan_single_char('L')) return true; else return false; } bool scan_char(char c_expected) { char c; in_ >> c; if (in_.fail()) return false; if (c != c_expected) { in_.putback(c); return false; } return true; } bool scan_name_unquoted() { char c; in_ >> c; if (in_.fail()) return false; if (!std::isalpha(c)) return false; name_.push_back(c); while (in_.get(c)) { // get turns off auto space skip if (std::isalpha(c) || std::isdigit(c) || c == '_' || c == '.') { name_.push_back(c); } else { in_.putback(c); return true; } } return true; // but hit eos } bool scan_name() { if (scan_char('"')) { if (!scan_name_unquoted()) return false; if (!scan_char('"')) return false; } else if (scan_char('\'')) { if (!scan_name_unquoted()) return false; if (!scan_char('\'')) return false; } else { if (!scan_name_unquoted()) return false; } return true; } bool scan_chars(const char* s, bool case_sensitive = true) { for (size_t i = 0; s[i]; ++i) { char c; if (!(in_ >> c)) { for (size_t j = 1; j < i; ++j) in_.putback(s[i - j]); return 
false; } // all ASCII, so toupper is OK if ((case_sensitive && c != s[i]) || (!case_sensitive && ::toupper(c) != ::toupper(s[i]))) { in_.putback(c); for (size_t j = 1; j < i; ++j) in_.putback(s[i - j]); return false; } } return true; } bool scan_chars(std::string s, bool case_sensitive = true) { for (size_t i = 0; i < s.size(); ++i) { char c; if (!(in_ >> c)) { for (size_t j = 1; j < i; ++j) in_.putback(s[i - j]); return false; } // all ASCII, so toupper is OK if ((case_sensitive && c != s[i]) || (!case_sensitive && ::toupper(c) != ::toupper(s[i]))) { in_.putback(c); for (size_t j = 1; j < i; ++j) in_.putback(s[i - j]); return false; } } return true; } size_t scan_dim() { char c; buf_.clear(); while (in_.get(c)) { if (std::isspace(c)) continue; if (std::isdigit(c)) { buf_.push_back(c); } else { in_.putback(c); break; } } scan_optional_long(); size_t d = 0; try { d = boost::lexical_cast(buf_); } catch (const boost::bad_lexical_cast& exc) { std::string msg = "value " + buf_ + " beyond array dimension range"; throw std::invalid_argument(msg); } return d; } int scan_int() { char c; buf_.clear(); while (in_.get(c)) { if (std::isspace(c)) continue; if (std::isdigit(c)) { buf_.push_back(c); } else { in_.putback(c); break; } } return (get_int()); } int get_int() { int n = 0; try { n = boost::lexical_cast(buf_); } catch (const boost::bad_lexical_cast& exc) { std::string msg = "value " + buf_ + " beyond int range"; throw std::invalid_argument(msg); } return n; } double scan_double() { double x = 0; try { x = boost::lexical_cast(buf_); if (x == 0) validate_zero_buf(buf_); } catch (const boost::bad_lexical_cast& exc) { std::string msg = "value " + buf_ + " beyond numeric range"; throw std::invalid_argument(msg); } return x; } // scan number stores number or throws bad lexical cast exception void scan_number(bool negate_val) { // must take longest first! if (scan_chars("Inf")) { scan_chars("inity"); // read past if there stack_r_.push_back(negate_val ? 
-std::numeric_limits::infinity() : std::numeric_limits::infinity()); return; } if (scan_chars("NaN", false)) { stack_r_.push_back(std::numeric_limits::quiet_NaN()); return; } char c; bool is_double = false; buf_.clear(); while (in_.get(c)) { if (std::isdigit(c)) { // before pre-scan || c == '-' || c == '+') { buf_.push_back(c); } else if (c == '.' || c == 'e' || c == 'E' || c == '-' || c == '+') { is_double = true; buf_.push_back(c); } else { in_.putback(c); break; } } if (!is_double && stack_r_.size() == 0) { int n = get_int(); stack_i_.push_back(negate_val ? -n : n); scan_optional_long(); } else { for (size_t j = 0; j < stack_i_.size(); ++j) stack_r_.push_back(static_cast(stack_i_[j])); stack_i_.clear(); double x = scan_double(); stack_r_.push_back(negate_val ? -x : x); } } void scan_number() { char c; while (in_.get(c)) { if (std::isspace(c)) continue; in_.putback(c); break; } bool negate_val = scan_char('-'); if (!negate_val) scan_char('+'); // flush leading + return scan_number(negate_val); } bool scan_zero_integers() { if (!scan_char('(')) return false; if (scan_char(')')) { dims_.push_back(0U); return true; } int s = scan_int(); if (s < 0) return false; for (int i = 0; i < s; ++i) { stack_i_.push_back(0); } if (!scan_char(')')) return false; dims_.push_back(s); return true; } bool scan_zero_doubles() { if (!scan_char('(')) return false; if (scan_char(')')) { dims_.push_back(0U); return true; } int s = scan_int(); if (s < 0) return false; for (int i = 0; i < s; ++i) { stack_r_.push_back(0); } if (!scan_char(')')) return false; dims_.push_back(s); return true; } bool scan_seq_value() { if (!scan_char('(')) return false; if (scan_char(')')) { dims_.push_back(0U); return true; } scan_number(); // first entry while (scan_char(',')) { scan_number(); } dims_.push_back(stack_r_.size() + stack_i_.size()); return scan_char(')'); } bool scan_struct_value() { if (!scan_char('(')) return false; if (scan_chars("integer")) { scan_zero_integers(); } else if 
(scan_chars("double")) { scan_zero_doubles(); } else if (scan_char('c')) { scan_seq_value(); } else { int start = scan_int(); if (!scan_char(':')) return false; int end = scan_int(); if (start <= end) { for (int i = start; i <= end; ++i) stack_i_.push_back(i); } else { for (int i = start; i >= end; --i) stack_i_.push_back(i); } } dims_.clear(); if (!scan_char(',')) return false; if (!scan_char('.')) return false; if (!scan_chars("Dim")) return false; if (!scan_char('=')) return false; if (scan_char('c')) { if (!scan_char('(')) return false; size_t dim = scan_dim(); dims_.push_back(dim); while (scan_char(',')) { dim = scan_dim(); dims_.push_back(dim); } if (!scan_char(')')) return false; } else { size_t start = scan_dim(); if (!scan_char(':')) return false; size_t end = scan_dim(); if (start < end) { for (size_t i = start; i <= end; ++i) dims_.push_back(i); } else { for (size_t i = start; i >= end; --i) dims_.push_back(i); } } if (!scan_char(')')) return false; return true; } bool scan_value() { if (scan_char('c')) return scan_seq_value(); if (scan_chars("integer")) return scan_zero_integers(); if (scan_chars("double")) return scan_zero_doubles(); if (scan_chars("structure")) return scan_struct_value(); scan_number(); if (!scan_char(':')) return true; if (stack_i_.size() != 1) return false; scan_number(); if (stack_i_.size() != 2) return false; int start = stack_i_[0]; int end = stack_i_[1]; stack_i_.clear(); if (start <= end) { for (int i = start; i <= end; ++i) stack_i_.push_back(i); } else { for (int i = start; i >= end; --i) stack_i_.push_back(i); } dims_.push_back(stack_i_.size()); return true; } public: /** * Construct a reader for the specified input stream. * * @param in Input stream reference from which to read. */ explicit dump_reader(std::istream& in) : in_(in) {} /** * Destroy this reader. */ ~dump_reader() {} /** * Return the name of the most recently read variable. * * @return Name of most recently read variable. 
*/ std::string name() { return name_; } /** * Return the dimensions of the most recently * read variable. * * @return Last dimensions. */ std::vector dims() { return dims_; } /** * Checks if the last item read is integer. * * Return true if the value(s) in the most recently * read item are integer values and false if * they are floating point. */ bool is_int() { // return stack_i_.size() > 0; return stack_r_.size() == 0; } /** * Returns the integer values from the last item if the * last item read was an integer and the empty vector otherwise. * * @return Integer values of last item. */ std::vector int_values() { return stack_i_; } /** * Returns the floating point values from the last item if the * last item read contained floating point values and the empty * vector otherwise. * * @return Floating point values of last item. */ std::vector double_values() { return stack_r_; } /** * Read the next value from the input stream, returning * true if successful and false if no * further input may be read. * * @return Return true if a fresh variable was read. * @throws bad_cast if bad number values encountered. */ bool next() { stack_r_.clear(); stack_i_.clear(); dims_.clear(); name_.erase(); if (!scan_name()) // set name return false; if (!scan_char('<')) // set <- return false; if (!scan_char('-')) return false; try { bool okSyntax = scan_value(); // set stack_r_, stack_i_, dims_ if (!okSyntax) { std::string msg = "syntax error"; throw std::invalid_argument(msg); } } catch (const std::invalid_argument& e) { std::string msg = "data " + name_ + " " + e.what(); throw std::invalid_argument(msg); } return true; } }; /** * Represents named arrays with dimensions. * * A dump object represents a dump of named arrays with dimensions. * The arrays may have any dimensionality. The values for an array * are typed to double or int. * *

See dump_reader for more information on the format. * *

Dump objects are created from reading dump files from an * input stream. * *

The dimensions and values of variables * may be accessed by name. */ class dump : public stan::io::var_context { private: std::map, std::vector>> vars_r_; std::map, std::vector>> vars_i_; std::vector const empty_vec_r_; std::vector const empty_vec_i_; std::vector const empty_vec_ui_; /** * Return true if this dump contains the specified * variable name is defined in the real values. This method * returns false if the values are all integers. * * @param name Variable name to test. * @return true if the variable exists in the * real values of the dump. */ bool contains_r_only(const std::string& name) const { return vars_r_.find(name) != vars_r_.end(); } public: /** * Construct a dump object from the specified input stream. * * Warning: This method does not close the input stream. * * @param in Input stream from which to read. */ explicit dump(std::istream& in) { dump_reader reader(in); while (reader.next()) { if (reader.is_int()) { vars_i_[reader.name()] = std::pair, std::vector>( reader.int_values(), reader.dims()); } else { vars_r_[reader.name()] = std::pair, std::vector>( reader.double_values(), reader.dims()); } } } /** * Return true if this dump contains the specified * variable name is defined. This method returns true * even if the values are all integers. * * @param name Variable name to test. * @return true if the variable exists. */ bool contains_r(const std::string& name) const { return contains_r_only(name) || contains_i(name); } /** * Return true if this dump contains an integer * valued array with the specified name. * * @param name Variable name to test. * @return true if the variable name has an integer * array value. */ bool contains_i(const std::string& name) const { return vars_i_.find(name) != vars_i_.end(); } /** * Return the double values for the variable with the specified * name or null. * * @param name Name of variable. * @return Values of variable. 
*/ std::vector vals_r(const std::string& name) const { if (contains_r_only(name)) { return (vars_r_.find(name)->second).first; } else if (contains_i(name)) { std::vector vec_int = (vars_i_.find(name)->second).first; std::vector vec_r(vec_int.size()); for (size_t ii = 0; ii < vec_int.size(); ii++) { vec_r[ii] = vec_int[ii]; } return vec_r; } return empty_vec_r_; } std::vector> vals_c(const std::string& name) const { const auto val_r = vars_r_.find(name); if (val_r != vars_r_.end()) { std::vector> ret_c(val_r->second.first.size() / 2); int comp_iter; int real_iter; for (comp_iter = 0, real_iter = 0; real_iter < val_r->second.first.size(); comp_iter += 1, real_iter += 2) { ret_c[comp_iter] = std::complex{ val_r->second.first[real_iter], val_r->second.first[real_iter + 1]}; } return ret_c; } else if (contains_i(name)) { const auto val_i = vars_i_.find(name); if (val_i != vars_i_.end()) { std::vector> ret_c(val_i->second.first.size() / 2); int comp_iter; int real_iter; for (comp_iter = 0, real_iter = 0; real_iter < val_i->second.first.size(); comp_iter += 1, real_iter += 2) { ret_c[comp_iter] = std::complex{ static_cast(val_i->second.first[real_iter]), static_cast(val_i->second.first[real_iter + 1])}; } return ret_c; } } return std::vector>{}; } /** * Return the dimensions for the double variable with the specified * name. * * @param name Name of variable. * @return Dimensions of variable. */ std::vector dims_r(const std::string& name) const { if (contains_r_only(name)) { return (vars_r_.find(name)->second).second; } else if (contains_i(name)) { return (vars_i_.find(name)->second).second; } return empty_vec_ui_; } /** * Return the integer values for the variable with the specified * name. * * @param name Name of variable. * @return Values. */ std::vector vals_i(const std::string& name) const { if (contains_i(name)) { return (vars_i_.find(name)->second).first; } return empty_vec_i_; } /** * Return the dimensions for the integer variable with the specified * name. 
* * @param name Name of variable. * @return Dimensions of variable. */ std::vector dims_i(const std::string& name) const { if (contains_i(name)) { return (vars_i_.find(name)->second).second; } return empty_vec_ui_; } /** * Return a list of the names of the floating point variables in * the dump. * * @param names Vector to store the list of names in. */ virtual void names_r(std::vector& names) const { names.resize(0); for (std::map, std::vector>>::const_iterator it = vars_r_.begin(); it != vars_r_.end(); ++it) names.push_back((*it).first); } /** * Return a list of the names of the integer variables in * the dump. * * @param names Vector to store the list of names in. */ virtual void names_i(std::vector& names) const { names.resize(0); for (std::map, std::vector>>::const_iterator it = vars_i_.begin(); it != vars_i_.end(); ++it) names.push_back((*it).first); } /** * Check variable dimensions against variable declaration. * * @param stage stan program processing stage * @param name variable name * @param base_type declared stan variable type * @param dims_declared variable dimensions * @throw std::runtime_error if mismatch between declared * dimensions and dimensions found in context. */ void validate_dims(const std::string& stage, const std::string& name, const std::string& base_type, const std::vector& dims_declared) const { stan::io::validate_dims(*this, stage, name, base_type, dims_declared); } /** * Remove variable from the object. * * @param name Name of the variable to remove. * @return If variable is removed returns true, else * returns false. 
*/ bool remove(const std::string& name) { return (vars_i_.erase(name) > 0) || (vars_r_.erase(name) > 0); } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/empty_var_context.hpp0000644000176200001440000000625214645137105023514 0ustar liggesusers#ifndef STAN_IO_EMPTY_VAR_CONTEXT_HPP #define STAN_IO_EMPTY_VAR_CONTEXT_HPP #include #include #include #include namespace stan { namespace io { /** * This is an implementation of a var_context that doesn't contain * any variables. */ class empty_var_context : public var_context { public: /** * Destructor */ virtual ~empty_var_context() {} /** * Return true if the specified variable name is * defined. Always returns false. * * @param name Name of variable. * @return false */ bool contains_r(const std::string& name) const { return false; } /** * Always returns an empty vector. * * @param name Name of variable. * @return empty vector */ std::vector vals_r(const std::string& name) const { return std::vector(); } std::vector> vals_c(const std::string& name) const { return std::vector>(); } /** * Always returns an empty vector. * * @param name Name of variable. * @return empty vector */ std::vector dims_r(const std::string& name) const { return std::vector(); } /** * Return true if the specified variable name has * integer values. Always returns false. * * @param name Name of variable. * @return false */ bool contains_i(const std::string& name) const { return false; } /** * Returns an empty vector. * * @param name Name of variable. * @return empty vector */ std::vector vals_i(const std::string& name) const { return std::vector(); } /** * Return the dimensions of the specified floating point variable. * Returns an empty vector. * * @param name Name of variable. * @return empty vector */ std::vector dims_i(const std::string& name) const { return std::vector(); } /** * Check variable dimensions against variable declaration. * This context has no variables. 
* * @param stage stan program processing stage * @param name variable name * @param base_type declared stan variable type * @param dims_declared variable dimensions * @throw std::runtime_error if mismatch between declared * dimensions and dimensions found in context. */ void validate_dims(const std::string& stage, const std::string& name, const std::string& base_type, const std::vector& dims_declared) const { stan::io::validate_dims(*this, stage, name, base_type, dims_declared); } /** * Fill a list of the names of the floating point variables in * the context. This context has no variables. * * @param names Vector to store the list of names in. */ void names_r(std::vector& names) const { names.clear(); } /** * Fill a list of the names of the integer variables in * the context. This context has no variables. * * @param names Vector to store the list of names in. */ void names_i(std::vector& names) const { names.clear(); } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/random_var_context.hpp0000644000176200001440000002032614645137105023634 0ustar liggesusers#ifndef STAN_IO_RANDOM_VAR_CONTEXT_HPP #define STAN_IO_RANDOM_VAR_CONTEXT_HPP #include #include #include #include #include #include #include namespace stan { namespace io { /** * This is an implementation of a var_context that initializes * the unconstrained values randomly. This is used for initialization. */ class random_var_context : public var_context { public: /** * Constructs a random var_context. * * On construction, this var_context will generate random * numbers on the unconstrained scale for the model provided. * Once generated, the class is immutable. * * This class only generates values for the parameters in the * Stan program and does not generate values for transformed parameters * or generated quantities. 
* * @tparam Model Model class * @tparam RNG Random number generator type * @param[in] model instantiated model to generate variables for * @param[in,out] rng pseudo-random number generator * @param[in] init_radius the unconstrained variables are uniform draws * from -init_radius to init_radius. * @param[in] init_zero indicates whether all unconstrained variables * should be initialized at 0. When init_zero is false, init_radius * must be greater than 0. */ template random_var_context(Model& model, RNG& rng, double init_radius, bool init_zero) : unconstrained_params_(model.num_params_r()) { size_t num_unconstrained_ = model.num_params_r(); model.get_param_names(names_, false, false); model.get_dims(dims_, false, false); if (init_zero) { for (size_t n = 0; n < num_unconstrained_; ++n) unconstrained_params_[n] = 0.0; } else { boost::random::uniform_real_distribution unif(-init_radius, init_radius); for (size_t n = 0; n < num_unconstrained_; ++n) unconstrained_params_[n] = unif(rng); } std::vector constrained_params; std::vector int_params; model.write_array(rng, unconstrained_params_, int_params, constrained_params, false, false, 0); vals_r_ = constrained_to_vals_r(constrained_params, dims_); } /** * Destructor. */ ~random_var_context() {} /** * Return true if the specified variable name is * defined. Will return true if the name matches * a parameter in the model. * * @param name Name of variable. * @return true if the name is a parameter in the * model. */ bool contains_r(const std::string& name) const { return std::find(names_.begin(), names_.end(), name) != names_.end(); } /** * Returns the values of the constrained variables. * * @param name Name of variable. 
* * @return the constrained values if the variable is in the * var_context; an empty vector is returned otherwise */ std::vector vals_r(const std::string& name) const { std::vector::const_iterator loc = std::find(names_.begin(), names_.end(), name); if (loc == names_.end()) return std::vector(); return vals_r_[loc - names_.begin()]; } std::vector> vals_c(const std::string& name) const { std::vector::const_iterator loc = std::find(names_.begin(), names_.end(), name); if (loc == names_.end()) { return std::vector>(); } else { const auto& val_r = vals_r_[loc - names_.begin()]; std::vector> ret_c(val_r.size() / 2); int comp_iter; int real_iter; for (comp_iter = 0, real_iter = 0; real_iter < val_r.size(); comp_iter += 1, real_iter += 2) { ret_c[comp_iter] = std::complex{val_r[real_iter], val_r[real_iter + 1]}; } return ret_c; } } /** * Returns the dimensions of the variable * * @param name Name of variable. * @return the dimensions of the variable if it exists; an empty vector * is returned otherwise */ std::vector dims_r(const std::string& name) const { std::vector::const_iterator loc = std::find(names_.begin(), names_.end(), name); if (loc == names_.end()) return std::vector(); return dims_[loc - names_.begin()]; } /** * Return true if the specified variable name has * integer values. Always returns false. * * @param name Name of variable. * @return false */ bool contains_i(const std::string& name) const { return false; } /** * Returns an empty vector. * * @param name Name of variable. * @return empty vector */ std::vector vals_i(const std::string& name) const { std::vector empty_vals_i; return empty_vals_i; } /** * Return the dimensions of the specified floating point variable. * Returns an empty vector. * * @param name Name of variable. * @return empty vector */ std::vector dims_i(const std::string& name) const { std::vector empty_dims_i; return empty_dims_i; } /** * Fill a list of the names of the floating point variables in * the context. 
This will return the names of the parameters in * the model. * * @param names Vector to store the list of names in. */ void names_r(std::vector& names) const { names = names_; } /** * Fill a list of the names of the integer variables in * the context. This context has no variables. * * @param names Vector to store the list of names in. */ void names_i(std::vector& names) const { names.clear(); } /** * Check variable dimensions against variable declaration. * Only used for data read in from file. * * @param stage stan program processing stage * @param name variable name * @param base_type declared stan variable type * @param dims_declared variable dimensions * @throw std::runtime_error if mismatch between declared * dimensions and dimensions found in context. */ void validate_dims(const std::string& stage, const std::string& name, const std::string& base_type, const std::vector& dims_declared) const { stan::io::validate_dims(*this, stage, name, base_type, dims_declared); } /** * Return the random initialization on the unconstrained scale. * * @return the unconstrained parameters */ std::vector get_unconstrained() const { return unconstrained_params_; } private: /** * Parameter names in the model */ std::vector names_; /** * Dimensions of parameters in the model */ std::vector> dims_; /** * Random parameter values of the model in the * unconstrained space */ std::vector unconstrained_params_; /** * Random parameter values of the model in the * constrained space */ std::vector> vals_r_; /** * Computes the size of a variable based on the dim provided. * * @param dim dimension of the variable * @return total size of the variable */ size_t dim_size(const std::vector& dim) { size_t size = 1; for (size_t j = 0; j < dim.size(); ++j) size *= dim[j]; return size; } /** * Returns a vector of constrained values in the format expected * out of the vals_r() function. 
* * @param[in] constrained constrained parameter values * @param[in] dims dimensions of each of the parameter values * @return constrained values reshaped to be returned in the vals_r * function */ std::vector> constrained_to_vals_r( const std::vector& constrained, const std::vector>& dims) { std::vector> vals_r(dims.size()); std::vector::const_iterator start = constrained.begin(); for (size_t i = 0; i < dims.size(); ++i) { size_t size = dim_size(dims[i]); vals_r[i] = std::vector(start, start + size); start += size; } return vals_r; } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/json/0000755000176200001440000000000014645137105020175 5ustar liggesusersStanHeaders/inst/include/src/stan/io/json/json_data_handler.hpp0000644000176200001440000006107114645137105024352 0ustar liggesusers#ifndef STAN_IO_JSON_JSON_DATA_HANDLER_HPP #define STAN_IO_JSON_JSON_DATA_HANDLER_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace json { typedef std::pair, std::vector> var_r; typedef std::pair, std::vector> var_i; typedef std::map vars_map_r; typedef std::map vars_map_i; /** Enum of the kinds of structures the handler needs to manage. * Determined by the initial sequence of start elements following * the top-level set of keys in the JSON object. */ struct meta_type { enum { SCALAR = 0, // no start elements ARRAY = 1, // one or more "[" TUPLE = 2, // one or more "{" ARRAY_OF_TUPLES = 3, // one or more "[" followed by "{" }; }; /** Enum which tracks handler events. Used to identify first and last slot of a tuple. */ struct meta_event { enum { OBJ_OPEN = 0, // { OBJ_CLOSE = 1, // } KEY = 2, }; }; /** Tracks array dimensions. * Vector 'dims_acc' records number of elements seen since open_array. * Vector 'dims' records size of first row seen. * Int 'cur_dim' tracks nested array rows. 
*/ class array_dims { public: std::vector dims; std::vector dims_acc; int cur_dim; array_dims() : dims(), dims_acc(), cur_dim(0) {} bool operator==(const array_dims& other) { return dims == other.dims && dims_acc == other.dims_acc && cur_dim == other.cur_dim; } bool operator!=(const array_dims& other) { return !operator==(other); } }; /** Tracks num slots in a tuple for array of tuple consistency. */ class tuple_slots { public: size_t slots; size_t slots_acc; bool is_first; tuple_slots() : slots(0), slots_acc(0), is_first(true) {} }; /** * A json_data_handler is an implementation of a * json_handler that restricts the allowed JSON text * to a single JSON object which define Stan variables. * The handler is responsible for populating the data structures * `vars_r` and `vars_i` which map Stan variable names to the values * and dimensions found in the JSON. * * In the JSON, each Stan variable is a JSON key : value pair. * The key is a string (the Stan variable name) and the value * is either a scalar variables, array, or a tuple. * Stan program variables can only be of type int or real (double). * The strings \"Inf\" and \"Infinity\" are mapped to positive infinity, * the strings \"-Inf\" and \"-Infinity\" are mapped to negative infinity, * and the string \"NaN\" is mapped to not-a-number. * Bare versions of Infinity, -Infinity, and NaN are also allowed. * * Tuple variables consist of a JSON object, whose keys correspond * to the slot number, counting from 1. Tuple elements can be arrays * or other tuples and array elements can be tuples, which allows * for any level of nested arrays within tuples, or tuples within arrays. * * For a Stan model variable which has nested tuples, only the innermost * tuple slot will correspond to a variable in the generated C++ code, * likewise, in the JSON object, only the innermost elements will be * int or real values. 
For arrays of tuples, the handler needs to track * both the array dimension and whether or not the values found so far * are of type real or int. To do this, the handler uses a series of * maps between tuple slots seen so far and the C++ storage type (int or real), * the variable meta-type (array, tuple, or array of tuples). * For arrays of tuples, we need to ensure that all tuple elements of an array * are of the same shape. To do this we track the number of slots in the tuple * as well as the dimensions of any array slots in the tuple. * * If the top-level object entry key is not a legal Stan variable name * the handler will not check the corresponding value, other than maintining * the event state and key_stack. */ class json_data_handler : public stan::json::json_handler { private: vars_map_r& vars_r; vars_map_i& vars_i; std::vector key_stack; std::map var_types_map; // vars_r and vars_i entries std::map slot_types_map; // all slots all vars parsed std::map slot_dims_map; std::map tuple_slots_map; std::map int_slots_map; std::vector values_r; // accumulates real var values std::vector values_i; // accumulates int var values size_t array_start_i; // index into values_i size_t array_start_r; // index into values_r int event; // tracks most recent meta_event bool not_stan_var; // accept non-Stan entries void reset_values() { // Once var values have been copied into var_context maps, // clear the accumulator vectors. 
values_r.clear(); values_i.clear(); array_start_i = 0; array_start_r = 0; } inline std::string key_str() { return boost::algorithm::join(key_stack, "."); } std::string outer_key_str() { std::string result; if (key_stack.size() > 1) { std::string slot = key_stack.back(); key_stack.pop_back(); result = key_str(); key_stack.push_back(slot); } return result; } bool is_init() { return (key_stack.empty() && var_types_map.empty() && slot_types_map.empty() && values_r.empty() && values_i.empty() && slot_dims_map.empty() && array_start_i == 0 && array_start_r == 0 && int_slots_map.empty()); } /** Stan variable names must start with a letter * and contain only letters, numbers, or an underscore. */ bool valid_varname(const std::string& name) { static const std::regex re("[a-zA-Z][a-zA-Z0-9_]*"); return std::regex_match(name, re); } bool is_array_tuples(const std::vector& keys) { std::vector stack(keys); std::string key; stack.pop_back(); while (!stack.empty()) { key = boost::algorithm::join(stack, "."); if (slot_types_map[key] == meta_type::ARRAY_OF_TUPLES) return true; stack.pop_back(); } return false; } array_dims get_outer_dims(const std::vector& keys) { std::vector stack(keys); std::string key; stack.pop_back(); while (!stack.empty()) { key = boost::algorithm::join(stack, "."); if (slot_dims_map.count(key) == 1) return slot_dims_map[key]; stack.pop_back(); } key = boost::algorithm::join(keys, "."); if (slot_dims_map.count(key) != 1) unexpected_error(key, "not an array"); return slot_dims_map[key]; } void set_outer_dims(array_dims update) { std::vector stack = key_stack; std::string key; stack.pop_back(); while (!stack.empty()) { key = boost::algorithm::join(stack, "."); if (slot_dims_map.count(key) == 1) break; stack.pop_back(); } if (stack.empty()) { key = boost::algorithm::join(key_stack, "."); unexpected_error(key, "ill-formed array"); } slot_dims_map[key] = update; } void promote_to_double() { if (int_slots_map[key_str()]) { int_slots_map[key_str()] = false; 
values_r.reserve(values_i.size()); values_r.insert(values_r.end(), values_i.begin(), values_i.end()); array_start_r = array_start_i; values_i.clear(); array_start_i = 0; } } /* Save non-tuple vars and innermost tuple slots to vars_i and vars_r. * Converts multi-dim arrays from row-major to column major. * For arrays of tuples we need to check that new elements are consistent * with previous tuple elements. */ void save_key_value_pair() { if (key_stack.empty()) return; if (not_stan_var) { key_stack.pop_back(); return; } std::string key = key_str(); if (slot_types_map.count(key) < 1) unexpected_error(key, "unknown variable"); if (slot_types_map[key] == meta_type::SCALAR || slot_types_map[key] == meta_type::ARRAY) { bool is_new = (vars_r.count(key) == 0 && vars_i.count(key) == 0); bool is_int = int_slots_map[key]; bool is_real = vars_r.count(key) == 1; bool was_int = !is_int && vars_i.count(key) == 1; std::vector dims; if (slot_dims_map.count(key) == 1) dims = slot_dims_map[key].dims; if (dims.size() > 1) { if (is_int) { std::vector cm_values_i(values_i.size()); to_column_major(key, cm_values_i, values_i, dims); values_i.assign(cm_values_i.begin(), cm_values_i.end()); } else { std::vector cm_values_r(values_r.size()); to_column_major(key, cm_values_r, values_r, dims); values_r.assign(cm_values_r.begin(), cm_values_r.end()); } } if (is_new) { var_types_map[key] = slot_types_map[key]; if (is_int) { std::pair, std::vector> pair; pair = make_pair(values_i, dims); vars_i[key] = pair; } else { std::pair, std::vector> pair; pair = make_pair(values_r, dims); vars_r[key] = pair; } } else { bool is_aot = false; std::string vname; for (auto& key : key_stack) { vname.append(key); if (slot_types_map[vname] == meta_type::ARRAY_OF_TUPLES) { is_aot = true; break; } vname.append("."); } if (!is_aot) unexpected_error(key, "not array of tuples"); bool consistent = true; if (is_int || was_int) { auto expect_dims = vars_i[key].second; size_t expect_vals_len = 1; for (auto& x : 
expect_dims) expect_vals_len *= x; if (is_int) { if (expect_vals_len != values_i.size()) consistent = false; } else { if (expect_vals_len != values_r.size()) consistent = false; } } else { auto expect_dims = vars_r[key].second; size_t expect_vals_len = 1; for (auto& x : expect_dims) expect_vals_len *= x; if (expect_vals_len != values_r.size()) consistent = false; } if (!consistent) { std::stringstream errorMsg; errorMsg << "Variable " << key << ": size mismatch between tuple elements."; throw json_error(errorMsg.str()); } var_types_map[key] = meta_type::ARRAY; if ((!is_int && was_int) || (is_int && is_real)) { // promote to double std::vector values_tmp; for (auto& x : vars_i[key].first) { values_tmp.push_back(x); } for (auto& x : values_r) values_tmp.push_back(x); std::pair, std::vector> pair; pair = make_pair(values_tmp, dims); vars_r[key] = pair; vars_i.erase(key); } else if (is_int) { for (auto& x : values_i) vars_i[key].first.push_back(x); vars_i[key].second = dims; } else { for (auto& x : values_r) vars_r[key].first.push_back(x); vars_r[key].second = dims; } } } key_stack.pop_back(); } /* For array of tuples, concatenate dimensions * Update vars_i and vars_r dimensions accordingly. 
*/ void update_array_dims() { for (auto const& var : var_types_map) { if (var.second != meta_type::ARRAY) { continue; } std::vector all_dims; std::vector slots; split(slots, var.first, boost::is_any_of("."), boost::token_compress_on); std::string slot; for (size_t i = 0; i < slots.size(); ++i) { slot.append(slots[i]); if (slot_dims_map.count(slot) == 1 && !slot_dims_map[slot].dims.empty()) { for (auto& x : slot_dims_map[slot].dims) all_dims.push_back(x); } slot.append("."); } if (vars_i.count(var.first) == 1) { if (all_dims.size() == vars_i[var.first].second.size()) continue; else vars_i[var.first].second.assign(all_dims.begin(), all_dims.end()); } else if (vars_r.count(var.first) == 1) { if (all_dims.size() == vars_r[var.first].second.size()) continue; else vars_r[var.first].second.assign(all_dims.begin(), all_dims.end()); } else { std::stringstream errorMsg; errorMsg << "Variable: " << var.first << ", ill-formed JSON."; throw json_error(errorMsg.str()); } } } template void to_column_major(std::string vname, std::vector& cm_vals, const std::vector& rm_vals, const std::vector& dims) { size_t expected_size = 1; for (auto& x : dims) expected_size *= x; if (expected_size != rm_vals.size()) { std::stringstream errorMsg; errorMsg << "Variable: " << vname << ", error: ill-formed array."; throw json_error(errorMsg.str()); } for (size_t i = 0; i < rm_vals.size(); i++) { size_t idx = convert_offset_rtl_2_ltr(vname, i, dims); cm_vals[idx] = rm_vals[i]; } } void unexpected_error(const std::string& where, const std::string& what) { std::stringstream errorMsg; errorMsg << "Variable " << where << ", " << what << "."; throw json_error(errorMsg.str()); } public: /** * Construct a json_data_handler object. 
* * @param a_vars_r name-value map for real-valued variables * @param a_vars_i name-value map for int-valued variables */ json_data_handler(vars_map_r& a_vars_r, vars_map_i& a_vars_i) : json_handler(), vars_r(a_vars_r), vars_i(a_vars_i), key_stack(), var_types_map(), slot_types_map(), slot_dims_map(), tuple_slots_map(), int_slots_map(), values_r(), values_i(), array_start_i(0), array_start_r(0) {} /** Clear all maps before parsing next JSON object. * This means that we don't accumulate variable definitions * across calls to the parser. */ void start_text() { vars_i.clear(); vars_r.clear(); var_types_map.clear(); slot_types_map.clear(); slot_dims_map.clear(); tuple_slots_map.clear(); int_slots_map.clear(); reset_values(); not_stan_var = true; } /** Once all variable definitions have been processed, * update dimensions for array of tuple variables. */ void end_text() { update_array_dims(); } /** A key is either a top-level Stan variable name or a tuple slot id. * Logic handles edge case where key is the first slot of a tuple; * the name of the enclosing object is not used in the generated C++, * but we still need to track the number of tuple slots. 
*/ void key(const std::string& key) { if (event != meta_event::OBJ_OPEN) { save_key_value_pair(); } event = meta_event::KEY; reset_values(); std::string outer = key_str(); key_stack.push_back(key); if (key_stack.size() == 1) { not_stan_var = !valid_varname(key); } if (not_stan_var) return; if (key_stack.size() == 1 && slot_types_map.count(key) == 1) { std::stringstream errorMsg; errorMsg << "Attempt to redefine variable: " << key << "."; throw json_error(errorMsg.str()); } else if (key_stack.size() > 1 && slot_types_map[outer] == meta_type::ARRAY_OF_TUPLES) { if (tuple_slots_map[outer].is_first) { tuple_slots_map[outer].slots++; } else { tuple_slots_map[outer].slots_acc++; } } std::string vname = key_str(); if (slot_types_map.count(vname) == 0) { slot_types_map[vname] = meta_type::SCALAR; int_slots_map[vname] = true; } } /** * A start object ("{") event changes the meta-type of the current key. * Initialize or update tuple slots. */ void start_object() { event = meta_event::OBJ_OPEN; if (is_init() || not_stan_var) return; std::string key = key_str(); if (slot_types_map[key] == meta_type::ARRAY) { slot_types_map[key] = meta_type::ARRAY_OF_TUPLES; } else if (slot_types_map[key_str()] == meta_type::SCALAR) { slot_types_map[key_str()] = meta_type::TUPLE; } if (slot_types_map[key] == meta_type::ARRAY_OF_TUPLES) { if (tuple_slots_map.count(key) == 0) { tuple_slots slots; tuple_slots_map[key] = slots; } else { tuple_slots_map[key].is_first = false; tuple_slots_map[key].slots_acc = 0; } } } /** An end object ("}") event closes either the top-level object or a tuple. * If this is an array of tuples, track or check the number tuple slots * and the array size. 
*/ void end_object() { event = meta_event::OBJ_CLOSE; if (not_stan_var) { if (!key_stack.empty()) key_stack.pop_back(); return; } if (key_stack.size() > 1) { std::string tuple = outer_key_str(); if (slot_types_map[tuple] == meta_type::ARRAY_OF_TUPLES) { array_dims outer = get_outer_dims(key_stack); if (!outer.dims.empty()) { outer.dims_acc[outer.dims.size() - 1]++; set_outer_dims(outer); } if (tuple_slots_map.count(tuple) == 0) unexpected_error(tuple, "found close object, not a tuple var"); if (tuple_slots_map[tuple].is_first) { tuple_slots_map[tuple].is_first = false; } else { if (tuple_slots_map[tuple].slots_acc != tuple_slots_map[tuple].slots) { std::stringstream errorMsg; errorMsg << "Variable " << tuple << ": size mismatch between tuple elements."; throw json_error(errorMsg.str()); } } } } save_key_value_pair(); } /** For a start array ("[") event we first check that we're not currently * processing the values in an array. We need to do this because JSON * doesn't distinguish lists of heterogenous elements and arrays. * Then we add or update the dimensions of the array variable. 
*/ void start_array() { if (key_stack.empty()) { throw json_error("Expecting JSON object, found array."); } if (not_stan_var) return; std::string key(key_str()); if (slot_types_map[key] == meta_type::SCALAR && !(values_r.empty() && values_r.empty())) { std::stringstream errorMsg; errorMsg << "Variable: " << key << ", error: non-scalar array value."; throw json_error(errorMsg.str()); } if (slot_types_map[key] == meta_type::SCALAR) slot_types_map[key] = meta_type::ARRAY; else if (slot_types_map[key] == meta_type::TUPLE) unexpected_error(key, "ill-formed tuple"); array_dims dims; if (slot_dims_map.count(key) == 1) dims = slot_dims_map[key]; dims.cur_dim++; if (dims.dims.empty() || dims.dims.size() < dims.cur_dim) { dims.dims.push_back(0); dims.dims_acc.push_back(0); } if (dims.cur_dim > 1) dims.dims_acc[dims.cur_dim - 2]++; slot_dims_map[key] = dims; array_start_i = values_i.size(); array_start_r = values_r.size(); } /** An array event ("]") closes the current array dimension. * The innermost array dimension are the scalar elements which * are found in the accumulator vectors `values_i` and `values_r`. * If processing the first row of an array, record the size of this row, * else check that the size of this row matches recorded row size. 
*/ void end_array() { if (not_stan_var) return; if (slot_dims_map.count(key_str()) == 0) unexpected_error(key_str(), "ill-formed array"); std::string key(key_str()); array_dims dims = slot_dims_map[key]; int idx = dims.cur_dim - 1; bool is_int = int_slots_map[key]; bool is_last = (slot_types_map[key] != meta_type::ARRAY_OF_TUPLES && dims.cur_dim == dims.dims.size()); if (is_last && 0 == dims.dims[idx]) { // innermost row of scalar elts if (is_int) dims.dims[idx] = values_i.size() - array_start_i; else dims.dims[idx] = values_r.size() - array_start_r; } else if (0 == dims.dims[idx]) { // row of array or tuple elts dims.dims[idx] = dims.dims_acc[idx]; } else { bool is_rect = false; if (is_last) { if ((is_int && dims.dims[idx] == values_i.size() - array_start_i) || (!is_int && dims.dims[idx] == values_r.size() - array_start_r)) is_rect = true; } else if (dims.dims[idx] == dims.dims_acc[idx]) { is_rect = true; } if (!is_rect) { std::stringstream errorMsg; errorMsg << "Variable: " << key << ", error: non-rectangular array."; throw json_error(errorMsg.str()); } } dims.dims_acc[idx] = 0; dims.cur_dim--; slot_dims_map[key] = dims; } void null() { if (not_stan_var) return; std::stringstream errorMsg; errorMsg << "Variable: " << key_str() << ", error: null values not allowed."; throw json_error(errorMsg.str()); } void boolean(bool p) { if (not_stan_var) return; std::stringstream errorMsg; errorMsg << "Variable: " << key_str() << ", error: boolean values not allowed."; throw json_error(errorMsg.str()); } void string(const std::string& s) { if (not_stan_var) return; double tmp; if (0 == s.compare("-Inf")) { tmp = -std::numeric_limits::infinity(); } else if (0 == s.compare("-Infinity")) { tmp = -std::numeric_limits::infinity(); } else if (0 == s.compare("Inf")) { tmp = std::numeric_limits::infinity(); } else if (0 == s.compare("Infinity")) { tmp = std::numeric_limits::infinity(); } else if (0 == s.compare("NaN")) { tmp = std::numeric_limits::quiet_NaN(); } else { 
std::stringstream errorMsg; errorMsg << "Variable: " << key_str() << ", error: string values not allowed."; throw json_error(errorMsg.str()); } promote_to_double(); values_r.push_back(tmp); } void number_double(double x) { if (not_stan_var) return; promote_to_double(); values_r.push_back(x); } void number_int(int n) { if (not_stan_var) return; if (int_slots_map[key_str()]) { values_i.push_back(n); } else { values_r.push_back(n); } } void number_unsigned_int(unsigned n) { if (not_stan_var) return; // if integer overflow, promote numeric data to double if (n > (unsigned)std::numeric_limits::max()) promote_to_double(); if (int_slots_map[key_str()]) { values_i.push_back(static_cast(n)); } else { values_r.push_back(n); } } void number_int64(int64_t n) { if (not_stan_var) return; // the number doesn't fit in int (otherwise number_int() would be called) number_double(n); } void number_unsigned_int64(uint64_t n) { if (not_stan_var) return; // the number doesn't fit in int (otherwise number_unsigned_int() would be // called) number_double(n); } /** This function provides the column-major offset of an array element * given its row-major offset and the array dimensions. 
*/ size_t convert_offset_rtl_2_ltr(std::string vname, size_t rtl_offset, const std::vector& dims) { size_t rtl_dsize = 1; for (size_t i = 1; i < dims.size(); i++) rtl_dsize *= dims[i]; if (rtl_offset >= rtl_dsize * dims[0]) { std::stringstream errorMsg; errorMsg << "Variable: " << vname << ", ill-formed data."; throw json_error(errorMsg.str()); } // calculate offset by working left-to-right to get array indices // for row-major offset left-most dimensions are divided out // for column-major offset successive dimensions are multiplied in size_t rem = rtl_offset; size_t ltr_offset = 0; size_t ltr_dsize = 1; for (size_t i = 0; i < dims.size() - 1; i++) { size_t idx = rem / rtl_dsize; ltr_offset += idx * ltr_dsize; rem = rem - idx * rtl_dsize; rtl_dsize = rtl_dsize / dims[i + 1]; ltr_dsize *= dims[i]; } ltr_offset += rem * ltr_dsize; // for loop stops 1 early return ltr_offset; } }; } // namespace json } // namespace stan #endif StanHeaders/inst/include/src/stan/io/json/rapidjson_parser.hpp0000644000176200001440000000657214645137105024265 0ustar liggesusers#ifndef STAN_IO_JSON_RAPIDJSON_PARSER_HPP #define STAN_IO_JSON_RAPIDJSON_PARSER_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace json { enum class ParsingState { Idle, Started, End }; template struct RapidJSONHandler { explicit RapidJSONHandler(Handler &h) : h_(h), state_(ParsingState::Idle) {} bool check_start() { if (state_ == ParsingState::Idle) { error_message_ = "expecting start of object ({) or array ([)"; return false; } return true; } bool Null() { h_.null(); return check_start(); } bool Bool(bool b) { h_.boolean(b); return check_start(); } bool Int(int i) { h_.number_int(i); return check_start(); } bool Uint(unsigned u) { h_.number_unsigned_int(u); return check_start(); } bool Int64(int64_t i) { h_.number_int64(i); return check_start(); } bool Uint64(uint64_t u) { h_.number_unsigned_int64(u); return 
check_start(); } bool Double(double d) { h_.number_double(d); return check_start(); } bool RawNumber(const char *str, rapidjson::SizeType length, bool copy) { // this will never get return true; } bool String(const char *str, rapidjson::SizeType length, bool copy) { h_.string(str); return check_start(); } bool StartObject() { state_ = ParsingState::Started; error_message_ = ""; h_.start_object(); return true; } bool Key(const char *str, rapidjson::SizeType length, bool copy) { h_.key(str); last_key_ = str; return check_start(); } bool EndObject(rapidjson::SizeType memberCount) { h_.end_object(); return true; } bool StartArray() { state_ = ParsingState::Started; error_message_ = ""; h_.start_array(); return true; } bool EndArray(rapidjson::SizeType elementCount) { h_.end_array(); return check_start(); } Handler &h_; ParsingState state_; std::string error_message_; std::string last_key_; }; /** * Parse the JSON text represented by the specified input stream, * sending events to the specified handler. 
* * @tparam Handler * @param in Input stream from which to parse * @param handler Handler for events from parser */ template void rapidjson_parse(std::istream &in, Handler &handler) { rapidjson::Reader reader; RapidJSONHandler filter(handler); rapidjson::IStreamWrapper isw(in); handler.start_text(); if (!reader.Parse(isw, filter)) { rapidjson::ParseErrorCode err = reader.GetParseErrorCode(); std::stringstream ss; ss << "Error in JSON parsing " << std::endl << "at offset " << reader.GetErrorOffset() << ": " << std::endl; if (filter.error_message_.size() > 0) { ss << filter.error_message_ << std::endl; } else { ss << rapidjson::GetParseError_En(err) << std::endl; } throw json_error(ss.str()); } handler.end_text(); } } // namespace json } // namespace stan #endif StanHeaders/inst/include/src/stan/io/json/json_handler.hpp0000644000176200001440000000414114645137105023354 0ustar liggesusers#ifndef STAN_IO_JSON_JSON_HANDLER_HPP #define STAN_IO_JSON_JSON_HANDLER_HPP #include namespace stan { namespace json { /** * Abstract base class for JSON handlers. More efficient to just * implement directly and pass in as a template, but this version * is available for convenience. */ class json_handler { public: json_handler() {} ~json_handler() {} /** * Handle the start of the text. */ virtual void start_text() {} /** * Handle the end of the text. */ virtual void end_text() {} /** * Handle the start of an array. */ virtual void start_array() {} /** * Handle the end of an array. */ virtual void end_array() {} /** * Handle the start of an object. */ virtual void start_object() {} /** * Handle the end of an object. */ virtual void end_object() {} /** * Handle the null literal value. */ virtual void null() {} /** * Handle the boolean literal value of the specified polarity. * * @param p polarity of boolean */ virtual void boolean(bool p) {} /** * Handle the specified double-precision floating point * value. * * @param x Value to handle. 
*/ virtual void number_double(double x) {} /** * Handle the specified integer value. * * @param n Value to handle. */ virtual void number_int(int n) {} /** * Handle the specified unsigned integer value. * * @param n Value to handle. */ virtual void number_unsigned_int(unsigned n) {} /** * Handle the specified 64-bit integer value. * * @param n Value to handle. */ virtual void number_int64(int64_t n) {} /** * Handle the specified unsigned 64-bit integer value. * * @param n Value to handle. */ virtual void number_unsigned_int64(uint64_t n) {} /** * Handle the specified string value. * * @param s String value to handle. */ virtual void string(const std::string &s) {} /** * Handle the specified object key. * * @param s String object key to handle. */ virtual void key(const std::string &s) {} }; } // namespace json } // namespace stan #endif StanHeaders/inst/include/src/stan/io/json/json_error.hpp0000644000176200001440000000073014645137105023070 0ustar liggesusers#ifndef STAN_IO_JSON_JSON_ERROR_HPP #define STAN_IO_JSON_JSON_ERROR_HPP #include #include namespace stan { namespace json { /** * Exception type for JSON errors. */ struct json_error : public std::logic_error { /** * Construct a JSON error with the specified message * @param what Message to attach to error */ explicit json_error(const std::string &what) : logic_error(what) {} }; } // namespace json } // namespace stan #endif StanHeaders/inst/include/src/stan/io/json/json_data.hpp0000644000176200001440000002350414645137105022654 0ustar liggesusers#ifndef STAN_IO_JSON_JSON_DATA_HPP #define STAN_IO_JSON_JSON_DATA_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace json { /** * A json_data is a var_context object * that represents a set of named values which are typed to either * double or int and can be either scalar * value or an array of values of any dimensionality. 
* Arrays must be rectangular and the values of an array are all of * the same type, either double or int. * *

The dimensions and values of variables are accessed by variable name. * The values of a variable are stored as a vector of values and * a vector of array dimensions, where a scalar value consists of * a single value and an emtpy vector for the dimensionality. * Multidimensional arrays are stored in column-major order, * meaning the first index changes the most quickly. * If all the values of an array are int values, the array will be * stored as a vector of ints, else the array will be stored * as a vector of type double. * *

json_data objects are created by using the * json_parser and a json_data_handler * to read a single JSON text from an input stream. */ class json_data : public stan::io::var_context { private: vars_map_r vars_r_; vars_map_i vars_i_; std::vector const empty_vec_r_; std::vector const empty_vec_i_; std::vector const empty_vec_ui_; /** * Return true if this json_data contains the specified * variable name defined as a real-valued variable. This method * returns false if the values are all integers. * * @param name Variable name to test. * @return true if the variable exists in the * real values of the json_data. */ bool contains_r_only(const std::string &name) const { return vars_r_.find(name) != vars_r_.end(); } public: /** * Construct a json_data object from the specified input stream. * * Warning: This method does not close the input stream. * * @param in Input stream from which to read. * @throws json_exception if data is not well-formed stan data declaration */ explicit json_data(std::istream &in) : vars_r_(), vars_i_() { json_data_handler handler(vars_r_, vars_i_); rapidjson_parse(in, handler); } /** * Return true if this json_data contains the specified * variable name. This method returns true * even if the values are all integers. * * @param name Variable name to test. * @return true if the variable exists. */ bool contains_r(const std::string &name) const { return contains_r_only(name) || contains_i(name); } /** * Return true if this json_data contains an integer * valued array with the specified name. * * @param name Variable name to test. * @return true if the variable name has an integer * array value. */ bool contains_i(const std::string &name) const { return vars_i_.find(name) != vars_i_.end(); } /** * Return the double values for the variable with the specified * name or null. * * @param name Name of variable. * @return Values of variable. 
*/ std::vector vals_r(const std::string &name) const { if (contains_r_only(name)) { return (vars_r_.find(name)->second).first; } else if (contains_i(name)) { std::vector vec_int = (vars_i_.find(name)->second).first; std::vector vec_r(vec_int.size()); for (size_t ii = 0; ii < vec_int.size(); ii++) { vec_r[ii] = vec_int[ii]; } return vec_r; } return empty_vec_r_; } /** * Read out the complex values for the variable with the specifed * name and return a flat vector of complex values. * * @param name Name of Variable of type string. * @return Vector of complex numbers with values equal to the read input. */ std::vector> vals_c(const std::string &name) const { if (contains_r_only(name)) { auto &&vec_r = (vars_r_.find(name)->second); auto &&val_r = vec_r.first; auto &&dim_r = vec_r.second; std::vector> vec_c(val_r.size() / 2); int offset = 1; for (int i = 0; i < dim_r.size() - 1; ++i) { offset *= dim_r[i]; } for (int i = 0; i < vec_c.size(); ++i) { vec_c[i] = std::complex{val_r[i], val_r[i + offset]}; } return vec_c; } else if (contains_i(name)) { auto &&vec_i = (vars_i_.find(name)->second); auto &&val_i = vec_i.first; auto &&dim_i = vec_i.second; std::vector> vec_c(val_i.size() / 2); int offset = 1; for (int i = 0; i < dim_i.size() - 1; ++i) { offset *= dim_i[i]; } for (int i = 0; i < vec_c.size(); ++i) { vec_c[i] = std::complex{static_cast(val_i[i]), static_cast(val_i[i + offset])}; } return vec_c; } return std::vector>{}; } /** * Return the dimensions for the variable with the specified * name. * * @param name Name of variable. * @return Dimensions of variable. */ std::vector dims_r(const std::string &name) const { if (contains_r_only(name)) { return (vars_r_.find(name)->second).second; } else if (contains_i(name)) { return (vars_i_.find(name)->second).second; } return empty_vec_ui_; } /** * Return the integer values for the variable with the specified * name. * * @param name Name of variable. * @return Values. 
*/ std::vector vals_i(const std::string &name) const { if (contains_i(name)) { return (vars_i_.find(name)->second).first; } return empty_vec_i_; } /** * Return the dimensions for the integer variable with the specified * name. * * @param name Name of variable. * @return Dimensions of variable. */ std::vector dims_i(const std::string &name) const { if (contains_i(name)) { return (vars_i_.find(name)->second).second; } return empty_vec_ui_; } /** * Return a list of the names of the floating point variables in * the json_data. * * @param names Vector to store the list of names in. */ virtual void names_r(std::vector &names) const { names.resize(0); for (vars_map_r::const_iterator it = vars_r_.begin(); it != vars_r_.end(); ++it) names.push_back((*it).first); } /** * Return a list of the names of the integer variables in * the json_data. * * @param names Vector to store the list of names in. */ virtual void names_i(std::vector &names) const { names.resize(0); for (vars_map_i::const_iterator it = vars_i_.begin(); it != vars_i_.end(); ++it) names.push_back((*it).first); } /** * Remove variable from the object. * * @param name Name of the variable to remove. * @return If variable is removed returns true, else * returns false. 
*/ bool remove(const std::string &name) { return (vars_i_.erase(name) > 0) || (vars_r_.erase(name) > 0); } void validate_dims(const std::string &stage, const std::string &name, const std::string &base_type, const std::vector &dims_declared) const { std::vector dims = dims_r(name); // JSON '[ ]' is ambiguous - any multi-dim variable with len 0 dim size_t num_elements = 1; if (dims.size() == 0) { // treat non-existent variables as size-0 objects num_elements = 0; } else { for (size_t i = 0; i < dims.size(); ++i) { num_elements *= dims[i]; } } size_t num_elements_expected = 1; for (size_t i = 0; i < dims_declared.size(); ++i) { num_elements_expected *= dims_declared[i]; } if (num_elements == 0 && num_elements_expected == 0) return; if (dims.size() != dims_declared.size()) { std::stringstream msg; msg << "mismatch in number dimensions declared and found in context" << "; processing stage=" << stage << "; variable name=" << name << "; dims declared="; dims_msg(msg, dims_declared); msg << "; dims found="; dims_msg(msg, dims); throw std::runtime_error(msg.str()); } for (size_t i = 0; i < dims.size(); ++i) { if (dims_declared[i] != dims[i]) { std::stringstream msg; msg << "mismatch in dimension declared and found in context" << "; processing stage=" << stage << "; variable name=" << name << "; position=" << i << "; dims declared="; dims_msg(msg, dims_declared); msg << "; dims found="; dims_msg(msg, dims); throw std::runtime_error(msg.str()); } } bool is_int_type = base_type == "int"; if (is_int_type) { if (!contains_i(name)) { std::stringstream msg; msg << (contains_r(name) ? 
"int variable contained non-int values" : "variable does not exist") << "; processing stage=" << stage << "; variable name=" << name << "; base type=" << base_type; throw std::runtime_error(msg.str()); } } else { if (!contains_r(name)) { std::stringstream msg; msg << "variable does not exist" << "; processing stage=" << stage << "; variable name=" << name << "; base type=" << base_type; throw std::runtime_error(msg.str()); } } } }; } // namespace json } // namespace stan #endif StanHeaders/inst/include/src/stan/io/var_context.hpp0000644000176200001440000001551314645137105022276 0ustar liggesusers#ifndef STAN_IO_VAR_CONTEXT_HPP #define STAN_IO_VAR_CONTEXT_HPP #include #include #include #include #include namespace stan { namespace io { /** * A var_context reads array variables of integer and * floating point type by name and dimension. * *

An array's dimensionality is described by a sequence of * (unsigned) integers. For instance, (7, 2, 3) picks * out a 7 by 2 by 3 array. The empty dimensionality sequence * () is used for scalars. * *

Multidimensional arrays are stored in column-major order, * meaning the first index changes the most quickly. * *

If a variable has integer variables, it should return * those integer values cast to floating point values when * accessed through the floating-point methods. */ class var_context { public: virtual ~var_context() {} /** * Return true if the specified variable name is * defined. This method should return true even * if the values are all integers. * * @param name Name of variable. * @return true if the variable exists with real * values. */ virtual bool contains_r(const std::string& name) const = 0; /** * Return the floating point values for the variable of the * specified variable name in last-index-major order. This * method should cast integers to floating point values if the * values of the named variable are all integers. * *

If there is no variable of the specified name, the empty * vector is returned. * * @param name Name of variable. * @return Sequence of values for the named variable. */ virtual std::vector vals_r(const std::string& name) const = 0; /** * Return the complex floating point values for the variable of the * specified variable name in last-index-major order. This * method should cast integers to floating point values if the * values of the named variable are all integers. * *

If there is no variable of the specified name, the empty * vector is returned. * * @param name Name of variable. * @return Sequence of values for the named variable. */ virtual std::vector> vals_c( const std::string& name) const = 0; /** * Return the dimensions for the specified floating point variable. * If the variable doesn't exist or if it is a scalar, the * return result should be the empty vector. * * @param name Name of variable. * @return Sequence of dimensions for the variable. */ virtual std::vector dims_r(const std::string& name) const = 0; /** * Return true if the specified variable name has * integer values. * * @param name Name of variable. * @return true if an integer variable of the specified * name is defined. */ virtual bool contains_i(const std::string& name) const = 0; /** * Return the integer values for the variable of the specified * name in last-index-major order or the empty sequence if the * variable is not defined. * * @param name Name of variable. * @return Sequence of integer values. */ virtual std::vector vals_i(const std::string& name) const = 0; /** * Return the dimensions of the specified floating point variable. * If the variable doesn't exist (or if it is a scalar), the * return result should be the empty vector. * * @param name Name of variable. * @return Sequence of dimensions for the variable. */ virtual std::vector dims_i(const std::string& name) const = 0; /** * Fill a list of the names of the floating point variables in * the context. * * @param names Vector to store the list of names in. */ virtual void names_r(std::vector& names) const = 0; /** * Fill a list of the names of the integer variables in * the context. * * @param names Vector to store the list of names in. */ virtual void names_i(std::vector& names) const = 0; /** * Check variable dimensions against variable declaration. 
* * @param stage stan program processing stage * @param name variable name * @param base_type declared stan variable type * @param dims_declared variable dimensions * @throw std::runtime_error if mismatch between declared * dimensions and dimensions found in context. */ virtual void validate_dims( const std::string& stage, const std::string& name, const std::string& base_type, const std::vector& dims_declared) const = 0; /** * Append vector of dimensions to message string. * * @param msg message string * @param dims array of dimension sizes */ void dims_msg(std::stringstream& msg, const std::vector& dims) const { msg << '('; for (size_t i = 0; i < dims.size(); ++i) { if (i > 0) msg << ','; msg << dims[i]; } msg << ')'; } static std::vector to_vec() { return std::vector(); } static std::vector to_vec(size_t n1) { std::vector v(1); v[0] = n1; return v; } static std::vector to_vec(size_t n1, size_t n2) { std::vector v(2); v[0] = n1; v[1] = n2; return v; } static std::vector to_vec(size_t n1, size_t n2, size_t n3) { std::vector v(3); v[0] = n1; v[1] = n2; v[2] = n3; return v; } static std::vector to_vec(size_t n1, size_t n2, size_t n3, size_t n4) { std::vector v(4); v[0] = n1; v[1] = n2; v[2] = n3; v[3] = n4; return v; } static std::vector to_vec(size_t n1, size_t n2, size_t n3, size_t n4, size_t n5) { std::vector v(5); v[0] = n1; v[1] = n2; v[2] = n3; v[3] = n4; v[4] = n5; return v; } static std::vector to_vec(size_t n1, size_t n2, size_t n3, size_t n4, size_t n5, size_t n6) { std::vector v(6); v[0] = n1; v[1] = n2; v[2] = n3; v[3] = n4; v[4] = n5; v[5] = n6; return v; } static std::vector to_vec(size_t n1, size_t n2, size_t n3, size_t n4, size_t n5, size_t n6, size_t n7) { std::vector v(7); v[0] = n1; v[1] = n2; v[2] = n3; v[3] = n4; v[4] = n5; v[5] = n6; v[6] = n7; return v; } static std::vector to_vec(size_t n1, size_t n2, size_t n3, size_t n4, size_t n5, size_t n6, size_t n7, size_t n8) { std::vector v(8); v[0] = n1; v[1] = n2; v[2] = n3; v[3] = n4; v[4] = n5; 
v[5] = n6; v[6] = n7; v[7] = n8; return v; } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/deserializer.hpp0000644000176200001440000013461214645137105022426 0ustar liggesusers#ifndef STAN_IO_DESERIALIZER_HPP #define STAN_IO_DESERIALIZER_HPP #include namespace stan { namespace io { /** * A stream-based reader for integer, scalar, vector, matrix * and array data types, with Jacobian calculations. * * The template parameter T represents the type of * scalars and the values in vectors and matrices. The only * requirement on the template type T is that a * double can be copied into it, as in * * T t = 0.0; * * This includes double itself and the reverse-mode * algorithmic differentiation class stan::math::var. * *

For transformed values, the scalar type parameter T * must support the transforming operations, such as exp(x) * for positive-bounded variables. It must also support equality and * inequality tests with double values. * * @tparam T Basic scalar type. */ template class deserializer { private: Eigen::Map> map_r_; // map of reals. Eigen::Map> map_i_; // map of integers. size_t r_size_{0}; // size of reals available. size_t i_size_{0}; // size of integers available. size_t pos_r_{0}; // current position in map of reals. size_t pos_i_{0}; // current position in map of integers. /** * Return reference to current scalar and increment the internal counter. * @param m amount to move `pos_r_` up. */ inline const T& scalar_ptr_increment(size_t m) { pos_r_ += m; return map_r_.coeffRef(pos_r_ - m); } /** * Check there are at least m reals left to read * @param m Number of reals to read * @throws std::runtime_error if there aren't at least m reals left */ void check_r_capacity(size_t m) const { STAN_NO_RANGE_CHECKS_RETURN; if (pos_r_ + m > r_size_) { []() STAN_COLD_PATH { throw std::runtime_error("no more scalars to read"); }(); } } /** * Check there are at least m integers left to read * @param m Number of integers to read * @throws std::runtime_error if there aren't at least m integers left */ void check_i_capacity(size_t m) const { STAN_NO_RANGE_CHECKS_RETURN; if (pos_i_ + m > i_size_) { []() STAN_COLD_PATH { throw std::runtime_error("no more integers to read"); }(); } } template using conditional_var_val_t = std::conditional_t::value && is_var::value, return_var_matrix_t, K>; template using is_fp_or_ad = bool_constant::value || is_autodiff::value>; public: using matrix_t = Eigen::Matrix; using vector_t = Eigen::Matrix; using row_vector_t = Eigen::Matrix; using map_matrix_t = Eigen::Map; using map_vector_t = Eigen::Map; using map_row_vector_t = Eigen::Map; using var_matrix_t = stan::math::var_value< Eigen::Matrix>; using var_vector_t = stan::math::var_value>; using 
var_row_vector_t = stan::math::var_value>; /** * Construct a variable reader using the specified vectors * as the source of scalar and integer values for data. This * class holds a reference to the specified data vectors. * * Attempting to read beyond the end of the data or integer * value sequences raises a runtime exception. * * @param data_r Sequence of scalar values. * @param data_i Sequence of integer values. */ template * = nullptr> deserializer(const RVec& data_r, const IntVec& data_i) : map_r_(data_r.data(), data_r.size()), map_i_(data_i.data(), data_i.size()), r_size_(data_r.size()), i_size_(data_i.size()) {} /** * Return the number of scalars remaining to be read. * * @return Number of scalars left to read. */ inline size_t available() const noexcept { return r_size_ - pos_r_; } /** * Return the number of integers remaining to be read. * * @return Number of integers left to read. */ inline size_t available_i() const noexcept { return i_size_ - pos_i_; } /** * Return the next object in the sequence. * * @return Next scalar value. */ template >* = nullptr> inline auto read() { check_r_capacity(1); return map_r_.coeffRef(pos_r_++); } /** * Construct a complex variable from the next two reals in the sequence * * @return Next complex value */ template * = nullptr> inline auto read() { check_r_capacity(2); auto real = scalar_ptr_increment(1); auto imag = scalar_ptr_increment(1); return std::complex{real, imag}; } /** * Return the next integer in the integer sequence. * * @return Next integer value. */ template * = nullptr> inline auto read() { check_i_capacity(1); return map_i_.coeffRef(pos_i_++); } /** * Return an Eigen column vector of size `m`. * @tparam Ret The type to return. * @param m Size of column vector. 
*/ template * = nullptr, require_not_vt_complex* = nullptr> inline auto read(Eigen::Index m) { if (unlikely(m == 0)) { return map_vector_t(nullptr, m); } else { check_r_capacity(m); return map_vector_t(&scalar_ptr_increment(m), m); } } /** * Return an Eigen column vector of size `m` with inner complex type. * @tparam Ret The type to return. * @param m Size of column vector. */ template * = nullptr, require_vt_complex* = nullptr> inline auto read(Eigen::Index m) { if (unlikely(m == 0)) { return Ret(map_vector_t(nullptr, m)); } else { check_r_capacity(2 * m); Ret ret(m); for (Eigen::Index i = 0; i < m; ++i) { auto real = scalar_ptr_increment(1); auto imag = scalar_ptr_increment(1); ret.coeffRef(i) = std::complex{real, imag}; } return ret; } } /** * Return an Eigen row vector of size `m`. * @tparam Ret The type to return. * @param m Size of row vector. */ template * = nullptr, require_not_vt_complex* = nullptr> inline auto read(Eigen::Index m) { if (unlikely(m == 0)) { return map_row_vector_t(nullptr, m); } else { check_r_capacity(m); return map_row_vector_t(&scalar_ptr_increment(m), m); } } /** * Return an Eigen row vector of size `m` with inner complex type. * @tparam Ret The type to return. * @param m Size of row vector. */ template * = nullptr, require_vt_complex* = nullptr> inline auto read(Eigen::Index m) { if (unlikely(m == 0)) { return Ret(map_row_vector_t(nullptr, m)); } else { check_r_capacity(2 * m); Ret ret(m); for (Eigen::Index i = 0; i < m; ++i) { auto real = scalar_ptr_increment(1); auto imag = scalar_ptr_increment(1); ret.coeffRef(i) = std::complex{real, imag}; } return ret; } } /** * Return an Eigen matrix of size `(rows, cols)`. * @tparam Ret The type to return. * @param rows The size of the rows of the matrix. * @param cols The size of the cols of the matrix. 
*/ template * = nullptr, require_not_vt_complex* = nullptr> inline auto read(Eigen::Index rows, Eigen::Index cols) { if (rows == 0 || cols == 0) { return map_matrix_t(nullptr, rows, cols); } else { check_r_capacity(rows * cols); return map_matrix_t(&scalar_ptr_increment(rows * cols), rows, cols); } } /** * Return an Eigen matrix of size `(rows, cols)` with complex inner type. * @tparam Ret The type to return. * @param rows The size of the rows of the matrix. * @param cols The size of the cols of the matrix. */ template * = nullptr, require_vt_complex* = nullptr> inline auto read(Eigen::Index rows, Eigen::Index cols) { if (rows == 0 || cols == 0) { return Ret(map_matrix_t(nullptr, rows, cols)); } else { check_r_capacity(2 * rows * cols); Ret ret(rows, cols); for (Eigen::Index i = 0; i < rows * cols; ++i) { auto real = scalar_ptr_increment(1); auto imag = scalar_ptr_increment(1); ret.coeffRef(i) = std::complex{real, imag}; } return ret; } } /** * Return a `var_value` with inner Eigen type. * @tparam Ret The type to return. * @tparam T_ Should never be set by user, set to default value of `T` for * performing deduction on the class's inner type. * @tparam Sizes A parameter pack of integral types. * @param sizes A parameter pack of integral types representing the * dimensions of the `var_value` matrix or vector. */ template * = nullptr, require_var_matrix_t* = nullptr> inline auto read(Sizes... sizes) { using stan::math::promote_scalar_t; using var_v_t = promote_scalar_t>; return stan::math::to_var_value(this->read(sizes...)); } /** * Return an Eigen type when the deserializers inner class is not var. * @tparam Ret The type to return. * @tparam T_ Should never be set by user, set to default value of `T` for * performing deduction on the class's inner type. * @tparam Sizes A parameter pack of integral types. * @param sizes A parameter pack of integral types representing the * dimensions of the `var_value` matrix or vector. 
*/ template * = nullptr, require_var_matrix_t* = nullptr> inline auto read(Sizes... sizes) { return this->read>(sizes...); } /** * Return an `std::vector` * @tparam Ret The type to return. * @tparam Sizes integral types. * @param m The size of the vector. * @param dims a possible set of inner container sizes passed to subsequent * `read` functions. */ template * = nullptr, require_not_same_t, T>* = nullptr> inline auto read(Eigen::Index m, Sizes... dims) { if (unlikely(m == 0)) { return std::decay_t(); } else { std::decay_t ret_vec; ret_vec.reserve(m); for (size_t i = 0; i < m; ++i) { ret_vec.emplace_back(this->read>(dims...)); } return ret_vec; } } /** * Return an `std::vector` of scalars * @tparam Ret The type to return. * @tparam Sizes integral types. * @param m The size of the vector. */ template * = nullptr, require_same_t, T>* = nullptr> inline auto read(Eigen::Index m) { if (unlikely(m == 0)) { return std::decay_t(); } else { check_r_capacity(m); const auto* start_pos = &this->map_r_.coeffRef(this->pos_r_); const auto* end_pos = &this->map_r_.coeffRef(this->pos_r_ + m); this->pos_r_ += m; return std::decay_t(start_pos, end_pos); } } /** * Return the next object transformed to have the specified * lower bound, possibly incrementing the specified reference with the * log of the absolute Jacobian determinant of the transform. * *

See stan::math::lb_constrain(T,double,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LB Type of lower bound. * @tparam LP Type of log prob. * @tparam Sizes A pack of possible sizes to construct the object from. * @param lb Lower bound on result. * @param lp Reference to log probability variable to increment. * @param sizes a pack of sizes to use to construct the return. */ template inline auto read_constrain_lb(const LB& lb, LP& lp, Sizes... sizes) { if (Jacobian) { return stan::math::lb_constrain(this->read(sizes...), lb, lp); } else { return stan::math::lb_constrain(this->read(sizes...), lb); } } /** * Return the next object transformed to have the specified * upper bound, possibly incrementing the specified reference with the * log of the absolute Jacobian determinant of the transform. * *

See stan::math::ub_constrain(T,double,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam UB Type of upper bound. * @tparam LP Type of log prob. * @param ub Upper bound on result. * @param lp Reference to log probability variable to increment. * @param sizes a pack of sizes to use to construct the return. */ template inline auto read_constrain_ub(const UB& ub, LP& lp, Sizes... sizes) { if (Jacobian) { return stan::math::ub_constrain(this->read(sizes...), ub, lp); } else { return stan::math::ub_constrain(this->read(sizes...), ub); } } /** * Return the next object transformed to be between the * the specified lower and upper bounds. * *

See stan::math::lub_constrain(T, double, double, T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LB Type of lower bound. * @tparam UB Type of upper bound. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lb Lower bound. * @param ub Upper bound. * @param lp Reference to log probability variable to increment. * @param sizes Pack of integrals to use to construct the return's type. */ template inline auto read_constrain_lub(const LB& lb, const UB& ub, LP& lp, Sizes... sizes) { if (Jacobian) { return stan::math::lub_constrain(this->read(sizes...), lb, ub, lp); } else { return stan::math::lub_constrain(this->read(sizes...), lb, ub); } } /** * Return the next object transformed to have the specified offset and * multiplier. * *

See stan::math::offset_multiplier_constrain(T, double, * double). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam Offset Type of offset. * @tparam Mult Type of multiplier. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param offset Offset. * @param multiplier Multiplier. * @param lp Reference to log probability variable to increment. * @param sizes Pack of integrals to use to construct the return's type. * @return Next object transformed to fall between the specified * bounds. */ template inline auto read_constrain_offset_multiplier(const Offset& offset, const Mult& multiplier, LP& lp, Sizes... sizes) { using stan::math::offset_multiplier_constrain; if (Jacobian) { return offset_multiplier_constrain(this->read(sizes...), offset, multiplier, lp); } else { return offset_multiplier_constrain(this->read(sizes...), offset, multiplier); } } /** * Return the next unit_vector of the specified size (using one fewer * unconstrained scalars), incrementing the specified reference with the * log absolute Jacobian determinant. * *

See stan::math::unit_vector_constrain(Eigen::Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param sizes Pack of integrals to use to construct the return's type. * @return The next unit_vector of the specified size. * @throw std::invalid_argument if k is zero */ template * = nullptr> inline auto read_constrain_unit_vector(LP& lp, Sizes... sizes) { using stan::math::unit_vector_constrain; if (Jacobian) { return math::eval(unit_vector_constrain(this->read(sizes...), lp)); } else { return math::eval(unit_vector_constrain(this->read(sizes...))); } } /** * Return the next unit_vector of the specified size (using one fewer * unconstrained scalars), incrementing the specified reference with the * log absolute Jacobian determinant. * *

See stan::math::unit_vector_constrain(Eigen::Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return The next unit_vector of the specified size. * @throw std::invalid_argument if k is zero */ template * = nullptr> inline auto read_constrain_unit_vector(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_unit_vector, Jacobian>( lp, sizes...)); } return ret; } /** * Return the next simplex of the specified size (using one fewer * unconstrained scalars), incrementing the specified reference with the * log absolute Jacobian determinant. * *

See stan::math::simplex_constrain(Eigen::Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param size Size of the returned simplex. * @return The next simplex of the specified size. * @throws std::invalid_argument if number of dimensions (`k`) is zero */ template * = nullptr> inline auto read_constrain_simplex(LP& lp, size_t size) { using stan::math::simplex_constrain; stan::math::check_positive("read_simplex", "size", size); if (Jacobian) { return simplex_constrain(this->read(size - 1), lp); } else { return simplex_constrain(this->read(size - 1)); } } /** * Return the next simplex of the specified size (using one fewer * unconstrained scalars), incrementing the specified reference with the * log absolute Jacobian determinant. * *

See stan::math::simplex_constrain(Eigen::Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return The next simplex of the specified size. * @throws std::invalid_argument if number of dimensions (`k`) is zero */ template * = nullptr> inline auto read_constrain_simplex(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_simplex, Jacobian>(lp, sizes...)); } return ret; } /** * Return the next ordered vector of the specified * size, incrementing the specified reference with the log * absolute Jacobian of the determinant. * *

See stan::math::ordered_constrain(Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param sizes Pack of integrals to use to construct the return's type. * @return Next ordered vector of the specified size. */ template * = nullptr> inline auto read_constrain_ordered(LP& lp, Sizes... sizes) { using stan::math::ordered_constrain; if (Jacobian) { return ordered_constrain(this->read(sizes...), lp); } else { return ordered_constrain(this->read(sizes...)); } } /** * Return the next ordered vector of the specified * size, incrementing the specified reference with the log * absolute Jacobian of the determinant. * *

See stan::math::ordered_constrain(Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam Sizes A parameter pack of integral types. * @tparam LP Type of log probability. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return Next ordered vector of the specified size. */ template * = nullptr> inline auto read_constrain_ordered(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_ordered, Jacobian>(lp, sizes...)); } return ret; } /** * Return the next positive_ordered vector of the specified * size, incrementing the specified reference with the log * absolute Jacobian of the determinant. * *

See stan::math::positive_ordered_constrain(Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam Sizes A parameter pack of integral types. * @tparam LP Type of log probability. * @param lp The reference to the variable holding the log * probability to increment. * @param sizes Pack of integrals to use to construct the return's type. * @return Next positive_ordered vector of the specified size. */ template * = nullptr> inline auto read_constrain_positive_ordered(LP& lp, Sizes... sizes) { using stan::math::positive_ordered_constrain; if (Jacobian) { return positive_ordered_constrain(this->read(sizes...), lp); } else { return positive_ordered_constrain(this->read(sizes...)); } } /** * Return the next positive_ordered vector of the specified * size, incrementing the specified reference with the log * absolute Jacobian of the determinant. * *

See stan::math::positive_ordered_constrain(Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam Sizes A parameter pack of integral types. * @tparam LP Type of log probability. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return Next positive_ordered vector of the specified size. */ template * = nullptr> inline auto read_constrain_positive_ordered(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_positive_ordered, Jacobian>( lp, sizes...)); } return ret; } /** * Return the next Cholesky factor with the specified * dimensionality, reading from an unconstrained vector of the * appropriate size, and increment the log probability reference * with the log Jacobian adjustment for the transform. * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @param lp The reference to the variable holding the log * @param M Rows of Cholesky factor * @param N Columns of Cholesky factor * @return Next Cholesky factor. * @throw std::domain_error if the matrix is not a valid * Cholesky factor. 
*/ template * = nullptr> inline auto read_constrain_cholesky_factor_cov(LP& lp, Eigen::Index M, Eigen::Index N) { if (Jacobian) { return stan::math::cholesky_factor_constrain( this->read>((N * (N + 1)) / 2 + (M - N) * N), M, N, lp); } else { return stan::math::cholesky_factor_constrain( this->read>((N * (N + 1)) / 2 + (M - N) * N), M, N); } } /** * Return the next Cholesky factor with the specified * dimensionality, reading from an unconstrained vector of the * appropriate size, and increment the log probability reference * with the log Jacobian adjustment for the transform. * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam Sizes A parameter pack of integral types. * @tparam LP Type of log probability. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return Next Cholesky factor. * @throw std::domain_error if the matrix is not a valid * Cholesky factor. */ template * = nullptr> inline auto read_constrain_cholesky_factor_cov(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_cholesky_factor_cov, Jacobian>( lp, sizes...)); } return ret; } /** * Return the next Cholesky factor for a correlation matrix with * the specified dimensionality, reading from an unconstrained * vector of the appropriate size, and increment the log * probability reference with the log Jacobian adjustment for * the transform. * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @param lp Log probability reference to increment. 
* @param K Rows and columns of Cholesky factor * @return Next Cholesky factor for a correlation matrix. * @throw std::domain_error if the matrix is not a valid * Cholesky factor for a correlation matrix. */ template * = nullptr> inline auto read_constrain_cholesky_factor_corr(LP& lp, Eigen::Index K) { using stan::math::cholesky_corr_constrain; if (Jacobian) { return cholesky_corr_constrain( this->read>((K * (K - 1)) / 2), K, lp); } else { return cholesky_corr_constrain( this->read>((K * (K - 1)) / 2), K); } } /** * Return the next Cholesky factor for a correlation matrix with * the specified dimensionality, reading from an unconstrained * vector of the appropriate size, and increment the log * probability reference with the log Jacobian adjustment for * the transform. * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return Next Cholesky factor for a correlation matrix. * @throw std::domain_error if the matrix is not a valid * Cholesky factor for a correlation matrix. */ template * = nullptr> inline auto read_constrain_cholesky_factor_corr(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_cholesky_factor_corr, Jacobian>(lp, sizes...)); } return ret; } /** * Return the next covariance matrix of the specified dimensionality, * incrementing the specified reference with the log absolute Jacobian * determinant. * *

See stan::math::cov_matrix_constrain(Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @param lp The reference to the variable holding the log * @param k Dimensionality of the (square) covariance matrix. * @return The next covariance matrix of the specified dimensionality. */ template * = nullptr> inline auto read_constrain_cov_matrix(LP& lp, Eigen::Index k) { using stan::math::cov_matrix_constrain; if (Jacobian) { return cov_matrix_constrain( this->read>(k + (k * (k - 1)) / 2), k, lp); } else { return cov_matrix_constrain( this->read>(k + (k * (k - 1)) / 2), k); } } /** * Return the next covariance matrix of the specified dimensionality, * incrementing the specified reference with the log absolute Jacobian * determinant. * *

See stan::math::cov_matrix_constrain(Matrix,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return The next covariance matrix of the specified dimensionality. */ template * = nullptr> auto read_constrain_cov_matrix(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_cov_matrix, Jacobian>( lp, sizes...)); } return ret; } /** * Return the next object transformed to be a (partial) * correlation between -1 and 1, incrementing the specified * reference with the log of the absolute Jacobian determinant. * *

See stan::math::corr_constrain(T,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @param lp The reference to the variable holding the log * probability to increment. * @param k Dimensions of matrix return type. */ template * = nullptr, require_matrix_t* = nullptr> inline auto read_constrain_corr_matrix(LP& lp, Eigen::Index k) { using stan::math::corr_matrix_constrain; if (Jacobian) { return corr_matrix_constrain( this->read>((k * (k - 1)) / 2), k, lp); } else { return corr_matrix_constrain( this->read>((k * (k - 1)) / 2), k); } } /** * Specialization of `read_corr` for `std::vector` return types. * *

See stan::math::corr_constrain(T,T&). * * @tparam Ret The type to return. * @tparam Jacobian Whether to increment the log of the absolute Jacobian * determinant of the transform. * @tparam LP Type of log probability. * @tparam Sizes A parameter pack of integral types. * @param lp The reference to the variable holding the log * probability to increment. * @param vecsize The size of the return vector. * @param sizes Pack of integrals to use to construct the return's type. * @return The next scalar transformed to a correlation. */ template * = nullptr> inline auto read_constrain_corr_matrix(LP& lp, const size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( this->read_constrain_corr_matrix, Jacobian>( lp, sizes...)); } return ret; } /** * Read a serialized lower bounded variable and unconstrain it * * @tparam Ret Type of output * @tparam L Type of lower bound * @tparam Sizes Types of dimensions of output * @param lb Lower bound * @param sizes dimensions * @return Unconstrained variable */ template inline auto read_free_lb(const L& lb, Sizes... sizes) { return stan::math::lb_free(this->read(sizes...), lb); } /** * Read a serialized lower bounded variable and unconstrain it * * @tparam Ret Type of output * @tparam U Type of upper bound * @tparam Sizes Types of dimensions of output * @param ub Upper bound * @param sizes dimensions * @return Unconstrained variable */ template inline auto read_free_ub(const U& ub, Sizes... sizes) { return stan::math::ub_free(this->read(sizes...), ub); } /** * Read a serialized bounded variable and unconstrain it * * @tparam Ret Type of output * @tparam L Type of lower bound * @tparam U Type of upper bound * @tparam Sizes Types of dimensions of output * @param lb Lower bound * @param ub Upper bound * @param sizes dimensions * @return Unconstrained variable */ template inline auto read_free_lub(const L& lb, const U& ub, Sizes... 
sizes) { return stan::math::lub_free(this->read(sizes...), lb, ub); } /** * Read a serialized offset-multiplied variable and unconstrain it * * @tparam Ret Type of output * @tparam O Type of offset * @tparam M Type of multiplier * @tparam Sizes Types of dimensions of output * @param offset Offset * @param multiplier Multiplier * @param sizes dimensions * @return Unconstrained variable */ template inline auto read_free_offset_multiplier(const O& offset, const M& multiplier, Sizes... sizes) { return stan::math::offset_multiplier_free(this->read(sizes...), offset, multiplier); } /** * Read a serialized unit vector and unconstrain it * * @tparam Ret Type of output * @return Unconstrained vector */ template * = nullptr> inline auto read_free_unit_vector(size_t size) { return stan::math::unit_vector_free(this->read(size)); } /** * Read serialized unit vectors and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained vectors */ template * = nullptr> inline auto read_free_unit_vector(size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back(read_free_unit_vector>(sizes...)); } return ret; } /** * Read a serialized simplex and unconstrain it * * @tparam Ret Type of output * @return Unconstrained vector */ template * = nullptr> inline auto read_free_simplex(size_t size) { return stan::math::simplex_free(this->read(size)); } /** * Read serialized simplices and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained vectors */ template * = nullptr> inline auto read_free_simplex(size_t vecsize, Sizes... 
sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back(read_free_simplex>(sizes...)); } return ret; } /** * Read a serialized ordered and unconstrain it * * @tparam Ret Type of output * @return Unconstrained vector */ template * = nullptr> inline auto read_free_ordered(size_t size) { return stan::math::ordered_free(this->read(size)); } /** * Read serialized ordered vectors and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained vectors */ template * = nullptr> inline auto read_free_ordered(size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back(read_free_ordered>(sizes...)); } return ret; } /** * Read a serialized positive ordered vector and unconstrain it * * @tparam Ret Type of output * @return Unconstrained vector */ template * = nullptr> inline auto read_free_positive_ordered(size_t size) { return stan::math::positive_ordered_free(this->read(size)); } /** * Read serialized positive ordered vectors and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained vectors */ template * = nullptr> inline auto read_free_positive_ordered(size_t vecsize, Sizes... 
sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back(read_free_positive_ordered>(sizes...)); } return ret; } /** * Read a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Ret Type of output * @param M Rows of matrix * @param N Cols of matrix * @return Unconstrained matrix */ template inline auto read_free_cholesky_factor_cov(Eigen::Index M, Eigen::Index N) { return stan::math::cholesky_factor_free(this->read(M, N)); } /** * Read serialized covariance matrix cholesky factors and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained matrices */ template * = nullptr> inline auto read_free_cholesky_factor_cov(size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( read_free_cholesky_factor_cov>(sizes...)); } return ret; } /** * Read a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Ret Type of output * @param M Rows/Cols of matrix * @return Unconstrained matrix */ template * = nullptr> inline auto read_free_cholesky_factor_corr(size_t M) { return stan::math::cholesky_corr_free(this->read(M, M)); } /** * Read serialized correlation matrix cholesky factors and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained matrices */ template * = nullptr> inline auto read_free_cholesky_factor_corr(size_t vecsize, Sizes... 
sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back( read_free_cholesky_factor_corr>(sizes...)); } return ret; } /** * Read a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Ret Type of output * @param M Rows/Cols of matrix * @return Unconstrained matrix */ template * = nullptr> inline auto read_free_cov_matrix(size_t M) { return stan::math::cov_matrix_free(this->read(M, M)); } /** * Read serialized covariance matrices and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained matrices */ template * = nullptr> inline auto read_free_cov_matrix(size_t vecsize, Sizes... sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back(read_free_cov_matrix>(sizes...)); } return ret; } /** * Read a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Ret Type of output * @param M Rows/Cols of matrix * @return Unconstrained matrix */ template * = nullptr> inline auto read_free_corr_matrix(size_t M) { return stan::math::corr_matrix_free(this->read(M, M)); } /** * Read serialized correlation matrices and unconstrain them * * @tparam Ret Type of output * @tparam Sizes Types of dimensions of output * @param vecsize Vector size * @param sizes dimensions * @return Unconstrained matrices */ template * = nullptr> inline auto read_free_corr_matrix(size_t vecsize, Sizes... 
sizes) { std::decay_t ret; ret.reserve(vecsize); for (size_t i = 0; i < vecsize; ++i) { ret.emplace_back(read_free_corr_matrix>(sizes...)); } return ret; } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/array_var_context.hpp0000644000176200001440000003662614645137105023504 0ustar liggesusers#ifndef STAN_IO_ARRAY_VAR_CONTEXT_HPP #define STAN_IO_ARRAY_VAR_CONTEXT_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace io { /** * An array_var_context object represents a named arrays * with dimensions constructed from an array, a vector * of names, and a vector of all dimensions for each element. */ class array_var_context : public var_context { private: // Pair used in data maps template using data_pair_t = std::pair, std::vector>; std::map> vars_r_; // Holds data for reals std::map> vars_i_; // Holds data for doubles // When search for variable name fails, return one these const std::vector empty_vec_r_; const std::vector empty_vec_i_; const std::vector empty_vec_ui_; /** * Search over the real variables to check if a name is in the map * @param name The name of the variable to search for * @return logical indicating if the variable was found in the map of reals. */ bool contains_r_only(const std::string& name) const { return vars_r_.find(name) != vars_r_.end(); } /** * Check (1) if the vector size of dimensions is no smaller * than the name vector size; (2) if the size of the input * array is large enough for what is needed. * * @param names The names for each variable * @param array_size The total size of the vector holding the values we want * to access. * @param dims Vector holding the dimensions for each variable. * @return If the array size is equal to the number of dimensions, * a vector of the cumulative sum of the dimensions of each inner element of * dims. 
The return of this function is used in the add_* methods to get the * sequence of values For each variable. * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. */ template inline std::vector validate_dims( const std::vector& names, const T array_size, const std::vector>& dims) { const size_t num_par = names.size(); stan::math::check_less_or_equal("validate_dims", "array_var_context", dims.size(), num_par); std::vector elem_dims_total(dims.size() + 1); for (int i = 0; i < dims.size(); i++) { elem_dims_total[i + 1] = std::accumulate(dims[i].begin(), dims[i].end(), 1, std::multiplies()) + elem_dims_total[i]; } stan::math::check_less_or_equal("validate_dims", "array_var_context", elem_dims_total[dims.size()], array_size); return elem_dims_total; } /** * Adds a set of floating point variables to the floating point map. * @param names Names of each variable. * @param values The real values of variable in a contiguous * column major order container. * @param dims the dimensions for each variable. * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. */ void add_r(const std::vector& names, const std::vector& values, const std::vector>& dims) { std::vector dim_vec = validate_dims(names, values.size(), dims); for (size_t i = 0; i < names.size(); i++) { vars_r_.emplace(names[i], data_pair_t{{values.data() + dim_vec[i], values.data() + dim_vec[i + 1]}, dims[i]}); } } /** * Adds a set of floating point variables to the floating point map. * @param names Names of each variable. * @param values The real values of variable in an Eigen column vector. * @param dims the dimensions for each variable. * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. 
*/ void add_r(const std::vector& names, const Eigen::VectorXd& values, const std::vector>& dims) { std::vector dim_vec = validate_dims(names, values.size(), dims); for (size_t i = 0; i < names.size(); i++) { vars_r_.emplace(names[i], data_pair_t{{values.data() + dim_vec[i], values.data() + dim_vec[i + 1]}, dims[i]}); } } /** * Adds a set of integer variables to the integer map. * @param names Names of each variable. * @param values The integer values of variable in a vector. * @param dims the dimensions for each variable. * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. */ void add_i(const std::vector& names, const std::vector& values, const std::vector>& dims) { std::vector dim_vec = validate_dims(names, values.size(), dims); for (size_t i = 0; i < names.size(); i++) { vars_i_.emplace(names[i], data_pair_t{{values.data() + dim_vec[i], values.data() + dim_vec[i + 1]}, dims[i]}); } } public: /** * Construct an array_var_context from only real value arrays. * * @param names_r names for each element * @param values_r a vector of double values for all elements * @param dim_r a vector of dimensions * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. */ array_var_context(const std::vector& names_r, const std::vector& values_r, const std::vector>& dim_r) { add_r(names_r, values_r, dim_r); } /** * Construct an array_var_context from an Eigen column vector. * * @param names_r names for each element * @param values_r a vector of double values for all elements * @param dim_r a vector of dimensions * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. 
*/ array_var_context(const std::vector& names_r, const Eigen::VectorXd& values_r, const std::vector>& dim_r) { add_r(names_r, values_r, dim_r); } /** * Construct an array_var_context from only integer value arrays. * * @param names_i names for each element * @param values_i a vector of integer values for all elements * @param dim_i a vector of dimensions * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. */ array_var_context(const std::vector& names_i, const std::vector& values_i, const std::vector>& dim_i) { add_i(names_i, values_i, dim_i); } /** * Construct an array_var_context from arrays of both double * and integer separately * * @param names_r names for each element * @param values_r a vector of double values for all elements * @param dim_r a vector of dimensions * @param names_i names for each element * @param values_i a vector of integer values for all elements * @param dim_i a vector of dimensions * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. */ array_var_context(const std::vector& names_r, const std::vector& values_r, const std::vector>& dim_r, const std::vector& names_i, const std::vector& values_i, const std::vector>& dim_i) { add_i(names_i, values_i, dim_i); add_r(names_r, values_r, dim_r); } /** * Construct an array_var_context from arrays of both double * and integer separately * * @param names_r names for each element * @param values_r Eigen column vector of double elements. * @param dim_r a vector of dimensions * @param names_i names for each element * @param values_i a vector of integer values for all elements * @param dim_i a vector of dimensions * @throw std::invalid_argument when size of dimensions is less * then array size or array is not long enough to hold * the dimensions of the data. 
*/ array_var_context(const std::vector& names_r, const Eigen::VectorXd& values_r, const std::vector>& dim_r, const std::vector& names_i, const std::vector& values_i, const std::vector>& dim_i) { add_i(names_i, values_i, dim_i); add_r(names_r, values_r, dim_r); } /** * Return true if this dump contains the specified * variable name is defined. This method returns true * even if the values are all integers. * * @param name Variable name to test. * @return true if the variable exists. */ bool contains_r(const std::string& name) const { return contains_r_only(name) || contains_i(name); } /** * Return true if this dump contains an integer * valued array with the specified name. * * @param name Variable name to test. * @return true if the variable name has an integer * array value. */ bool contains_i(const std::string& name) const { return vars_i_.find(name) != vars_i_.end(); } /** * Return the double values for the variable with the specified * name or null. * * @param name Name of variable. * @return Values of variable. * */ std::vector vals_r(const std::string& name) const { const auto ret_val_r = vars_r_.find(name); if (ret_val_r != vars_r_.end()) { return ret_val_r->second.first; } else { const auto ret_val_i = vars_i_.find(name); if (ret_val_i != vars_i_.end()) { return {ret_val_i->second.first.begin(), ret_val_i->second.first.end()}; } } return empty_vec_r_; } /** * Return the double values for the variable with the specified * name or null. * * @param name Name of variable. * @return Values of variable. 
* */ std::vector> vals_c(const std::string& name) const { const auto val_r = vars_r_.find(name); if (val_r != vars_r_.end()) { std::vector> ret_c(val_r->second.first.size() / 2); int comp_iter; int real_iter; for (comp_iter = 0, real_iter = 0; real_iter < val_r->second.first.size(); comp_iter += 1, real_iter += 2) { ret_c[comp_iter] = std::complex{ val_r->second.first[real_iter], val_r->second.first[real_iter + 1]}; } return ret_c; } else { const auto val_i = vars_i_.find(name); if (val_i != vars_i_.end()) { std::vector> ret_c(val_i->second.first.size() / 2); int comp_iter; int real_iter; for (comp_iter = 0, real_iter = 0; real_iter < val_i->second.first.size(); comp_iter += 1, real_iter += 2) { ret_c[comp_iter] = std::complex{ static_cast(val_i->second.first[real_iter]), static_cast(val_i->second.first[real_iter + 1])}; } return ret_c; } } return std::vector>{}; } /** * Return the dimensions for the double variable with the specified * name. * * @param name Name of variable. * @return Dimensions of variable. */ std::vector dims_r(const std::string& name) const { const auto ret_val_r = vars_r_.find(name); if (ret_val_r != vars_r_.end()) { return ret_val_r->second.second; } else { const auto ret_val_i = vars_i_.find(name); if (ret_val_i != vars_i_.end()) { return ret_val_i->second.second; } } return empty_vec_ui_; } /** * Return the integer values for the variable with the specified * name. * * @param name Name of variable. * @return Values. */ std::vector vals_i(const std::string& name) const { auto ret_val_i = vars_i_.find(name); if (ret_val_i != vars_i_.end()) { return ret_val_i->second.first; } return empty_vec_i_; } /** * Return the dimensions for the integer variable with the specified * name. * * @param name Name of variable. * @return Dimensions of variable. 
*/ std::vector dims_i(const std::string& name) const { auto ret_val_i = vars_i_.find(name); if (ret_val_i != vars_i_.end()) { return ret_val_i->second.second; } return empty_vec_ui_; } /** * Check variable dimensions against variable declaration. * Only used for data read in from file. * * @param stage stan program processing stage * @param name variable name * @param base_type declared stan variable type * @param dims_declared variable dimensions * @throw std::runtime_error if mismatch between declared * dimensions and dimensions found in context. */ void validate_dims(const std::string& stage, const std::string& name, const std::string& base_type, const std::vector& dims_declared) const { size_t num_elts = 1; for (auto& d : dims_declared) { num_elts *= d; } if (num_elts == 0) { return; } stan::io::validate_dims(*this, stage, name, base_type, dims_declared); } /** * Return a list of the names of the floating point variables in * the dump. * * @param names Vector to store the list of names in. */ virtual void names_r(std::vector& names) const { names.clear(); names.reserve(vars_r_.size()); for (const auto& vars_r_iter : vars_r_) { names.push_back(vars_r_iter.first); } } /** * Return a list of the names of the integer variables in * the dump. * * @param names Vector to store the list of names in. */ virtual void names_i(std::vector& names) const { names.clear(); names.reserve(vars_i_.size()); for (const auto& vars_i_iter : vars_r_) { names.push_back(vars_i_iter.first); } } /** * Remove variable from the object. * * @param name Name of the variable to remove. * @return If variable is removed returns true, else * returns false. 
*/ bool remove(const std::string& name) { return (vars_i_.erase(name) > 0) || (vars_r_.erase(name) > 0); } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/ends_with.hpp0000644000176200001440000000072614645137105021726 0ustar liggesusers#ifndef STAN_IO_ENDS_WITH_HPP #define STAN_IO_ENDS_WITH_HPP #include namespace stan { namespace io { /** * Return true if the specified string ends with the specified * suffix. * * @param p suffix * @param s string to test * @return true if s has p as a suffix */ inline bool ends_with(const std::string& p, const std::string& s) { return s.size() >= p.size() && s.substr(s.size() - p.size()) == p; } } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/serializer.hpp0000644000176200001440000003724314645137105022117 0ustar liggesusers#ifndef STAN_IO_SERIALIZER_HPP #define STAN_IO_SERIALIZER_HPP #include namespace stan { namespace io { /** * A stream-based writer for scalar, vector, matrix, and array data types. * *`T` is the storage scalar type. Variables written by the serializer must * have a scalar type convertible to type `T`. * * @tparam T Basic scalar type. */ template class serializer { private: Eigen::Map> map_r_; // map of reals. size_t r_size_{0}; // size of reals available. size_t pos_r_{0}; // current position in map of reals. /** * Check there is room for at least m more reals to store * * @param m Number of reals to Write * @throws std::runtime_error if there isn't room for m reals */ void check_r_capacity(size_t m) const { if (pos_r_ + m > r_size_) { [](auto r_size_, auto pos_r_, auto m) STAN_COLD_PATH { throw std::runtime_error( std::string("In serializer: Storage capacity [") + std::to_string(r_size_) + "] exceeded while writing value of size [" + std::to_string(m) + "] from position [" + std::to_string(pos_r_) + "]. 
This is an internal error, if you see it please report it as" + " an issue on the Stan github repository."); }(r_size_, pos_r_, m); } } template using is_arithmetic_or_ad = bool_constant::value || is_autodiff::value>; public: using matrix_t = Eigen::Matrix; using vector_t = Eigen::Matrix; using row_vector_t = Eigen::Matrix; using map_matrix_t = Eigen::Map; using map_vector_t = Eigen::Map; using map_row_vector_t = Eigen::Map; using var_matrix_t = stan::math::var_value< Eigen::Matrix>; using var_vector_t = stan::math::var_value>; using var_row_vector_t = stan::math::var_value>; /** * Construct a variable serializer using data_r for storage. * * Attempting to write beyond the end of data_r will raise a runtime * exception. * * @param RVec Vector like class. * @param data_r Storage vector */ template * = nullptr> explicit serializer(RVec& data_r) : map_r_(data_r.data(), data_r.size()), r_size_(data_r.size()) {} /** * Return the number of scalars available to be written to. */ inline size_t available() const noexcept { return r_size_ - pos_r_; } /** * Write a scalar to storage * @tparam Scalar A Stan scalar class * @param x A scalar */ template >* = nullptr, require_not_var_matrix_t* = nullptr> inline void write(Scalar x) { check_r_capacity(1); map_r_.coeffRef(pos_r_) = x; pos_r_++; } /** * Write a complex variable to storage * @tparam Complex An `std::complex` type. * @param x The complex scalar */ template * = nullptr> inline void write(Complex&& x) { check_r_capacity(2); map_r_.coeffRef(pos_r_) = x.real(); map_r_.coeffRef(pos_r_ + 1) = x.imag(); pos_r_ += 2; } /** * Write an Eigen column vector to storage * @tparam Vec An Eigen type with compile time columns equal to 1. 
* @param vec The Eigen column vector */ template * = nullptr, require_not_vt_complex* = nullptr> inline void write(Vec&& vec) { check_r_capacity(vec.size()); map_vector_t(&map_r_.coeffRef(pos_r_), vec.size()) = vec; pos_r_ += vec.size(); } /** * Write a Eigen column vector with inner complex type to storage * @tparam Vec An Eigen type with compile time columns equal to 1. * @param vec The Eigen column vector */ template * = nullptr, require_vt_complex* = nullptr> inline void write(Vec&& vec) { check_r_capacity(2 * vec.size()); using stan::math::to_ref; auto&& vec_ref = to_ref(std::forward(vec)); for (Eigen::Index i = 0; i < vec_ref.size(); ++i) { map_r_.coeffRef(pos_r_) = vec_ref.coeff(i).real(); map_r_.coeffRef(pos_r_ + 1) = vec_ref.coeff(i).imag(); pos_r_ += 2; } } /** * Write an Eigen row vector to storage * @tparam Vec An Eigen type with compile time rows equal to 1. * @param vec The Eigen row vector */ template * = nullptr, require_not_vt_complex* = nullptr> inline void write(Vec&& vec) { check_r_capacity(vec.size()); map_row_vector_t(&map_r_.coeffRef(pos_r_), vec.size()) = vec; pos_r_ += vec.size(); } /** * Write an Eigen row vector with inner complex type to storage * @tparam Vec An Eigen type with compile time rows equal to 1. 
* @param vec The Eigen row vector */ template * = nullptr, require_vt_complex* = nullptr> inline void write(Vec&& vec) { using stan::math::to_ref; check_r_capacity(2 * vec.size()); auto&& vec_ref = to_ref(std::forward(vec)); for (Eigen::Index i = 0; i < vec_ref.size(); ++i) { map_r_.coeffRef(pos_r_) = vec_ref.coeff(i).real(); map_r_.coeffRef(pos_r_ + 1) = vec_ref.coeff(i).imag(); pos_r_ += 2; } } /** * Write a Eigen matrix of size `(rows, cols)` to storage * @tparam Mat An Eigen class with dynamic rows and columns * @param mat An Eigen object */ template * = nullptr, require_not_vt_complex* = nullptr> inline void write(Mat&& mat) { check_r_capacity(mat.size()); map_matrix_t(&map_r_.coeffRef(pos_r_), mat.rows(), mat.cols()) = mat; pos_r_ += mat.size(); } /** * Write a Eigen matrix of size `(rows, cols)` with complex inner type to * storage * @tparam Mat The type to write */ template * = nullptr, require_vt_complex* = nullptr> inline void write(Mat&& x) { check_r_capacity(2 * x.size()); using stan::math::to_ref; auto&& x_ref = to_ref(x); for (Eigen::Index i = 0; i < x.size(); ++i) { map_r_.coeffRef(pos_r_) = x_ref.coeff(i).real(); map_r_.coeffRef(pos_r_ + 1) = x_ref.coeff(i).imag(); pos_r_ += 2; } } /** * Write a `var_value` with inner Eigen type to storage * @tparam Ret The type to write * @tparam T_ Should never be set by user, set to default value of `T` for * performing deduction on the class's inner type. * dimensions of the `var_value` matrix or vector. */ template * = nullptr, require_var_matrix_t* = nullptr> inline void write(VarMat&& x) { check_r_capacity(x.size()); map_matrix_t(&map_r_.coeffRef(pos_r_), x.rows(), x.cols()) = stan::math::from_var_value(x); pos_r_ += x.size(); } /** * Write an Eigen type when the serializers inner class is not var. * @tparam VarMat The type to write * @tparam T_ Should never be set by user, set to default value of `T` for * performing deduction on the class's inner type. * dimensions of the `var_value` matrix or vector. 
*/ template * = nullptr, require_var_matrix_t* = nullptr> inline void write(VarMat&& x) { this->write(stan::math::value_of(x)); } /** * Write a `std::vector` to storage * @tparam StdVec The type to write */ template * = nullptr> inline void write(StdVec&& x) { for (const auto& x_i : x) { this->write(x_i); } } /** * Write a serialized lower bounded variable and unconstrain it * * @tparam S A scalar, Eigen type, or `std::vector` * @tparam L Type of lower bound * @param lb Lower bound * @param x An object of the types listed above. */ template inline void write_free_lb(const L& lb, const S& x) { this->write(stan::math::lb_free(x, lb)); } /** * Write a serialized lower bounded variable and unconstrain it * * @tparam S A scalar, Eigen type, or `std::vector` * @tparam U Type of upper bound * @param ub Upper bound * @param x An object of the types listed above. */ template inline void write_free_ub(const U& ub, const S& x) { this->write(stan::math::ub_free(x, ub)); } /** * Write a serialized bounded variable and unconstrain it * * @tparam S A scalar, Eigen type, or `std::vector` * @tparam L Type of lower bound * @tparam U Type of upper bound * @param lb Lower bound * @param ub Upper bound * @param x An object of the types listed above. */ template inline void write_free_lub(const L& lb, const U& ub, const S& x) { this->write(stan::math::lub_free(x, lb, ub)); } /** * Write a serialized offset-multiplied variable and unconstrain it * * @tparam S A scalar, Eigen type, or `std::vector` * @tparam O Type of offset * @tparam M Type of multiplier * @param offset Offset * @param multiplier Multiplier * @param x An object of the types listed above. */ template inline void write_free_offset_multiplier(const O& offset, const M& multiplier, const S& x) { this->write(stan::math::offset_multiplier_free(x, offset, multiplier)); } /** * Write a serialized unit vector and unconstrain it * * @tparam Vec An Eigen type with either fixed rows or columns at compile * time. 
* @param x The vector to read from. */ template * = nullptr> inline void write_free_unit_vector(const Vec& x) { this->write(stan::math::unit_vector_free(x)); } /** * Write serialized unit vectors and unconstrain them * * @tparam StdVec A `std:vector` * @param x An std vector. */ template * = nullptr> inline void write_free_unit_vector(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_unit_vector(ret_i); } } /** * Write a serialized simplex and unconstrain it * * @tparam Vec An Eigen type with either fixed rows or columns at compile * time. * @param x The vector to read from. */ template * = nullptr> inline void write_free_simplex(const Vec& x) { this->write(stan::math::simplex_free(x)); } /** * Write serialized simplices and unconstrain them * * @tparam StdVec A `std:vector` * @param x An std vector. */ template * = nullptr> inline void write_free_simplex(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_simplex(ret_i); } } /** * Write a serialized ordered and unconstrain it * * @tparam Vec An Eigen type with either fixed rows or columns at compile * time. * @param x The vector to read from. */ template * = nullptr> inline void write_free_ordered(const Vec& x) { this->write(stan::math::ordered_free(x)); } /** * Write serialized ordered vectors and unconstrain them * * @tparam StdVec A `std:vector` * @param x An std vector. */ template * = nullptr> inline void write_free_ordered(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_ordered(ret_i); } } /** * Write a serialized positive ordered vector and unconstrain it * * @tparam Vec An Eigen type with either fixed rows or columns at compile * time. * @param x The vector to read from. */ template * = nullptr> inline void write_free_positive_ordered(const Vec& x) { this->write(stan::math::positive_ordered_free(x)); } /** * Write serialized positive ordered vectors and unconstrain them * * @tparam StdVec A `std:vector` * @param x An std vector. 
*/ template * = nullptr> inline void write_free_positive_ordered(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_positive_ordered(ret_i); } } /** * Write a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Mat An Eigen type with dynamic rows and columns at compile time. * @param x An Eigen matrix to write to the serialized vector. */ template * = nullptr> inline void write_free_cholesky_factor_cov(const Mat& x) { this->write(stan::math::cholesky_factor_free(x)); } /** * Write serialized covariance matrix cholesky factors and unconstrain them * * @tparam StdVec A `std:vector` * @param x An std vector. */ template * = nullptr> inline void write_free_cholesky_factor_cov(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_cholesky_factor_cov(ret_i); } } /** * Write a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Mat Type of input * @param x An Eigen matrix to write to the serialized vector. */ template * = nullptr> inline void write_free_cholesky_factor_corr(const Mat& x) { this->write(stan::math::cholesky_corr_free(x)); } /** * Write serialized correlation matrix cholesky factors and unconstrain them * * @tparam StdVec A `std:vector` * @param x An std vector. */ template * = nullptr> inline void write_free_cholesky_factor_corr(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_cholesky_factor_corr(ret_i); } } /** * Write a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Mat An Eigen type with dynamic rows and columns at compile time * @param x An Eigen matrix to write to the serialized vector. */ template * = nullptr> inline void write_free_cov_matrix(const Mat& x) { this->write(stan::math::cov_matrix_free(x)); } /** * Write serialized covariance matrices and unconstrain them * * @tparam StdVec A `std:vector` * @param x a standard vector. 
*/ template * = nullptr> inline void write_free_cov_matrix(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_cov_matrix(ret_i); } } /** * Write a serialized covariance matrix cholesky factor and unconstrain it * * @tparam Mat An Eigen type with dynamic rows and columns at compile time. * @param x An Eigen matrix to write to the serialized vector. */ template * = nullptr> inline void write_free_corr_matrix(const Mat& x) { this->write(stan::math::corr_matrix_free(x)); } /** * Write serialized correlation matrices and unconstrain them * * @tparam StdVec A `std:vector` * @param x An std vector. */ template * = nullptr> inline void write_free_corr_matrix(const StdVec& x) { for (const auto& ret_i : x) { this->write_free_corr_matrix(ret_i); } } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/validate_dims.hpp0000644000176200001440000000474114645137105022550 0ustar liggesusers#ifndef STAN_IO_VALIDATE_DIMS_HPP #define STAN_IO_VALIDATE_DIMS_HPP #include #include #include namespace stan { namespace io { /** * Check variable dimensions against variable declaration. * * @param context The var context to check. * @param stage stan program processing stage * @param name variable name * @param base_type declared stan variable type * @param dims_declared variable dimensions * @throw std::runtime_error if mismatch between declared * dimensions and dimensions found in context. */ inline void validate_dims(const stan::io::var_context& context, const std::string& stage, const std::string& name, const std::string& base_type, const std::vector& dims_declared) { bool is_int_type = base_type == "int"; if (is_int_type) { if (!context.contains_i(name)) { std::stringstream msg; msg << (context.contains_r(name) ? 
"int variable contained non-int values" : "variable does not exist") << "; processing stage=" << stage << "; variable name=" << name << "; base type=" << base_type; throw std::runtime_error(msg.str()); } } else { if (!context.contains_r(name)) { std::stringstream msg; msg << "variable does not exist" << "; processing stage=" << stage << "; variable name=" << name << "; base type=" << base_type; throw std::runtime_error(msg.str()); } } std::vector dims = context.dims_r(name); if (dims.size() != dims_declared.size()) { std::stringstream msg; msg << "mismatch in number dimensions declared and found in context" << "; processing stage=" << stage << "; variable name=" << name << "; dims declared="; context.dims_msg(msg, dims_declared); msg << "; dims found="; context.dims_msg(msg, dims); throw std::runtime_error(msg.str()); } for (size_t i = 0; i < dims.size(); ++i) { if (dims_declared[i] != dims[i]) { std::stringstream msg; msg << "mismatch in dimension declared and found in context" << "; processing stage=" << stage << "; variable name=" << name << "; position=" << i << "; dims declared="; context.dims_msg(msg, dims_declared); msg << "; dims found="; context.dims_msg(msg, dims); throw std::runtime_error(msg.str()); } } } } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/chained_var_context.hpp0000644000176200001440000000511414645137105023745 0ustar liggesusers#ifndef STAN_IO_CHAINED_VAR_CONTEXT_HPP #define STAN_IO_CHAINED_VAR_CONTEXT_HPP #include #include #include #include namespace stan { namespace io { /** * A chained_var_context object represents two objects of var_context * as one. 
*/ class chained_var_context : public var_context { private: const var_context& vc1_; const var_context& vc2_; public: chained_var_context(const var_context& v1, const var_context& v2) : vc1_(v1), vc2_(v2) {} bool contains_i(const std::string& name) const { return vc1_.contains_i(name) || vc2_.contains_i(name); } bool contains_r(const std::string& name) const { return vc1_.contains_r(name) || vc2_.contains_r(name); } std::vector vals_r(const std::string& name) const { return vc1_.contains_r(name) ? vc1_.vals_r(name) : vc2_.vals_r(name); } std::vector> vals_c(const std::string& name) const { return vc1_.contains_r(name) ? vc1_.vals_c(name) : vc2_.vals_c(name); } std::vector vals_i(const std::string& name) const { return vc1_.contains_i(name) ? vc1_.vals_i(name) : vc2_.vals_i(name); } std::vector dims_r(const std::string& name) const { return vc1_.contains_r(name) ? vc1_.dims_r(name) : vc2_.dims_r(name); } std::vector dims_i(const std::string& name) const { return vc1_.contains_r(name) ? vc1_.dims_i(name) : vc2_.dims_i(name); } void names_r(std::vector& names) const { vc1_.names_r(names); std::vector names2; vc2_.names_r(names2); names.insert(names.end(), names2.begin(), names2.end()); } void names_i(std::vector& names) const { vc1_.names_i(names); std::vector names2; vc2_.names_i(names2); names.insert(names.end(), names2.begin(), names2.end()); } /** * Check variable dimensions against variable declaration. * Only used for data read in from file. * * @param stage stan program processing stage * @param name variable name * @param base_type declared stan variable type * @param dims_declared variable dimensions * @throw std::runtime_error if mismatch between declared * dimensions and dimensions found in context. 
*/ void validate_dims(const std::string& stage, const std::string& name, const std::string& base_type, const std::vector& dims_declared) const { stan::io::validate_dims(*this, stage, name, base_type, dims_declared); } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/stan_csv_reader.hpp0000644000176200001440000002402414645137105023101 0ustar liggesusers#ifndef STAN_IO_STAN_CSV_READER_HPP #define STAN_IO_STAN_CSV_READER_HPP #include #include #include #include #include #include #include #include namespace stan { namespace io { inline void prettify_stan_csv_name(std::string& variable) { if (variable.find_first_of(":.") != std::string::npos) { std::vector parts; boost::split(parts, variable, boost::is_any_of(":")); for (auto& part : parts) { int pos = part.find('.'); if (pos > 0) { part[pos] = '['; std::replace(part.begin(), part.end(), '.', ','); part += "]"; } } variable = boost::algorithm::join(parts, "."); } } // FIXME: should consolidate with the options from // the command line in stan::lang struct stan_csv_metadata { int stan_version_major; int stan_version_minor; int stan_version_patch; std::string model; std::string data; std::string init; size_t chain_id; size_t seed; bool random_seed; size_t num_samples; size_t num_warmup; bool save_warmup; size_t thin; bool append_samples; std::string algorithm; std::string engine; int max_depth; stan_csv_metadata() : stan_version_major(0), stan_version_minor(0), stan_version_patch(0), model(""), data(""), init(""), chain_id(1), seed(0), random_seed(false), num_samples(0), num_warmup(0), save_warmup(false), thin(0), append_samples(false), algorithm(""), engine(""), max_depth(10) {} }; struct stan_csv_adaptation { double step_size; Eigen::MatrixXd metric; stan_csv_adaptation() : step_size(0), metric(0, 0) {} }; struct stan_csv_timing { double warmup; double sampling; stan_csv_timing() : warmup(0), sampling(0) {} }; struct stan_csv { stan_csv_metadata metadata; std::vector header; 
stan_csv_adaptation adaptation; Eigen::MatrixXd samples; stan_csv_timing timing; }; /** * Reads from a Stan output csv file. */ class stan_csv_reader { public: stan_csv_reader() {} ~stan_csv_reader() {} static bool read_metadata(std::istream& in, stan_csv_metadata& metadata, std::ostream* out) { std::stringstream ss; std::string line; if (in.peek() != '#') return false; while (in.peek() == '#') { std::getline(in, line); ss << line << '\n'; } ss.seekg(std::ios_base::beg); char comment; std::string lhs; std::string name; std::string value; while (ss.good()) { ss >> comment; std::getline(ss, lhs); size_t equal = lhs.find("="); if (equal != std::string::npos) { name = lhs.substr(0, equal); boost::trim(name); value = lhs.substr(equal + 1, lhs.size()); boost::trim(value); boost::replace_first(value, " (Default)", ""); } else { if (lhs.compare(" data") == 0) { ss >> comment; std::getline(ss, lhs); size_t equal = lhs.find("="); if (equal != std::string::npos) { name = lhs.substr(0, equal); boost::trim(name); value = lhs.substr(equal + 2, lhs.size()); boost::replace_first(value, " (Default)", ""); } if (name.compare("file") == 0) metadata.data = value; continue; } } if (name.compare("stan_version_major") == 0) { std::stringstream(value) >> metadata.stan_version_major; } else if (name.compare("stan_version_minor") == 0) { std::stringstream(value) >> metadata.stan_version_minor; } else if (name.compare("stan_version_patch") == 0) { std::stringstream(value) >> metadata.stan_version_patch; } else if (name.compare("model") == 0) { metadata.model = value; } else if (name.compare("num_samples") == 0) { std::stringstream(value) >> metadata.num_samples; } else if (name.compare("num_warmup") == 0) { std::stringstream(value) >> metadata.num_warmup; } else if (name.compare("save_warmup") == 0) { std::stringstream(value) >> metadata.save_warmup; } else if (name.compare("thin") == 0) { std::stringstream(value) >> metadata.thin; } else if (name.compare("id") == 0) { 
std::stringstream(value) >> metadata.chain_id; } else if (name.compare("init") == 0) { metadata.init = value; boost::trim(metadata.init); } else if (name.compare("seed") == 0) { std::stringstream(value) >> metadata.seed; metadata.random_seed = false; } else if (name.compare("append_samples") == 0) { std::stringstream(value) >> metadata.append_samples; } else if (name.compare("algorithm") == 0) { metadata.algorithm = value; } else if (name.compare("engine") == 0) { metadata.engine = value; } else if (name.compare("max_depth") == 0) { std::stringstream(value) >> metadata.max_depth; } } if (ss.good() == true) return false; return true; } // read_metadata static bool read_header(std::istream& in, std::vector& header, std::ostream* out, bool prettify_name = true) { std::string line; if (!std::isalpha(in.peek())) return false; std::getline(in, line); std::stringstream ss(line); header.resize(std::count(line.begin(), line.end(), ',') + 1); int idx = 0; while (ss.good()) { std::string token; std::getline(ss, token, ','); boost::trim(token); if (prettify_name) { prettify_stan_csv_name(token); } header[idx++] = token; } return true; } static bool read_adaptation(std::istream& in, stan_csv_adaptation& adaptation, std::ostream* out) { std::stringstream ss; std::string line; int lines = 0; if (in.peek() != '#' || in.good() == false) return false; while (in.peek() == '#') { std::getline(in, line); ss << line << std::endl; lines++; } ss.seekg(std::ios_base::beg); if (lines < 4) return false; char comment; // Buffer for comment indicator, # // Skip first two lines std::getline(ss, line); // Stepsize std::getline(ss, line, '='); boost::trim(line); ss >> adaptation.step_size; // Metric parameters std::getline(ss, line); std::getline(ss, line); std::getline(ss, line); int rows = lines - 3; int cols = std::count(line.begin(), line.end(), ',') + 1; adaptation.metric.resize(rows, cols); for (int row = 0; row < rows; row++) { std::stringstream line_ss; line_ss.str(line); line_ss >> 
comment; for (int col = 0; col < cols; col++) { std::string token; std::getline(line_ss, token, ','); boost::trim(token); std::stringstream(token) >> adaptation.metric(row, col); } std::getline(ss, line); // Read in next line } if (ss.good()) return false; else return true; } static bool read_samples(std::istream& in, Eigen::MatrixXd& samples, stan_csv_timing& timing, std::ostream* out) { std::stringstream ss; std::string line; int rows = 0; int cols = -1; if (in.peek() == '#' || in.good() == false) return false; while (in.good()) { bool comment_line = (in.peek() == '#'); bool empty_line = (in.peek() == '\n'); std::getline(in, line); if (empty_line) continue; if (!line.length()) break; if (comment_line) { if (line.find("(Warm-up)") != std::string::npos) { int left = 17; int right = line.find(" seconds"); double warmup; std::stringstream(line.substr(left, right - left)) >> warmup; timing.warmup += warmup; } else if (line.find("(Sampling)") != std::string::npos) { int left = 17; int right = line.find(" seconds"); double sampling; std::stringstream(line.substr(left, right - left)) >> sampling; timing.sampling += sampling; } } else { ss << line << '\n'; int current_cols = std::count(line.begin(), line.end(), ',') + 1; if (cols == -1) { cols = current_cols; } else if (cols != current_cols) { if (out) *out << "Error: expected " << cols << " columns, but found " << current_cols << " instead for row " << rows + 1 << std::endl; return false; } rows++; } in.peek(); } ss.seekg(std::ios_base::beg); if (rows > 0) { samples.resize(rows, cols); for (int row = 0; row < rows; row++) { std::getline(ss, line); std::stringstream ls(line); for (int col = 0; col < cols; col++) { std::getline(ls, line, ','); boost::trim(line); std::stringstream(line) >> samples(row, col); } } } return true; } /** * Parses the file. 
* * @param[in] in input stream to parse * @param[out] out output stream to send messages */ static stan_csv parse(std::istream& in, std::ostream* out) { stan_csv data; if (!read_metadata(in, data.metadata, out)) { if (out) *out << "Warning: non-fatal error reading metadata" << std::endl; } if (!read_header(in, data.header, out)) { if (out) *out << "Error: error reading header" << std::endl; throw std::invalid_argument("Error with header of input file in parse"); } if (!read_adaptation(in, data.adaptation, out)) { if (out) *out << "Warning: non-fatal error reading adaptation data" << std::endl; } data.timing.warmup = 0; data.timing.sampling = 0; if (!read_samples(in, data.samples, data.timing, out)) { if (out) *out << "Warning: non-fatal error reading samples" << std::endl; } return data; } }; } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/io/validate_zero_buf.hpp0000644000176200001440000000160514645137105023423 0ustar liggesusers#ifndef STAN_IO_VALIDATE_ZERO_BUF_HPP #define STAN_IO_VALIDATE_ZERO_BUF_HPP #include #include namespace stan { namespace io { /** * Throw an bad-cast exception if the specified buffer contains * a digit other than 0 before an e or E. The buffer argument * must implement size_t size() method and char * operator[](size_t). * * @tparam B Character buffer type * @throw boost::bad_lexical_cast if the buffer * contains non-zero characters before an exponentiation symbol. 
*/ template void validate_zero_buf(const B& buf) { for (size_t i = 0; i < buf.size(); ++i) { if (buf[i] == 'e' || buf[i] == 'E') return; if (buf[i] >= '1' && buf[i] <= '9') boost::conversion::detail::throw_bad_cast(); } } } // namespace io } // namespace stan #endif StanHeaders/inst/include/src/stan/analyze/0000755000176200001440000000000014645137105020260 5ustar liggesusersStanHeaders/inst/include/src/stan/analyze/mcmc/0000755000176200001440000000000014645137105021177 5ustar liggesusersStanHeaders/inst/include/src/stan/analyze/mcmc/compute_effective_sample_size.hpp0000644000176200001440000002070414645137105030002 0ustar liggesusers#ifndef STAN_ANALYZE_MCMC_COMPUTE_EFFECTIVE_SAMPLE_SIZE_HPP #define STAN_ANALYZE_MCMC_COMPUTE_EFFECTIVE_SAMPLE_SIZE_HPP #include #include #include #include #include #include #include namespace stan { namespace analyze { /** * Computes the effective sample size (ESS) for the specified * parameter across all kept samples. The value returned is the * minimum of ESS and the number_total_draws * * log10(number_total_draws). * * See more details in Stan reference manual section "Effective * Sample Size". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. Chains are trimmed from the back to match the * length of the shortest chain. Note that the effective sample size * can not be estimated with less than four draws. 
* * @param draws stores pointers to arrays of chains * @param sizes stores sizes of chains * @return effective sample size for the specified parameter */ inline double compute_effective_sample_size(std::vector draws, std::vector sizes) { int num_chains = sizes.size(); size_t num_draws = sizes[0]; for (int chain = 1; chain < num_chains; ++chain) { num_draws = std::min(num_draws, sizes[chain]); } if (num_draws < 4) { return std::numeric_limits::quiet_NaN(); } // check if chains are constant; all equal to first draw's value bool are_all_const = false; Eigen::VectorXd init_draw = Eigen::VectorXd::Zero(num_chains); for (int chain_idx = 0; chain_idx < num_chains; chain_idx++) { Eigen::Map> draw( draws[chain_idx], sizes[chain_idx]); for (int n = 0; n < num_draws; n++) { if (!std::isfinite(draw(n))) { return std::numeric_limits::quiet_NaN(); } } init_draw(chain_idx) = draw(0); if (draw.isApproxToConstant(draw(0))) { are_all_const |= true; } } if (are_all_const) { // If all chains are constant then return NaN // if they all equal the same constant value if (init_draw.isApproxToConstant(init_draw(0))) { return std::numeric_limits::quiet_NaN(); } } Eigen::Matrix acov(num_chains); Eigen::VectorXd chain_mean(num_chains); Eigen::VectorXd chain_var(num_chains); for (int chain = 0; chain < num_chains; ++chain) { Eigen::Map> draw( draws[chain], sizes[chain]); autocovariance(draw, acov(chain)); chain_mean(chain) = draw.mean(); chain_var(chain) = acov(chain)(0) * num_draws / (num_draws - 1); } double mean_var = chain_var.mean(); double var_plus = mean_var * (num_draws - 1) / num_draws; if (num_chains > 1) var_plus += math::variance(chain_mean); Eigen::VectorXd rho_hat_s(num_draws); rho_hat_s.setZero(); Eigen::VectorXd acov_s(num_chains); for (int chain = 0; chain < num_chains; ++chain) acov_s(chain) = acov(chain)(1); double rho_hat_even = 1.0; rho_hat_s(0) = rho_hat_even; double rho_hat_odd = 1 - (mean_var - acov_s.mean()) / var_plus; rho_hat_s(1) = rho_hat_odd; // Convert raw 
autocovariance estimators into Geyer's initial // positive sequence. Loop only until num_draws - 4 to // leave the last pair of autocorrelations as a bias term that // reduces variance in the case of antithetical chains. size_t s = 1; while (s < (num_draws - 4) && (rho_hat_even + rho_hat_odd) > 0) { for (int chain = 0; chain < num_chains; ++chain) acov_s(chain) = acov(chain)(s + 1); rho_hat_even = 1 - (mean_var - acov_s.mean()) / var_plus; for (int chain = 0; chain < num_chains; ++chain) acov_s(chain) = acov(chain)(s + 2); rho_hat_odd = 1 - (mean_var - acov_s.mean()) / var_plus; if ((rho_hat_even + rho_hat_odd) >= 0) { rho_hat_s(s + 1) = rho_hat_even; rho_hat_s(s + 2) = rho_hat_odd; } s += 2; } int max_s = s; // this is used in the improved estimate, which reduces variance // in antithetic case -- see tau_hat below if (rho_hat_even > 0) rho_hat_s(max_s + 1) = rho_hat_even; // Convert Geyer's initial positive sequence into an initial // monotone sequence for (int s = 1; s <= max_s - 3; s += 2) { if (rho_hat_s(s + 1) + rho_hat_s(s + 2) > rho_hat_s(s - 1) + rho_hat_s(s)) { rho_hat_s(s + 1) = (rho_hat_s(s - 1) + rho_hat_s(s)) / 2; rho_hat_s(s + 2) = rho_hat_s(s + 1); } } double num_total_draws = num_chains * num_draws; // Geyer's truncated estimator for the asymptotic variance // Improved estimate reduces variance in antithetic case double tau_hat = -1 + 2 * rho_hat_s.head(max_s).sum() + rho_hat_s(max_s + 1); return std::min(num_total_draws / tau_hat, num_total_draws * std::log10(num_total_draws)); } /** * Computes the effective sample size (ESS) for the specified * parameter across all kept samples. The value returned is the * minimum of ESS and the number_total_draws * * log10(number_total_draws). * * See more details in Stan reference manual section "Effective * Sample Size". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. 
Chains are trimmed from the back to match the * length of the shortest chain. Note that the effective sample size * can not be estimated with less than four draws. Argument size * will be broadcast to same length as draws. * * @param draws stores pointers to arrays of chains * @param size size of chains * @return effective sample size for the specified parameter */ inline double compute_effective_sample_size(std::vector draws, size_t size) { int num_chains = draws.size(); std::vector sizes(num_chains, size); return compute_effective_sample_size(draws, sizes); } /** * Computes the split effective sample size (ESS) for the specified * parameter across all kept samples. The value returned is the * minimum of ESS and the number_total_draws * * log10(number_total_draws). When the number of total draws N is * odd, the (N+1)/2th draw is ignored. * * See more details in Stan reference manual section "Effective * Sample Size". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. Chains are trimmed from the back to match the * length of the shortest chain. Note that the effective sample size * can not be estimated with less than four draws. * * @param draws stores pointers to arrays of chains * @param sizes stores sizes of chains * @return effective sample size for the specified parameter */ inline double compute_split_effective_sample_size( std::vector draws, std::vector sizes) { int num_chains = sizes.size(); size_t num_draws = sizes[0]; for (int chain = 1; chain < num_chains; ++chain) { num_draws = std::min(num_draws, sizes[chain]); } std::vector split_draws = split_chains(draws, sizes); double half = num_draws / 2.0; std::vector half_sizes(2 * num_chains, std::floor(half)); return compute_effective_sample_size(split_draws, half_sizes); } /** * Computes the split effective sample size (ESS) for the specified * parameter across all kept samples. 
The value returned is the * minimum of ESS and the number_total_draws * * log10(number_total_draws). When the number of total draws N is * odd, the (N+1)/2th draw is ignored. * * See more details in Stan reference manual section "Effective * Sample Size". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. Chains are trimmed from the back to match the * length of the shortest chain. Note that the effective sample size * can not be estimated with less than four draws. Argument size * will be broadcast to same length as draws. * * @param draws stores pointers to arrays of chains * @param size size of chains * @return effective sample size for the specified parameter */ inline double compute_split_effective_sample_size( std::vector draws, size_t size) { int num_chains = draws.size(); std::vector sizes(num_chains, size); return compute_split_effective_sample_size(draws, sizes); } } // namespace analyze } // namespace stan #endif StanHeaders/inst/include/src/stan/analyze/mcmc/split_chains.hpp0000644000176200001440000000430314645137105024370 0ustar liggesusers#ifndef STAN_ANALYZE_MCMC_SPLIT_CHAINS_HPP #define STAN_ANALYZE_MCMC_SPLIT_CHAINS_HPP #include #include #include namespace stan { namespace analyze { /** * Splits each chain into two chains of equal length. When the * number of total draws N is odd, the (N+1)/2th draw is ignored. * * See more details in Stan reference manual section "Effective * Sample Size". http://mc-stan.org/users/documentation * * Current implementation assumes chains are all of equal size and * draws are stored in contiguous blocks of memory. 
* * @param draws stores pointers to arrays of chains * @param sizes stores sizes of chains * @return std::vector of pointers to twice as many arrays of half chains */ inline std::vector split_chains( const std::vector& draws, const std::vector& sizes) { int num_chains = sizes.size(); size_t num_draws = sizes[0]; for (int chain = 1; chain < num_chains; ++chain) { num_draws = std::min(num_draws, sizes[chain]); } double half = num_draws / 2.0; int half_draws = std::ceil(half); std::vector split_draws(2 * num_chains); for (int n = 0; n < num_chains; ++n) { split_draws[2 * n] = &draws[n][0]; split_draws[2 * n + 1] = &draws[n][half_draws]; } return split_draws; } /** * Splits each chain into two chains of equal length. When the * number of total draws N is odd, the (N+1)/2th draw is ignored. * * See more details in Stan reference manual section "Effective * Sample Size". http://mc-stan.org/users/documentation * * Current implementation assumes chains are all of equal size and * draws are stored in contiguous blocks of memory. Argument size * will be broadcast to same length as draws. * * @param draws stores pointers to arrays of chains * @param size size of chains * @return std::vector of pointers to twice as many arrays of half chains */ inline std::vector split_chains(std::vector draws, size_t size) { int num_chains = draws.size(); std::vector sizes(num_chains, size); return split_chains(draws, sizes); } } // namespace analyze } // namespace stan #endif StanHeaders/inst/include/src/stan/analyze/mcmc/compute_potential_scale_reduction.hpp0000644000176200001440000001440414645137105030671 0ustar liggesusers#ifndef STAN_ANALYZE_MCMC_COMPUTE_POTENTIAL_SCALE_REDUCTION_HPP #define STAN_ANALYZE_MCMC_COMPUTE_POTENTIAL_SCALE_REDUCTION_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace analyze { /** * Computes the potential scale reduction (Rhat) for the specified * parameter across all kept samples. 
* * See more details in Stan reference manual section "Potential * Scale Reduction". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. Chains are trimmed from the back to match the * length of the shortest chain. * * @param draws stores pointers to arrays of chains * @param sizes stores sizes of chains * @return potential scale reduction for the specified parameter */ inline double compute_potential_scale_reduction( std::vector draws, std::vector sizes) { int num_chains = sizes.size(); size_t num_draws = sizes[0]; for (int chain = 1; chain < num_chains; ++chain) { num_draws = std::min(num_draws, sizes[chain]); } // check if chains are constant; all equal to first draw's value bool are_all_const = false; Eigen::VectorXd init_draw = Eigen::VectorXd::Zero(num_chains); for (int chain = 0; chain < num_chains; chain++) { Eigen::Map> draw( draws[chain], sizes[chain]); for (int n = 0; n < num_draws; n++) { if (!std::isfinite(draw(n))) { return std::numeric_limits::quiet_NaN(); } } init_draw(chain) = draw(0); if (draw.isApproxToConstant(draw(0))) { are_all_const |= true; } } if (are_all_const) { // If all chains are constant then return NaN // if they all equal the same constant value if (init_draw.isApproxToConstant(init_draw(0))) { return std::numeric_limits::quiet_NaN(); } } using boost::accumulators::accumulator_set; using boost::accumulators::stats; using boost::accumulators::tag::mean; using boost::accumulators::tag::variance; Eigen::VectorXd chain_mean(num_chains); accumulator_set> acc_chain_mean; Eigen::VectorXd chain_var(num_chains); double unbiased_var_scale = num_draws / (num_draws - 1.0); for (int chain = 0; chain < num_chains; ++chain) { accumulator_set> acc_draw; for (int n = 0; n < num_draws; ++n) { acc_draw(draws[chain][n]); } chain_mean(chain) = boost::accumulators::mean(acc_draw); acc_chain_mean(chain_mean(chain)); chain_var(chain) = boost::accumulators::variance(acc_draw) * 
unbiased_var_scale; } double var_between = num_draws * boost::accumulators::variance(acc_chain_mean) * num_chains / (num_chains - 1); double var_within = chain_var.mean(); // rewrote [(n-1)*W/n + B/n]/W as (n-1+ B/W)/n return sqrt((var_between / var_within + num_draws - 1) / num_draws); } /** * Computes the potential scale reduction (Rhat) for the specified * parameter across all kept samples. * * See more details in Stan reference manual section "Potential * Scale Reduction". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. Chains are trimmed from the back to match the * length of the shortest chain. Argument size will be broadcast to * same length as draws. * * @param draws stores pointers to arrays of chains * @param size stores sizes of chains * @return potential scale reduction for the specified parameter */ inline double compute_potential_scale_reduction( std::vector draws, size_t size) { int num_chains = draws.size(); std::vector sizes(num_chains, size); return compute_potential_scale_reduction(draws, sizes); } /** * Computes the split potential scale reduction (Rhat) for the * specified parameter across all kept samples. When the number of * total draws N is odd, the (N+1)/2th draw is ignored. * * See more details in Stan reference manual section "Potential * Scale Reduction". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. Chains are trimmed from the back to match the * length of the shortest chain. 
* * @param draws stores pointers to arrays of chains * @param sizes stores sizes of chains * @return potential scale reduction for the specified parameter */ inline double compute_split_potential_scale_reduction( std::vector draws, std::vector sizes) { int num_chains = sizes.size(); size_t num_draws = sizes[0]; for (int chain = 1; chain < num_chains; ++chain) { num_draws = std::min(num_draws, sizes[chain]); } std::vector split_draws = split_chains(draws, sizes); double half = num_draws / 2.0; std::vector half_sizes(2 * num_chains, std::floor(half)); return compute_potential_scale_reduction(split_draws, half_sizes); } /** * Computes the split potential scale reduction (Rhat) for the * specified parameter across all kept samples. When the number of * total draws N is odd, the (N+1)/2th draw is ignored. * * See more details in Stan reference manual section "Potential * Scale Reduction". http://mc-stan.org/users/documentation * * Current implementation assumes draws are stored in contiguous * blocks of memory. Chains are trimmed from the back to match the * length of the shortest chain. Argument size will be broadcast to * same length as draws. 
* * @param draws stores pointers to arrays of chains * @param size stores sizes of chains * @return potential scale reduction for the specified parameter */ inline double compute_split_potential_scale_reduction( std::vector draws, size_t size) { int num_chains = draws.size(); std::vector sizes(num_chains, size); return compute_split_potential_scale_reduction(draws, sizes); } } // namespace analyze } // namespace stan #endif StanHeaders/inst/include/src/stan/analyze/mcmc/autocovariance.hpp0000644000176200001440000001073114645137105024715 0ustar liggesusers#ifndef STAN_ANALYZE_MCMC_AUTOCOVARIANCE_HPP #define STAN_ANALYZE_MCMC_AUTOCOVARIANCE_HPP #include #include #include #include #include #include #include namespace stan { namespace analyze { /** * Write autocorrelation estimates for every lag for the specified * input sequence into the specified result using the specified FFT * engine. Normalizes lag-k autocorrelation estimators by N instead * of (N - k), yielding biased but more stable estimators as * discussed in Geyer (1992); see * https://projecteuclid.org/euclid.ss/1177011137. The return vector * will be resized to the same length as the input sequence with * lags given by array index. * *
 * <p>The implementation involves a fast Fourier transform,
 * followed by a normalization, followed by an inverse transform.
 *
 * <p>An FFT engine can be created for reuse for type double with:
 *
 * <pre>
 *     Eigen::FFT<double> fft;
 * </pre>
* * @tparam T Scalar type. * @param y Input sequence. * @param ac Autocorrelations. * @param fft FFT engine instance. */ template void autocorrelation(const Eigen::MatrixBase& y, Eigen::MatrixBase& ac, Eigen::FFT& fft) { size_t N = y.size(); size_t M = math::internal::fft_next_good_size(N); size_t Mt2 = 2 * M; // centered_signal = y-mean(y) followed by N zeros Eigen::Matrix centered_signal(Mt2); centered_signal.setZero(); centered_signal.head(N) = y.array() - y.mean(); Eigen::Matrix, Eigen::Dynamic, 1> freqvec(Mt2); fft.fwd(freqvec, centered_signal); // cwiseAbs2 == norm freqvec = freqvec.cwiseAbs2(); Eigen::Matrix, Eigen::Dynamic, 1> ac_tmp(Mt2); fft.inv(ac_tmp, freqvec); // use "biased" estimate as recommended by Geyer (1992) ac = ac_tmp.head(N).real().array() / (N * N * 2); ac /= ac(0); } /** * Write autocovariance estimates for every lag for the specified * input sequence into the specified result using the specified FFT * engine. Normalizes lag-k autocovariance estimators by N instead * of (N - k), yielding biased but more stable estimators as * discussed in Geyer (1992); see * https://projecteuclid.org/euclid.ss/1177011137. The return vector * will be resized to the same length as the input sequence with * lags given by array index. * *
 * <p>The implementation involves a fast Fourier transform,
 * followed by a normalization, followed by an inverse transform.
 *
 * <p>
This method is just a light wrapper
 * around the three-argument autocorrelation function, rescaling the
 * normalized autocorrelations by the sample variance.
 *
 * @tparam T Scalar type.
 * @param y Input sequence.
 * @param acov Autocovariances.
 */
template <typename T, typename DerivedA, typename DerivedB>
void autocovariance(const Eigen::MatrixBase<DerivedA>& y,
                    Eigen::MatrixBase<DerivedB>& acov) {
  Eigen::FFT<T> fft;
  autocorrelation(y, acov, fft);

  using boost::accumulators::accumulator_set;
  using boost::accumulators::stats;
  using boost::accumulators::tag::variance;

  // sample variance of y, accumulated in a single pass
  accumulator_set<double, stats<variance>> acc;
  for (int n = 0; n < y.size(); ++n) {
    acc(y(n));
  }

  // autocovariance = autocorrelation * variance
  acov = acov.array() * boost::accumulators::variance(acc);
}

/**
 * Write autocovariance estimates for every lag for the specified
 * input sequence into the specified result using the specified FFT
 * engine.  Normalizes lag-k autocovariance estimators by N instead
 * of (N - k), yielding biased but more stable estimators as
 * discussed in Geyer (1992); see
 * https://projecteuclid.org/euclid.ss/1177011137.  The return vector
 * will be resized to the same length as the input sequence with
 * lags given by array index.
 *
 * <p>The implementation involves a fast Fourier transform,
 * followed by a normalization, followed by an inverse transform.
 *
 * <p>
This method is just a light wrapper around the three-argument * autocovariance function * * @tparam T Scalar type. * @param y Input sequence. * @param acov Autocovariances. */ template void autocovariance(const std::vector& y, std::vector& acov) { size_t N = y.size(); acov.resize(N); const Eigen::Map> y_map(&y[0], N); Eigen::Map> acov_map(&acov[0], N); autocovariance(y_map, acov_map); } } // namespace analyze } // namespace stan #endif StanHeaders/inst/include/src/stan/model/0000755000176200001440000000000014645137105017715 5ustar liggesusersStanHeaders/inst/include/src/stan/model/model_base_crtp.hpp0000644000176200001440000003044014645137105023551 0ustar liggesusers#ifndef STAN_MODEL_MODEL_BASE_CRTP_HPP #define STAN_MODEL_MODEL_BASE_CRTP_HPP #include #ifdef STAN_MODEL_FVAR_VAR #include #endif #include #include #include namespace stan { namespace model { /** * Base class employing the curiously recursive template pattern for * static inheritance to adapt templated `log_prob` and `write_array` * methods to their untemplated virtual counterparts declared in * `model_base`. * * The derived class `M` is required to implement the following two * pairs of template functions, * * ``` * template * T log_prob(std::vector& params_r, * std::vector& params_i, * std::ostream* msgs = 0) const; * template * T log_prob(Eigen::Matrix& params_r, * std::ostream* msgs = 0) const; * ``` * * and * * ``` * template * void write_array(RNG& base_rng, * std::vector& params_r, * std::vector& params_i, * std::vector& vars, * bool include_tparams = true, * bool include_gqs = true, * std::ostream* msgs = 0) const; * * template * void write_array(RNG& base_rng, * Eigen::Matrix& params_r, * Eigen::Matrix& vars, * bool include_tparams = true, * bool include_gqs = true, * std::ostream* msgs = 0) const * ``` * *

The derived class `M` must be declared following the curiously * recursive template pattern, for example, if `M` is `foo_model`, * then `foo_model` should be declared as * * ``` * class foo_model : public stan::model::model_base_crtp { ... }; * ``` * * The recursion arises when the type of the declared class appears as * a template parameter in the class it extends. For example, * `foo_model` is declared to extend `model_base_crtp`. In * general, the template parameter `M` for this class is called the * derived class, and must be declared to extend `foo_model`. * * @tparam M type of derived model, which must implemented the * template methods defined in the class documentation */ template class model_base_crtp : public stan::model::model_base { public: /** * Construct a model with the specified number of real unconstrained * parameters. * * @param[in] num_params_r number of real unconstrained parameters */ explicit model_base_crtp(size_t num_params_r) : model_base(num_params_r) {} /** * Destroy this class. This is required to be virtual to allow * subclass references to clean up superclasses, but is otherwise a * no-op. 
*/ virtual ~model_base_crtp() {} inline double log_prob(Eigen::VectorXd& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, msgs); } inline math::var log_prob(Eigen::Matrix& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob(theta, msgs); } inline double log_prob_jacobian(Eigen::VectorXd& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob(theta, msgs); } inline math::var log_prob_jacobian(Eigen::Matrix& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob(theta, msgs); } inline double log_prob_propto(Eigen::VectorXd& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob(theta, msgs); } inline math::var log_prob_propto(Eigen::Matrix& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob(theta, msgs); } inline double log_prob_propto_jacobian(Eigen::VectorXd& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob(theta, msgs); } inline math::var log_prob_propto_jacobian( Eigen::Matrix& theta, std::ostream* msgs) const override { return static_cast(this)->template log_prob(theta, msgs); } void write_array(boost::ecuyer1988& rng, Eigen::VectorXd& theta, Eigen::VectorXd& vars, bool include_tparams = true, bool include_gqs = true, std::ostream* msgs = 0) const override { return static_cast(this)->write_array( rng, theta, vars, include_tparams, include_gqs, msgs); } void unconstrain_array(const Eigen::VectorXd& params_r_constrained, Eigen::VectorXd& params_r, std::ostream* msgs = nullptr) const override { return static_cast(this)->unconstrain_array(params_r_constrained, params_r, msgs); } // TODO(carpenter): remove redundant std::vector methods below here ===== // ====================================================================== inline double log_prob(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const 
override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } inline math::var log_prob(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } inline double log_prob_jacobian(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } inline math::var log_prob_jacobian(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } inline double log_prob_propto(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } inline math::var log_prob_propto(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } inline double log_prob_propto_jacobian(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } inline math::var log_prob_propto_jacobian(std::vector& theta, std::vector& theta_i, std::ostream* msgs) const override { return static_cast(this)->template log_prob( theta, theta_i, msgs); } void write_array(boost::ecuyer1988& rng, std::vector& theta, std::vector& theta_i, std::vector& vars, bool include_tparams = true, bool include_gqs = true, std::ostream* msgs = 0) const override { return static_cast(this)->write_array( rng, theta, theta_i, vars, include_tparams, include_gqs, msgs); } void unconstrain_array(const std::vector& params_r_constrained, std::vector& params_r, std::ostream* msgs = nullptr) const override { return static_cast(this)->unconstrain_array(params_r_constrained, params_r, msgs); } void transform_inits(const io::var_context& context, Eigen::VectorXd& params_r, std::ostream* msgs) const override { 
return static_cast(this)->transform_inits(context, params_r, msgs); } #ifdef STAN_MODEL_FVAR_VAR /** * Return the log density for the specified unconstrained * parameters, without Jacobian and with normalizing constants for * probability functions. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ inline math::fvar log_prob( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const override { return static_cast(this)->template log_prob( params_r, msgs); } /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and with * normalizing constants for probability functions. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ inline math::fvar log_prob_jacobian( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const override { return static_cast(this)->template log_prob(params_r, msgs); } /** * Return the log density for the specified unconstrained * parameters, without Jacobian correction for constraints and * dropping normalizing constants. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ inline math::fvar log_prob_propto( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const override { return static_cast(this)->template log_prob(params_r, msgs); } /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and dropping * normalizing constants. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ inline math::fvar log_prob_propto_jacobian( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const override { return static_cast(this)->template log_prob(params_r, msgs); } #endif }; } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/rethrow_located.hpp0000644000176200001440000000771314645137105023623 0ustar liggesusers#ifndef STAN_MODEL_RETHROW_LOCATED_HPP #define STAN_MODEL_RETHROW_LOCATED_HPP #include #include #include #include #include #include #include namespace stan { namespace lang { /** * Returns true if the specified exception can be dynamically * cast to the template parameter type. * * @tparam E Type to test. * @param[in] e Exception to test. * @return true if exception can be dynamically cast to type. */ template bool is_type(const std::exception& e) { try { (void)dynamic_cast(e); return true; } catch (...) { return false; } } /** * Structure for a located exception for standard library * exception types that have no what-based constructors. * * @param E Type of original exception. */ template struct located_exception : public E { std::string what_; /** * Construct a located exception with no what message. */ located_exception() throw() : what_("") {} /** * Construct a located exception with the specified what * message and specified original type. * * @param[in] what Original what message. * @param[in] orig_type Original type. */ located_exception(const std::string& what, const std::string& orig_type) throw() : what_(what + " [origin: " + orig_type + "]") {} /** * Destroy a located exception. 
*/ ~located_exception() throw() {} /** * Return the character sequence describing the exception, * including the original what message and original type if * constructed with such. * * @return Description of exception. */ const char* what() const throw() { return what_.c_str(); } }; /** * Rethrow an exception of type specified by the dynamic type of * the specified exception, adding the specified source file location to * the specified exception's message. * * @param[in] e original exception * @param[in] location string representing the source file location */ [[noreturn]] inline void rethrow_located(const std::exception& e, std::string location) { using std::bad_alloc; // -> exception using std::bad_cast; // -> exception using std::bad_exception; // -> exception using std::bad_typeid; // -> exception using std::domain_error; // -> logic_error using std::exception; using std::invalid_argument; // -> logic_error using std::ios_base; // ::failure -> exception using std::length_error; // -> logic_error using std::logic_error; // -> exception using std::out_of_range; // -> logic_error using std::overflow_error; // -> runtime_error using std::range_error; // -> runtime_error using std::runtime_error; // -> exception using std::underflow_error; // -> runtime_error // create message with trace of includes and location of error std::stringstream o; o << "Exception: " << e.what() << location; std::string s = o.str(); if (is_type(e)) throw located_exception(s, "bad_alloc"); if (is_type(e)) throw located_exception(s, "bad_cast"); if (is_type(e)) throw located_exception(s, "bad_exception"); if (is_type(e)) throw located_exception(s, "bad_typeid"); if (is_type(e)) throw domain_error(s); if (is_type(e)) throw invalid_argument(s); if (is_type(e)) throw length_error(s); if (is_type(e)) throw out_of_range(s); if (is_type(e)) throw logic_error(s); if (is_type(e)) throw overflow_error(s); if (is_type(e)) throw range_error(s); if (is_type(e)) throw underflow_error(s); if (is_type(e)) 
throw runtime_error(s); throw located_exception(s, "unknown original type"); } } // namespace lang } // namespace stan #endif StanHeaders/inst/include/src/stan/model/model_header.hpp0000644000176200001440000000122614645137105023037 0ustar liggesusers#ifndef STAN_MODEL_MODEL_HEADER_HPP #define STAN_MODEL_MODEL_HEADER_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/src/stan/model/grad_tr_mat_times_hessian.hpp0000644000176200001440000000127714645137105025633 0ustar liggesusers#ifndef STAN_MODEL_GRAD_TR_MAT_TIMES_HESSIAN_HPP #define STAN_MODEL_GRAD_TR_MAT_TIMES_HESSIAN_HPP #include #include #include namespace stan { namespace model { template void grad_tr_mat_times_hessian( const M& model, const Eigen::Matrix& x, const Eigen::Matrix& X, Eigen::Matrix& grad_tr_X_hess_f, std::ostream* msgs = 0) { stan::math::grad_tr_mat_times_hessian(model_functional(model, msgs), x, X, grad_tr_X_hess_f); } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/log_prob_grad.hpp0000644000176200001440000000604114645137105023227 0ustar liggesusers#ifndef STAN_MODEL_LOG_PROB_GRAD_HPP #define STAN_MODEL_LOG_PROB_GRAD_HPP #include #include #include namespace stan { namespace model { /** * Compute the gradient using reverse-mode automatic * differentiation, writing the result into the specified * gradient, using the specified perturbation. * * @tparam propto True if calculation is up to proportion * (double-only terms dropped). * @tparam jacobian_adjust_transform True if the log absolute * Jacobian determinant of inverse parameter transforms is added to * the log probability. * @tparam M Class of model. * @param[in] model Model. * @param[in] params_r Real-valued parameters. * @param[in] params_i Integer-valued parameters. * @param[out] gradient Vector into which gradient is written. 
* @param[in,out] msgs */ template double log_prob_grad(const M& model, std::vector& params_r, std::vector& params_i, std::vector& gradient, std::ostream* msgs = 0) { using stan::math::var; using std::vector; try { vector ad_params_r(params_r.size()); for (size_t i = 0; i < model.num_params_r(); ++i) { stan::math::var var_i(params_r[i]); ad_params_r[i] = var_i; } var adLogProb = model.template log_prob( ad_params_r, params_i, msgs); double lp = adLogProb.val(); adLogProb.grad(ad_params_r, gradient); stan::math::recover_memory(); return lp; } catch (const std::exception& ex) { stan::math::recover_memory(); throw; } } /** * Compute the gradient using reverse-mode automatic * differentiation, writing the result into the specified * gradient, using the specified perturbation. * * @tparam propto True if calculation is up to proportion * (double-only terms dropped). * @tparam jacobian_adjust_transform True if the log absolute * Jacobian determinant of inverse parameter transforms is added to * the log probability. * @tparam M Class of model. * @param[in] model Model. * @param[in] params_r Real-valued parameters. * @param[out] gradient Vector into which gradient is written. 
* @param[in,out] msgs */ template double log_prob_grad(const M& model, Eigen::VectorXd& params_r, Eigen::VectorXd& gradient, std::ostream* msgs = 0) { using stan::math::var; using std::vector; try { Eigen::Matrix ad_params_r(params_r.size()); for (size_t i = 0; i < model.num_params_r(); ++i) { stan::math::var var_i(params_r[i]); ad_params_r[i] = var_i; } var adLogProb = model.template log_prob( ad_params_r, msgs); double val = adLogProb.val(); stan::math::grad(adLogProb, ad_params_r, gradient); stan::math::recover_memory(); return val; } catch (std::exception& ex) { stan::math::recover_memory(); throw; } } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/hessian_times_vector.hpp0000644000176200001440000000123714645137105024646 0ustar liggesusers#ifndef STAN_MODEL_HESSIAN_TIMES_VECTOR_HPP #define STAN_MODEL_HESSIAN_TIMES_VECTOR_HPP #include #include #include namespace stan { namespace model { template void hessian_times_vector( const M& model, const Eigen::Matrix& x, const Eigen::Matrix& v, double& f, Eigen::Matrix& hess_f_dot_v, std::ostream* msgs = 0) { stan::math::hessian_times_vector(model_functional(model, msgs), x, v, f, hess_f_dot_v); } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/prob_grad.hpp0000644000176200001440000000443714645137105022375 0ustar liggesusers#ifndef STAN_MODEL_PROB_GRAD_HPP #define STAN_MODEL_PROB_GRAD_HPP #include #include #include #include #include #include namespace stan { namespace model { /** * Base class for models, holding the basic parameter sizes and * ranges for integer parameters. */ class prob_grad { protected: // TODO(carpenter): roll into model_base; remove int members/vars size_t num_params_r__; std::vector > param_ranges_i__; public: /** * Construct a model base class with the specified number of * unconstrained real parameters. 
* * @param num_params_r number of unconstrained real parameters */ explicit prob_grad(size_t num_params_r) : num_params_r__(num_params_r), param_ranges_i__(std::vector >(0)) {} /** * Construct a model base class with the specified number of * unconstrained real parameters and integer parameter ranges. * * @param num_params_r number of unconstrained real parameters * @param param_ranges_i integer parameter ranges */ prob_grad(size_t num_params_r, std::vector >& param_ranges_i) : num_params_r__(num_params_r), param_ranges_i__(param_ranges_i) {} /** * Destroy this class. */ virtual ~prob_grad() {} /** * Return number of unconstrained real parameters. * * @return number of unconstrained real parameters */ inline size_t num_params_r() const { return num_params_r__; } /** * Return number of integer parameters. * * @return number of integer parameters */ inline size_t num_params_i() const { return param_ranges_i__.size(); } /** * Return the ordered parameter range for the specified integer * variable. * * @param idx index of integer variable * @throw std::out_of_range if there index is beyond the range * of integer indexes * @return ordered pair of ranges */ inline std::pair param_range_i(size_t idx) const { if (idx <= param_ranges_i__.size()) { std::stringstream ss; ss << "param_range_i(): No integer paramter at index " << idx; std::string msg = ss.str(); throw std::out_of_range(msg); } return param_ranges_i__[idx]; } }; } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/finite_diff_grad.hpp0000644000176200001440000000366214645137105023700 0ustar liggesusers#ifndef STAN_MODEL_FINITE_DIFF_GRAD_HPP #define STAN_MODEL_FINITE_DIFF_GRAD_HPP #include #include #include namespace stan { namespace model { /** * Compute the gradient using finite differences for * the specified parameters, writing the result into the * specified gradient, using the specified perturbation. 
* * @tparam propto True if calculation is up to proportion * (double-only terms dropped). * @tparam jacobian_adjust_transform True if the log absolute * Jacobian determinant of inverse parameter transforms is added to the * log probability. * @tparam M Class of model. * @param model Model. * @param interrupt interrupt callback to be called before calculating * the finite differences for each parameter. * @param params_r Real-valued parameters. * @param params_i Integer-valued parameters. * @param[out] grad Vector into which gradient is written. * @param epsilon * @param[in,out] msgs */ template void finite_diff_grad(const M& model, stan::callbacks::interrupt& interrupt, std::vector& params_r, std::vector& params_i, std::vector& grad, double epsilon = 1e-6, std::ostream* msgs = 0) { std::vector perturbed(params_r); grad.resize(params_r.size()); for (size_t k = 0; k < params_r.size(); k++) { interrupt(); perturbed[k] += epsilon; double logp_plus = model.template log_prob( perturbed, params_i, msgs); perturbed[k] = params_r[k] - epsilon; double logp_minus = model.template log_prob( perturbed, params_i, msgs); double gradest = (logp_plus - logp_minus) / (2 * epsilon); grad[k] = gradest; perturbed[k] = params_r[k]; } } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/gradient.hpp0000644000176200001440000000212114645137105022217 0ustar liggesusers#ifndef STAN_MODEL_GRADIENT_HPP #define STAN_MODEL_GRADIENT_HPP #include #include #include #include #include #include namespace stan { namespace model { template void gradient(const M& model, const Eigen::Matrix& x, double& f, Eigen::Matrix& grad_f, std::ostream* msgs = 0) { stan::math::gradient(model_functional(model, msgs), x, f, grad_f); } template void gradient(const M& model, const Eigen::Matrix& x, double& f, Eigen::Matrix& grad_f, callbacks::logger& logger) { std::stringstream ss; try { stan::math::gradient(model_functional(model, &ss), x, f, grad_f); } catch (std::exception& e) { if 
(ss.str().length() > 0) logger.info(ss); throw; } if (ss.str().length() > 0) logger.info(ss); } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/log_prob_propto.hpp0000644000176200001440000000632414645137105023641 0ustar liggesusers#ifndef STAN_MODEL_LOG_PROB_PROPTO_HPP #define STAN_MODEL_LOG_PROB_PROPTO_HPP #include #include #include namespace stan { namespace model { /** * Helper function to calculate log probability for * double scalars up to a proportion. * * This implementation wraps the double values in * stan::math::var and calls the model's * log_prob() function with propto=true * and the specified parameter for applying the Jacobian * adjustment for transformed parameters. * * @tparam propto True if calculation is up to proportion * (double-only terms dropped). * @tparam jacobian_adjust_transform True if the log absolute * Jacobian determinant of inverse parameter transforms is added to * the log probability. * @tparam M Class of model. * @param[in] model Model. * @param[in] params_r Real-valued parameters. * @param[in] params_i Integer-valued parameters. * @param[in,out] msgs */ template double log_prob_propto(const M& model, std::vector& params_r, std::vector& params_i, std::ostream* msgs = 0) { using stan::math::var; using std::vector; try { vector ad_params_r; ad_params_r.reserve(model.num_params_r()); for (size_t i = 0; i < model.num_params_r(); ++i) ad_params_r.push_back(params_r[i]); double lp = model .template log_prob( ad_params_r, params_i, msgs) .val(); stan::math::recover_memory(); return lp; } catch (std::exception& ex) { stan::math::recover_memory(); throw; } } /** * Helper function to calculate log probability for * double scalars up to a proportion. * * This implementation wraps the double values in * stan::math::var and calls the model's * log_prob() function with propto=true * and the specified parameter for applying the Jacobian * adjustment for transformed parameters. 
* * @tparam propto True if calculation is up to proportion * (double-only terms dropped). * @tparam jacobian_adjust_transform True if the log absolute * Jacobian determinant of inverse parameter transforms is added to * the log probability. * @tparam M Class of model. * @param[in] model Model. * @param[in] params_r Real-valued parameters. * @param[in,out] msgs */ template double log_prob_propto(const M& model, Eigen::VectorXd& params_r, std::ostream* msgs = 0) { using stan::math::var; using std::vector; vector params_i(0); try { vector ad_params_r; ad_params_r.reserve(model.num_params_r()); for (size_t i = 0; i < model.num_params_r(); ++i) ad_params_r.push_back(params_r(i)); double lp = model .template log_prob( ad_params_r, params_i, msgs) .val(); stan::math::recover_memory(); return lp; } catch (std::exception& ex) { stan::math::recover_memory(); throw; } } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/grad_hess_log_prob.hpp0000644000176200001440000000577514645137105024266 0ustar liggesusers#ifndef STAN_MODEL_GRAD_HESS_LOG_PROB_HPP #define STAN_MODEL_GRAD_HESS_LOG_PROB_HPP #include #include #include namespace stan { namespace model { /** * Evaluate the log-probability, its gradient, and its Hessian * at params_r. This default version computes the Hessian * numerically by finite-differencing the gradient, at a cost of * O(params_r.size()^2). * * @tparam propto True if calculation is up to proportion * (double-only terms dropped). * @tparam jacobian_adjust_transform True if the log absolute * Jacobian determinant of inverse parameter transforms is added to the * log probability. * @tparam M Class of model. * @param[in] model Model. * @param[in] params_r Real-valued parameter vector. * @param[in] params_i Integer-valued parameter vector. * @param[out] gradient Vector to write gradient to. * @param[out] hessian Vector to write gradient to. 
hessian[i*D + j] * gives the element at the ith row and jth column of the Hessian * (where D=params_r.size()). * @param[in, out] msgs Stream to which print statements in Stan * programs are written, default is 0 */ template double grad_hess_log_prob(const M& model, std::vector& params_r, std::vector& params_i, std::vector& gradient, std::vector& hessian, std::ostream* msgs = 0) { static const double epsilon = 1e-3; static const double half_epsilon = 0.5 * epsilon; static const int order = 4; static const double perturbations[order] = {-2 * epsilon, -1 * epsilon, epsilon, 2 * epsilon}; static const double coefficients[order] = {1.0 / 12.0, -2.0 / 3.0, 2.0 / 3.0, -1.0 / 12.0}; static const double half_epsilon_coeff[order] = {half_epsilon * coefficients[0], half_epsilon * coefficients[1], half_epsilon * coefficients[2], half_epsilon * coefficients[3]}; double result = log_prob_grad( model, params_r, params_i, gradient, msgs); hessian.assign(params_r.size() * params_r.size(), 0); std::vector temp_grad(params_r.size()); std::vector perturbed_params(params_r.begin(), params_r.end()); for (size_t d = 0; d < params_r.size(); ++d) { const int row_iter = d * params_r.size(); for (int i = 0; i < order; ++i) { perturbed_params[d] = params_r[d] + perturbations[i]; log_prob_grad(model, perturbed_params, params_i, temp_grad); for (size_t dd = 0; dd < params_r.size(); ++dd) { const double increment = half_epsilon_coeff[i] * temp_grad[dd]; const int col_iter = dd * params_r.size(); hessian[dd + row_iter] += increment; hessian[d + col_iter] += increment; } } perturbed_params[d] = params_r[d]; } return result; } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/test_gradients.hpp0000644000176200001440000000637714645137105023462 0ustar liggesusers#ifndef STAN_MODEL_TEST_GRADIENTS_HPP #define STAN_MODEL_TEST_GRADIENTS_HPP #include #include #include #include #include #include #include namespace stan { namespace model { /** * Test the log_prob_grad() 
function's ability to produce * accurate gradients using finite differences. This shouldn't * be necessary when using autodiff, but is useful for finding * bugs in hand-written code (or var). * * @tparam propto True if calculation is up to proportion * (double-only terms dropped). * @tparam jacobian_adjust_transform True if the log absolute * Jacobian determinant of inverse parameter transforms is added to the * log probability. * @tparam Model Class of model. * @param[in] model Model. * @param[in] params_r Real-valued parameter vector. * @param[in] params_i Integer-valued parameter vector. * @param[in] epsilon Real-valued scalar saying how much to perturb. * Reasonable value is 1e-6. * @param[in] error Real-valued scalar saying how much error to allow. * Reasonable value is 1e-6. * @param[in,out] interrupt callback to be called at every iteration * @param[in,out] logger Logger for messages * @param[in,out] parameter_writer Writer callback for file output * @return number of failed gradient comparisons versus allowed * error, so 0 if all gradients pass */ template int test_gradients(const Model& model, std::vector& params_r, std::vector& params_i, double epsilon, double error, stan::callbacks::interrupt& interrupt, stan::callbacks::logger& logger, stan::callbacks::writer& parameter_writer) { std::stringstream msg; std::vector grad; double lp = log_prob_grad( model, params_r, params_i, grad, &msg); if (msg.str().length() > 0) { logger.info(msg); parameter_writer(msg.str()); } std::vector grad_fd; finite_diff_grad(model, interrupt, params_r, params_i, grad_fd, epsilon, &msg); if (msg.str().length() > 0) { logger.info(msg); parameter_writer(msg.str()); } int num_failed = 0; std::stringstream lp_msg; lp_msg << " Log probability=" << lp; parameter_writer(); parameter_writer(lp_msg.str()); parameter_writer(); logger.info(""); logger.info(lp_msg); logger.info(""); std::stringstream header; header << std::setw(10) << "param idx" << std::setw(16) << "value" << std::setw(16) 
<< "model" << std::setw(16) << "finite diff" << std::setw(16) << "error"; parameter_writer(header.str()); logger.info(header); for (size_t k = 0; k < params_r.size(); k++) { std::stringstream line; line << std::setw(10) << k << std::setw(16) << params_r[k] << std::setw(16) << grad[k] << std::setw(16) << grad_fd[k] << std::setw(16) << (grad[k] - grad_fd[k]); parameter_writer(line.str()); logger.info(line); if (std::fabs(grad[k] - grad_fd[k]) > error) num_failed++; } return num_failed; } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/gradient_dot_vector.hpp0000644000176200001440000000131414645137105024452 0ustar liggesusers#ifndef STAN_MODEL_GRADIENT_DOT_VECTOR_HPP #define STAN_MODEL_GRADIENT_DOT_VECTOR_HPP #include #include #include namespace stan { namespace model { template void gradient_dot_vector(const M& model, const Eigen::Matrix& x, const Eigen::Matrix& v, double& f, double& grad_f_dot_v, std::ostream* msgs = 0) { stan::math::gradient_dot_vector(model_functional(model, msgs), x, v, f, grad_f_dot_v); } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/model_base.hpp0000644000176200001440000007164614645137105022536 0ustar liggesusers#ifndef STAN_MODEL_MODEL_BASE_HPP #define STAN_MODEL_MODEL_BASE_HPP #include #include #ifdef STAN_MODEL_FVAR_VAR #include #endif #include #include #include #include #include #include namespace stan { namespace model { /** * The base class for models defining all virtual methods required for * services. Any class extending this class and defining all of its * virtual methods can be used with any of the Stan services for * sampling, optimization, or variational inference. * *

Implementation Details: The reason there are so many * overloads of the `log_prob` and `write_array` methods is that * template methods cannot be declared virtual. This class extends * `stan::model::prob_grad` in order to define sizing for the number * of unconstrained parameters; thus it is not a pure virtual base * class. * *

The approach to defining models used by the Stan language code * generator is use the curiously recursive template base class defined * in the extension `stan::model::model_base_crtp`. */ class model_base : public prob_grad { public: /** * Construct a model with the specified number of real valued * unconstrained parameters. * * @param[in] num_params_r number of real-valued, unconstrained * parameters */ explicit model_base(size_t num_params_r) : prob_grad(num_params_r) {} /** * Destructor. This class has a no-op destructor. */ virtual ~model_base() {} /** * Return the name of the model. * * @return model name */ virtual std::string model_name() const = 0; /** * Returns the compile information of the model: * stanc version and stanc flags used to compile the model. * * @return model name */ virtual std::vector model_compile_info() const = 0; /** * Set the specified argument to sequence of parameters, transformed * parameters, and generated quantities in the order in which they * were declared. The input sequence is cleared and resized. * * @param[in,out] names sequence of names parameters, transformed * parameters, and generated quantities * @param[in] include_tparams true if transformed parameters should * be included * @param[in] include_gqs true if generated quantities should be * included */ virtual void get_param_names(std::vector& names, bool include_tparams = true, bool include_gqs = true) const = 0; /** * WARNING: This function bakes in the assumption that the * parameter values are rectangular. This is already not true * for Tuple types, and will be fundamentally broken for ragged * arrays or sparse matrices. * * Set the dimensionalities of constrained parameters, transformed * parameters, and generated quantities. The input sequence is * cleared and resized. The dimensions of each parameter * dimensionality is represented by a sequence of sizes. Scalar * real and integer parameters are represented as an empty sequence * of dimensions. * *

Indexes are output in the order they are used in indexing. For * example, a 2 x 3 x 4 array will have dimensionality * `std::vector{ 2, 3, 4 }`, whereas a 2-dimensional array * of 3-vectors will have dimensionality `std::vector{ 2, 3 * }`, and a 2-dimensional array of 3 x 4 matrices will have * dimensionality `std::vector{2, 3, 4}`. * * @param[in,out] dimss sequence of dimensions specifications to set * @param[in] include_tparams true if transformed parameters should * be included * @param[in] include_gqs true if generated quantities should be * included */ virtual void get_dims(std::vector >& dimss, bool include_tparams = true, bool include_gqs = true) const = 0; /** * Set the specified sequence to the indexed, scalar, constrained * parameter names. Each variable is output with a * period-separated list of indexes as a suffix, indexing from 1. * *

A real parameter `alpha` will produce output `alpha` with no * indexes. * *

A 3-dimensional vector (row vector, simplex, etc.) * `theta` will produce output `theta.1`, `theta.2`, `theta.3`. The * dimensions are the constrained dimensions. * *

Matrices are output column major to match their internal * representation in the Stan math library, so that a 2 x 3 matrix * `X` will produce output `X.1.1, X.2.1, X.1.2, X.2.2, X.1.3, * X.2.3`. * *

Arrays are handled in natural C++ order, 2 x 3 x 4 array `a` * will produce output `a.1.1.1`, `a.1.1.2`, `a.1.1.3`, `a.1.1.4`, * `a.1.2.1`, `a.1.2.2`, `a.1.2.3`, `a.1.2.4`, `a.1.3.1`, `a.1.3.2`, * `a.1.3.3`, `a.1.3.4`, `a.2.1.1`, `a.2.1.2`, `a.2.1.3`, `a.2.1.4`, * `a.2.2.1`, `a.2.2.2`, `a.2.2.3`, `a.2.2.4`, `a.2.3.1`, `a.2.3.2`, * `a.2.3.3`, `a.2.3.4`. * *

Arrays of vectors are handled as expected, so that a * 2-dimensional array of 3-vectors is output as `B.1.1`, `B.2.1`, * `B.1.2`, `B.2.2`, `B.1.3`, `B.2.3`. *

Arrays of matrices are generated in row-major order for the * array components and column-major order for the matrix component. * Thus a 2-dimensional array of 3 by 4 matrices `B` will be of * dimensionality 2 x 3 x 4 (indexes `B[1:2, 1:3, 1:4]`) and will be output * as `B.1.1.1`, `B.2.1.1`, `B.1.2.1`, `B.2.2.1`, `B.1.3.1`, * `B.2.3.1`, `B.1.1.2`, `B.2.1.2`, `B.1.2.2`, `B.2.2.2`, `B.1.3.2`, * `B.2.3.2`, `B.1.1.3`, `B.2.1.3`, `B.1.2.3`, `B.2.2.3`, `B.1.3.3`, * `B.2.3.3`, `B.1.1.4`, `B.2.1.4`, `B.1.2.4`, `B.2.2.4`, `B.1.3.4`, * `B.2.3.4` */ virtual void constrained_param_names(std::vector& param_names, bool include_tparams = true, bool include_gqs = true) const = 0; /** * Set the specified sequence of parameter names to the * unconstrained parameter names. Each unconstrained parameter is * represented as a simple one-dimensional sequence of values. The * actual transforms are documented in the reference manual. * *

The sizes will not be the declared sizes for types such as * simplexes, correlation, and covariance matrices. A simplex of * size `N` has `N - 1` unconstrained parameters, an `N` by `N` * correlation matrix (or Cholesky factor thereof) has `N` choose 2 * unconstrained parameters, and a covariance matrix (or Cholesky * factor thereof) has `N` choose 2 plus `N` unconstrained * parameters. * *

Full details of the transforms and their underlying * representations as sequences are detailed in the Stan reference * manual. This also provides details of the order of each * parameter type. * * @param[in,out] param_names sequence of names to set * @param[in] include_tparams true if transformed parameters should * be included * @param[in] include_gqs true if generated quantities should be * included */ virtual void unconstrained_param_names(std::vector& param_names, bool include_tparams = true, bool include_gqs = true) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian and with normalizing constants for * probability functions. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob(Eigen::VectorXd& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian and with normalizing constants for * probability functions. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob(Eigen::Matrix& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and with * normalizing constants for probability functions. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob_jacobian(Eigen::VectorXd& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and with * normalizing constants for probability functions. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob_jacobian(Eigen::Matrix& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian correction for constraints and * dropping normalizing constants. * *

This method is for completeness as `double`-based inputs are * always constant and will thus cause all probability functions to * be dropped from the result. To get the value of this * calculation, use the overload for `math::var`. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob_propto(Eigen::VectorXd& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian correction for constraints and * dropping normalizing constants. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob_propto(Eigen::Matrix& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and dropping * normalizing constants. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * *

This method is for completeness as `double`-based inputs are * always constant and will thus cause all probability functions to * be dropped from the result. To get the value of this * calculation, use the overload for `math::var`. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob_propto_jacobian(Eigen::VectorXd& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and dropping * normalizing constants. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob_propto_jacobian( Eigen::Matrix& params_r, std::ostream* msgs) const = 0; /** * Convenience template function returning the log density for the * specified unconstrained parameters, with Jacobian and normalizing * constant inclusion controlled by the template parameters. * *

This non-virtual template method delegates to the appropriate * overloaded virtual function. This allows external interfaces to * call the convenient template methods rather than the individual * virtual functions. * * @tparam propto `true` if normalizing constants should be dropped * and result returned up to an additive constant * @tparam jacobian `true` if the log Jacobian adjustment is * included for the change of variables from unconstrained to * constrained parameters * @tparam T type of scalars in the vector of parameters * @param[in] params_r unconstrained parameters * @param[in,out] msgs stream to which messages are written * @return log density with normalizing constants and Jacobian * included as specified by the template parameters */ template inline T log_prob(Eigen::Matrix& params_r, std::ostream* msgs) const { if (propto && jacobian) return log_prob_propto_jacobian(params_r, msgs); else if (propto && !jacobian) return log_prob_propto(params_r, msgs); else if (!propto && jacobian) return log_prob_jacobian(params_r, msgs); else // if (!propto && !jacobian) return log_prob(params_r, msgs); } /** * Read constrained parameter values from the specified context, * unconstrain them, then concatenate the unconstrained sequences * into the specified parameter sequence. Output messages go to the * specified stream. * * @param[in] context definitions of variable values * @param[in,out] params_r unconstrained parameter values produced * @param[in,out] msgs stream to which messages are written */ virtual void transform_inits(const io::var_context& context, Eigen::VectorXd& params_r, std::ostream* msgs) const = 0; /** * Convert the specified sequence of unconstrained parameters to a * sequence of constrained parameters, optionally including * transformed parameters and including generated quantities. The * generated quantities may use the random number generator. Any * messages are written to the specified stream. 
The output * parameter sequence will be resized if necessary to match the * number of constrained scalar parameters. * * @param base_rng RNG to use for generated quantities * @param[in] params_r unconstrained parameters input * @param[in,out] params_constrained_r constrained parameters produced * @param[in] include_tparams true if transformed parameters are * included in output * @param[in] include_gqs true if generated quantities are included * in output * @param[in,out] msgs msgs stream to which messages are written */ virtual void write_array(boost::ecuyer1988& base_rng, Eigen::VectorXd& params_r, Eigen::VectorXd& params_constrained_r, bool include_tparams = true, bool include_gqs = true, std::ostream* msgs = 0) const = 0; /** * Convert the specified sequence of constrained parameters to a * sequence of unconstrained parameters. * * This is the inverse of write_array. The output will be resized * if necessary to match the number of unconstrained parameters. * * @param[in] params_r_constrained constrained parameters input * @param[in,out] params_r unconstrained parameters produced * @param[in,out] msgs msgs stream to which messages are written */ virtual void unconstrain_array(const Eigen::VectorXd& params_r_constrained, Eigen::VectorXd& params_r, std::ostream* msgs = nullptr) const = 0; // TODO(carpenter): cut redundant std::vector versions from here === /** * Return the log density for the specified unconstrained * parameters, without Jacobian and with normalizing constants for * probability functions. 
* * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian and with normalizing constants for * probability functions. * * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and with * normalizing constants for probability functions. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob_jacobian(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and with * normalizing constants for probability functions. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob_jacobian(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian correction for constraints and * dropping normalizing constants. * *

This method is for completeness as `double`-based inputs are * always constant and will thus cause all probability functions to * be dropped from the result. To get the value of this * calculation, use the overload for `math::var`. * * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob_propto(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian correction for constraints and * dropping normalizing constants. * * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob_propto(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and dropping * normalizing constants. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * *

This method is for completeness as `double`-based inputs are * always constant and will thus cause all probability functions to * be dropped from the result. To get the value of this * calculation, use the overload for `math::var`. * * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual double log_prob_propto_jacobian(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and dropping * normalizing constants. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * \deprecated Use Eigen vector versions * * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::var log_prob_propto_jacobian(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const = 0; /** * Convenience template function returning the log density for the * specified unconstrained parameters, with Jacobian and normalizing * constant inclusion controlled by the template parameters. * *

This non-virtual template method delegates to the appropriate * overloaded virtual function. This allows external interfaces to * call the convenient template methods rather than the individual * virtual functions. * * \deprecated Use Eigen vector versions * * @tparam propto `true` if normalizing constants should be dropped * and result returned up to an additive constant * @tparam jacobian `true` if the log Jacobian adjustment is * included for the change of variables from unconstrained to * constrained parameters. * @tparam T type of scalars in the vector of parameters * @param[in] params_r unconstrained parameters * @param[in] params_i integer parameters (ignored) * @param[in,out] msgs stream to which messages are written * @return log density with normalizing constants and Jacobian * included as specified by the template parameters */ template inline T log_prob(std::vector& params_r, std::vector& params_i, std::ostream* msgs) const { if (propto && jacobian) return log_prob_propto_jacobian(params_r, params_i, msgs); else if (propto && !jacobian) return log_prob_propto(params_r, params_i, msgs); else if (!propto && jacobian) return log_prob_jacobian(params_r, params_i, msgs); else // if (!propto && !jacobian) return log_prob(params_r, params_i, msgs); } /** * Read constrained parameter values from the specified context, * unconstrain them, then concatenate the unconstrained sequences * into the specified parameter sequence. Output messages go to the * specified stream. 
* * \deprecated Use Eigen vector versions * * @param[in] context definitions of variable values * @param[in] params_i integer parameters (ignored) * @param[in,out] params_r unconstrained parameter values produced * @param[in,out] msgs stream to which messages are written */ virtual void transform_inits(const io::var_context& context, std::vector& params_i, std::vector& params_r, std::ostream* msgs) const = 0; /** * Convert the specified sequence of unconstrained parameters to a * sequence of constrained parameters, optionally including * transformed parameters and including generated quantities. The * generated quantities may use the random number generator. Any * messages are written to the specified stream. The output * parameter sequence will be resized if necessary to match the * number of constrained scalar parameters. * * @param base_rng RNG to use for generated quantities * @param[in] params_r unconstrained parameters input * @param[in] params_i integer parameters (ignored) * @param[in,out] params_r_constrained constrained parameters produced * @param[in] include_tparams true if transformed parameters are * included in output * @param[in] include_gqs true if generated quantities are included * in output * @param[in,out] msgs msgs stream to which messages are written */ virtual void write_array(boost::ecuyer1988& base_rng, std::vector& params_r, std::vector& params_i, std::vector& params_r_constrained, bool include_tparams = true, bool include_gqs = true, std::ostream* msgs = 0) const = 0; /** * Convert the specified sequence of constrained parameters to a * sequence of unconstrained parameters. * * This is the inverse of write_array. The output will be resized * if necessary to match the number of unconstrained parameters. 
* * @param[in] params_r_constrained constrained parameters input * @param[in,out] params_r unconstrained parameters produced * @param[in,out] msgs msgs stream to which messages are written */ virtual void unconstrain_array( const std::vector& params_r_constrained, std::vector& params_r, std::ostream* msgs = nullptr) const = 0; #ifdef STAN_MODEL_FVAR_VAR /** * Return the log density for the specified unconstrained * parameters, without Jacobian and with normalizing constants for * probability functions. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::fvar log_prob( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and with * normalizing constants for probability functions. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::fvar log_prob_jacobian( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, without Jacobian correction for constraints and * dropping normalizing constants. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::fvar log_prob_propto( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const = 0; /** * Return the log density for the specified unconstrained * parameters, with Jacobian correction for constraints and dropping * normalizing constants. * *

The Jacobian is of the inverse transform from unconstrained * parameters to constrained parameters; full details for Stan * language types can be found in the language reference manual. * * @param[in] params_r unconstrained parameters * @param[in,out] msgs message stream * @return log density for specified parameters */ virtual math::fvar log_prob_propto_jacobian( Eigen::Matrix, -1, 1>& params_r, std::ostream* msgs) const = 0; #endif }; } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/hessian.hpp0000644000176200001440000000124614645137105022063 0ustar liggesusers#ifndef STAN_MODEL_HESSIAN_HPP #define STAN_MODEL_HESSIAN_HPP #include #include #include namespace stan { namespace model { template void hessian(const M& model, const Eigen::Matrix& x, double& f, Eigen::Matrix& grad_f, Eigen::Matrix& hess_f, std::ostream* msgs = 0) { stan::math::hessian >(model_functional(model, msgs), x, f, grad_f, hess_f); } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/indexing.hpp0000644000176200001440000000077314645137105022242 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_HPP #define STAN_MODEL_INDEXING_HPP #include #ifdef STAN_OPENCL #include #include #endif #include #include #include #include #include #include #endif StanHeaders/inst/include/src/stan/model/indexing/0000755000176200001440000000000014645161272021524 5ustar liggesusersStanHeaders/inst/include/src/stan/model/indexing/assign_varmat.hpp0000644000176200001440000005114314645137105025075 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_ASSIGN_VARMAT_HPP #define STAN_MODEL_INDEXING_ASSIGN_VARMAT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace model { namespace internal { template using require_var_matrix_or_arithmetic_eigen = require_any_t, stan::math::disjunction< std::is_arithmetic>, is_eigen_matrix_dynamic>>; template using require_var_vector_or_arithmetic_eigen = require_any_t, 
stan::math::disjunction< std::is_arithmetic>, is_eigen_vector>>; template using require_var_row_vector_or_arithmetic_eigen = require_any_t< is_var_row_vector, stan::math::disjunction>, is_eigen_row_vector>>; } // namespace internal /** * Indexing Notes: * The different index types: * index_uni - A single cell. * index_multi - Access multiple cells. * index_omni - A no-op for all indices along a dimension. * index_min - index from min:N * index_max - index from 1:max * index_min_max - index from min:max * nil_index_list - no-op * The order of the overloads are * vector / row_vector: * - all index overloads * matrix: * - all row index overloads * - Assigns a subset of rows. * - column/row overloads * - overload on both the row and column indices. * - column overloads * - These take a subset of columns and then call the row slice assignment * over the column subset. * Std vector: * - single element and elementwise overloads * - General overload for nested std vectors. */ /** * Assign to a single element of an Eigen Vector. * * Types: vector[uni] <- scalar * * @tparam Vec `var_value` with inner Eigen type with either dynamic rows or * columns, but not both. * @tparam U Type of value (must be assignable to T). * @param[in] x Vector variable to be assigned. * @param[in] y Value to assign. * @param[in] name Name of variable * @param[in] idx index holding which cell to assign to. * @throw std::out_of_range If the index is out of bounds. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr> inline void assign(VarVec&& x, const U& y, const char* name, index_uni idx) { stan::math::check_range("var_vector[uni] assign", name, x.size(), idx.n_); const auto coeff_idx = idx.n_ - 1; double prev_val = x.val().coeffRef(coeff_idx); x.vi_->val_.coeffRef(coeff_idx) = stan::math::value_of(y); stan::math::reverse_pass_callback([x, y, coeff_idx, prev_val]() mutable { x.vi_->val_.coeffRef(coeff_idx) = prev_val; if (!is_constant::value) { math::adjoint_of(y) += x.adj().coeffRef(coeff_idx); } x.adj().coeffRef(coeff_idx) = 0.0; }); } /** * Assign to a non-contiguous subset of elements in a vector. * * Types: vector[multi] <- vector * * @tparam Vec1 `var_value` with inner Eigen type with either dynamic rows or * columns, but not both. * @tparam Vec2 `var_value` with inner Eigen type with either dynamic rows or * columns, but not both. * @param[in] x Vector to be assigned. * @param[in] y Value vector. * @param[in] name Name of variable * @param[in] idx Index holding an `std::vector` of cells to assign to. * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the value size isn't the same as * the indexed size. */ template * = nullptr, internal::require_var_vector_or_arithmetic_eigen* = nullptr> inline void assign(Vec1&& x, const Vec2& y, const char* name, const index_multi& idx) { stan::math::check_size_match("vector[multi] assign", name, idx.ns_.size(), "right hand side", y.size()); const auto x_size = x.size(); const auto assign_size = idx.ns_.size(); arena_t> x_idx(assign_size); arena_t> prev_vals(assign_size); Eigen::Matrix y_idx_vals(assign_size); std::unordered_set x_set; x_set.reserve(assign_size); const auto& y_val = stan::math::value_of(y); // We have to use two loops to avoid aliasing issues. 
for (int i = assign_size - 1; i >= 0; --i) { if (likely(x_set.insert(idx.ns_[i]).second)) { stan::math::check_range("vector[multi] assign", name, x_size, idx.ns_[i]); x_idx[i] = idx.ns_[i] - 1; prev_vals.coeffRef(i) = x.vi_->val_.coeffRef(x_idx[i]); y_idx_vals.coeffRef(i) = y_val.coeff(i); } else { x_idx[i] = -1; } } for (int i = assign_size - 1; i >= 0; --i) { if (likely(x_idx[i] != -1)) { x.vi_->val_.coeffRef(x_idx[i]) = y_idx_vals.coeff(i); } } if (!is_constant::value) { stan::math::reverse_pass_callback([x, y, x_idx, prev_vals]() mutable { for (Eigen::Index i = 0; i < x_idx.size(); ++i) { if (likely(x_idx[i] != -1)) { x.vi_->val_.coeffRef(x_idx[i]) = prev_vals.coeffRef(i); prev_vals.coeffRef(i) = x.adj().coeffRef(x_idx[i]); x.adj().coeffRef(x_idx[i]) = 0.0; } } for (Eigen::Index i = 0; i < x_idx.size(); ++i) { if (likely(x_idx[i] != -1)) { math::forward_as>(y) .adj() .coeffRef(i) += prev_vals.coeff(i); } } }); } else { stan::math::reverse_pass_callback([x, x_idx, prev_vals]() mutable { for (Eigen::Index i = 0; i < x_idx.size(); ++i) { if (likely(x_idx[i] != -1)) { x.vi_->val_.coeffRef(x_idx[i]) = prev_vals.coeff(i); prev_vals.coeffRef(i) = x.adj().coeff(x_idx[i]); x.adj().coeffRef(x_idx[i]) = 0.0; } } }); } } /** * Assign to a cell of an Eigen Matrix. * * Types: mat[single, single] = scalar * * @tparam Mat `var_value` with inner Eigen type with dynamic rows and columns. * @tparam U Scalar type. * @param[in] x Matrix variable to be assigned. * @param[in] y Value scalar. * @param[in] name Name of variable * @param[in] row_idx uni index for selecting rows * @param[in] col_idx uni index for selecting columns * @throw std::out_of_range If either of the indices are out of bounds. 
*/
// NOTE(review): template parameter list truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr,
          require_stan_scalar_t* = nullptr>
inline void assign(Mat&& x, const U& y, const char* name, index_uni row_idx,
                   index_uni col_idx) {
  stan::math::check_range("matrix[uni,uni] assign", name, x.rows(), row_idx.n_);
  stan::math::check_range("matrix[uni,uni] assign", name, x.cols(), col_idx.n_);
  // Stan indices are 1-based; Eigen's are 0-based.
  const int row_idx_val = row_idx.n_ - 1;
  const int col_idx_val = col_idx.n_ - 1;
  // Save the overwritten value so the reverse pass can restore it.
  double prev_val = x.val().coeffRef(row_idx_val, col_idx_val);
  x.vi_->val_.coeffRef(row_idx_val, col_idx_val) = stan::math::value_of(y);
  stan::math::reverse_pass_callback(
      [x, y, row_idx_val, col_idx_val, prev_val]() mutable {
        // Restore the pre-assignment value, move the cell's adjoint onto y
        // (when y participates in autodiff), then zero that adjoint.
        x.vi_->val_.coeffRef(row_idx_val, col_idx_val) = prev_val;
        if (!is_constant::value) {
          math::adjoint_of(y) += x.adj().coeff(row_idx_val, col_idx_val);
        }
        x.adj().coeffRef(row_idx_val, col_idx_val) = 0.0;
      });
}

/**
 * Assign multiple possibly unordered cells of row vector to a row of an eigen
 * matrix.
 *
 * Types:  mat[uni, multi] = row_vector
 *
 * @tparam Mat1 `var_value` with inner Eigen type with dynamic rows and columns.
 * @tparam Vec `var_value` with inner Eigen type with dynamic columns and
 *  compile time rows of 1.
 * @param[in] x Matrix variable to be assigned.
 * @param[in] y Row vector.
 * @param[in] name Name of variable
 * @param[in] row_idx uni index for selecting rows
 * @param[in] col_idx multi index for selecting columns
 * @throw std::out_of_range If any of the indices are out of bounds.
 * @throw std::invalid_argument If the dimensions of the indexed
 *  matrix and value matrix do not match.
*/
// NOTE(review): template parameter lists truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr,
          internal::require_var_row_vector_or_arithmetic_eigen* = nullptr>
inline void assign(Mat1&& x, const Vec& y, const char* name, index_uni row_idx,
                   const index_multi& col_idx) {
  stan::math::check_range("matrix[uni, multi] assign", name, x.rows(),
                          row_idx.n_);
  const auto assign_cols = col_idx.ns_.size();
  stan::math::check_size_match("matrix[uni, multi] assign columns", name,
                               assign_cols, "right hand side", y.size());
  const int row_idx_val = row_idx.n_ - 1;
  // Arena-allocated scratch captured by the reverse-pass lambdas.
  arena_t> x_idx(assign_cols);
  arena_t> prev_val(assign_cols);
  Eigen::Matrix y_val_idx(assign_cols);
  std::unordered_set x_set;
  x_set.reserve(assign_cols);
  const auto& y_val = stan::math::value_of(y);
  // Need to remove duplicates for cases like {2, 3, 2, 2}; iterating
  // back-to-front makes the last duplicate occurrence the one that sticks.
  for (int i = assign_cols - 1; i >= 0; --i) {
    if (likely(x_set.insert(col_idx.ns_[i]).second)) {
      stan::math::check_range("matrix[uni, multi] assign", name, x.cols(),
                              col_idx.ns_[i]);
      x_idx[i] = col_idx.ns_[i] - 1;
      prev_val.coeffRef(i) = x.val().coeff(row_idx_val, x_idx[i]);
      y_val_idx.coeffRef(i) = y_val.coeff(i);
    } else {
      // Duplicate column index: sentinel marks it skipped.
      x_idx[i] = -1;
    }
  }
  // Second loop performs the writes, avoiding aliasing with the reads above.
  for (int i = assign_cols - 1; i >= 0; --i) {
    if (likely(x_idx[i] != -1)) {
      x.vi_->val_.coeffRef(row_idx_val, x_idx[i]) = y_val_idx.coeff(i);
    }
  }
  if (!is_constant::value) {
    stan::math::reverse_pass_callback(
        [x, y, row_idx_val, x_idx, prev_val]() mutable {
          // Restore values, stash adjoints into prev_val, zero cell adjoints.
          for (size_t i = 0; i < x_idx.size(); ++i) {
            if (likely(x_idx[i] != -1)) {
              x.vi_->val_.coeffRef(row_idx_val, x_idx[i]) = prev_val.coeff(i);
              prev_val.coeffRef(i) = x.adj().coeff(row_idx_val, x_idx[i]);
              x.adj().coeffRef(row_idx_val, x_idx[i]) = 0.0;
            }
          }
          // Propagate the stashed adjoints into y.
          for (size_t i = 0; i < x_idx.size(); ++i) {
            if (likely(x_idx[i] != -1)) {
              math::forward_as>(y)
                  .adj()
                  .coeffRef(i)
                  += prev_val.coeff(i);
            }
          }
        });
  } else {
    // y is constant: only restore values and clear adjoints.
    stan::math::reverse_pass_callback(
        [x, row_idx_val, x_idx, prev_val]() mutable {
          for (size_t i = 0; i < x_idx.size(); ++i) {
            if (likely(x_idx[i] != -1)) {
              x.vi_->val_.coeffRef(row_idx_val, x_idx[i]) = prev_val.coeff(i);
              prev_val.coeffRef(i) =
                  x.adj().coeffRef(row_idx_val, x_idx[i]);
              x.adj().coeffRef(row_idx_val, x_idx[i]) = 0.0;
            }
          }
        });
  }
}

/**
 * Assign to multiple possibly unordered rows of a matrix from an input
 * matrix.
 *
 * Types:  mat[multi] = mat
 *
 * @tparam Mat1 `var_value` with inner Eigen type with dynamic rows and columns.
 * @tparam Mat2 `var_value` with inner Eigen type with dynamic rows and columns.
 * @param[in] x Matrix variable to be assigned.
 * @param[in] y Value matrix.
 * @param[in] name Name of variable
 * @param[in] idx Multiple index
 * @throw std::out_of_range If any of the indices are out of bounds.
 * @throw std::invalid_argument If the dimensions of the indexed
 *  matrix and value matrix do not match.
 */
template * = nullptr,
          internal::require_var_matrix_or_arithmetic_eigen* = nullptr>
inline void assign(Mat1&& x, const Mat2& y, const char* name,
                   const index_multi& idx) {
  const auto assign_rows = idx.ns_.size();
  stan::math::check_size_match("matrix[multi] assign rows", name, assign_rows,
                               "right hand side rows", y.rows());
  // NOTE(review): the label below says "right hand side rows" although this
  // compares column counts — message inconsistency inherited from upstream.
  stan::math::check_size_match("matrix[multi] assign columns", name, x.cols(),
                               "right hand side rows", y.cols());
  arena_t> x_idx(assign_rows);
  arena_t> prev_vals(assign_rows, x.cols());
  Eigen::Matrix y_val_idx(assign_rows, x.cols());
  std::unordered_set x_set;
  x_set.reserve(assign_rows);
  const auto& y_val = stan::math::value_of(y);
  // Need to remove duplicates for cases like {2, 3, 2, 2}
  for (int i = assign_rows - 1; i >= 0; --i) {
    if (likely(x_set.insert(idx.ns_[i]).second)) {
      stan::math::check_range("matrix[multi, multi] assign row", name, x.rows(),
                              idx.ns_[i]);
      x_idx[i] = idx.ns_[i] - 1;
      // Save the whole overwritten row for the reverse pass.
      prev_vals.row(i) = x.vi_->val_.row(x_idx[i]);
      y_val_idx.row(i) = y_val.row(i);
    } else {
      x_idx[i] = -1;
    }
  }
  // Second loop performs the writes, avoiding aliasing with the reads above.
  for (int i = assign_rows - 1; i >= 0; --i) {
    if (likely(x_idx[i] != -1)) {
      x.vi_->val_.row(x_idx[i]) = y_val_idx.row(i);
    }
  }
  if (!is_constant::value) {
    stan::math::reverse_pass_callback([x, y, prev_vals, x_idx]() mutable {
      // Restore rows, stash row adjoints into prev_vals, zero row adjoints.
      for (size_t i = 0; i < x_idx.size(); ++i) {
        if (likely(x_idx[i] != -1)) {
          x.vi_->val_.row(x_idx[i]) = prev_vals.row(i);
          prev_vals.row(i) = x.adj().row(x_idx[i]);
          x.adj().row(x_idx[i]).fill(0);
        }
      }
      // Propagate the stashed row adjoints into y.
      for (size_t i = 0; i < x_idx.size(); ++i) {
        if (likely(x_idx[i] != -1)) {
          math::forward_as>(y)
              .adj()
              .row(i)
              += prev_vals.row(i);
        }
      }
    });
  } else {
    // y is constant: only restore rows and clear adjoints.
    stan::math::reverse_pass_callback([x, prev_vals, x_idx]() mutable {
      for (size_t i = 0; i < x_idx.size(); ++i) {
        if (likely(x_idx[i] != -1)) {
          x.vi_->val_.row(x_idx[i]) = prev_vals.row(i);
          prev_vals.row(i) = x.adj().row(x_idx[i]);
          x.adj().row(x_idx[i]).setZero();
        }
      }
    });
  }
}

/**
 * Assign to multiple possibly unordered cell's of a matrix from an input
 * matrix.
 *
 * Types:  mat[multi, multi] = mat
 *
 * @tparam Mat1 `var_value` with inner Eigen type with dynamic rows and columns.
 * @tparam Mat2 `var_value` with inner Eigen type with dynamic rows and columns.
 * @param[in] x Matrix variable to be assigned.
 * @param[in] y Value matrix.
 * @param[in] name Name of variable
 * @param[in] row_idx multi index for selecting rows
 * @param[in] col_idx multi index for selecting columns
 * @throw std::out_of_range If any of the indices are out of bounds.
 * @throw std::invalid_argument If the dimensions of the indexed
 *  matrix and value matrix do not match.
*/
// NOTE(review): template parameter list truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr,
          internal::require_var_matrix_or_arithmetic_eigen* = nullptr>
inline void assign(Mat1&& x, const Mat2& y, const char* name,
                   const index_multi& row_idx, const index_multi& col_idx) {
  const auto assign_rows = row_idx.ns_.size();
  const auto assign_cols = col_idx.ns_.size();
  stan::math::check_size_match("matrix[multi,multi] assign rows", name,
                               assign_rows, "right hand side rows", y.rows());
  stan::math::check_size_match("matrix[multi,multi] assign columns", name,
                               assign_cols, "right hand side columns",
                               y.cols());
  using arena_vec = std::vector>;
  arena_vec x_col_idx(assign_cols);
  arena_vec x_row_idx(assign_rows);
  // Separate dedup sets for rows and columns.
  std::unordered_set x_set;
  x_set.reserve(assign_cols);
  std::unordered_set x_row_set;
  x_row_set.reserve(assign_rows);
  arena_t> prev_vals(assign_rows, assign_cols);
  Eigen::Matrix y_vals(assign_rows, assign_cols);
  // Need to remove duplicates for cases like {{2, 3, 2, 2}, {1, 2, 2}};
  // back-to-front so the last duplicate occurrence wins. -1 is the
  // "skip this index" sentinel throughout.
  for (int i = assign_rows - 1; i >= 0; --i) {
    if (likely(x_row_set.insert(row_idx.ns_[i]).second)) {
      stan::math::check_range("matrix[multi, multi] assign row", name, x.rows(),
                              row_idx.ns_[i]);
      x_row_idx[i] = row_idx.ns_[i] - 1;
    } else {
      x_row_idx[i] = -1;
    }
  }
  const auto& y_val = stan::math::value_of(y);
  for (int j = assign_cols - 1; j >= 0; --j) {
    if (likely(x_set.insert(col_idx.ns_[j]).second)) {
      stan::math::check_range("matrix[multi, multi] assign col", name, x.cols(),
                              col_idx.ns_[j]);
      x_col_idx[j] = col_idx.ns_[j] - 1;
      // Save the overwritten values so the reverse pass can restore them.
      for (int i = assign_rows - 1; i >= 0; --i) {
        if (likely(x_row_idx[i] != -1)) {
          prev_vals.coeffRef(i, j)
              = x.vi_->val_.coeff(x_row_idx[i], x_col_idx[j]);
          y_vals.coeffRef(i, j) = y_val.coeff(i, j);
        }
      }
    } else {
      x_col_idx[j] = -1;
    }
  }
  // Second sweep performs the writes, avoiding aliasing with the reads above.
  for (int j = assign_cols - 1; j >= 0; --j) {
    if (likely(x_col_idx[j] != -1)) {
      for (int i = assign_rows - 1; i >= 0; --i) {
        if (likely(x_row_idx[i] != -1)) {
          x.vi_->val_.coeffRef(x_row_idx[i], x_col_idx[j]) = y_vals.coeff(i, j);
        }
      }
    }
  }
  if (!is_constant::value) {
    stan::math::reverse_pass_callback(
        [x, y, prev_vals, x_col_idx, x_row_idx]() mutable {
          // Restore values, stash cell adjoints, zero cell adjoints.
          for (int j = 0; j < x_col_idx.size(); ++j) {
            if (likely(x_col_idx[j] != -1)) {
              for (int i = 0; i < x_row_idx.size(); ++i) {
                if (likely(x_row_idx[i] != -1)) {
                  x.vi_->val_.coeffRef(x_row_idx[i], x_col_idx[j])
                      = prev_vals.coeff(i, j);
                  prev_vals.coeffRef(i, j)
                      = x.adj().coeff(x_row_idx[i], x_col_idx[j]);
                  x.adj().coeffRef(x_row_idx[i], x_col_idx[j]) = 0;
                }
              }
            }
          }
          // Propagate the stashed adjoints into y.
          for (int j = 0; j < x_col_idx.size(); ++j) {
            if (likely(x_col_idx[j] != -1)) {
              for (int i = 0; i < x_row_idx.size(); ++i) {
                if (likely(x_row_idx[i] != -1)) {
                  math::forward_as>(y)
                      .adj()
                      .coeffRef(i, j)
                      += prev_vals.coeff(i, j);
                }
              }
            }
          }
        });
  } else {
    // y is constant: only restore values and clear adjoints.
    stan::math::reverse_pass_callback(
        [x, prev_vals, x_col_idx, x_row_idx]() mutable {
          for (int j = 0; j < x_col_idx.size(); ++j) {
            if (likely(x_col_idx[j] != -1)) {
              for (int i = 0; i < x_row_idx.size(); ++i) {
                if (likely(x_row_idx[i] != -1)) {
                  x.vi_->val_.coeffRef(x_row_idx[i], x_col_idx[j])
                      = prev_vals.coeff(i, j);
                  prev_vals.coeffRef(i, j)
                      = x.adj().coeff(x_row_idx[i], x_col_idx[j]);
                  x.adj().coeffRef(x_row_idx[i], x_col_idx[j]) = 0;
                }
              }
            }
          }
        });
  }
}

/**
 * Assign to a non-contiguous set of columns of a matrix.
 *
 * Types:  mat[Idx, multi] = mat
 *
 * @tparam Mat1 `var_value` with inner Eigen type with dynamic rows and columns.
 * @tparam Mat2 `var_value` with inner Eigen type
 * @tparam Idx The row index type
 * @param[in] x Matrix variable to be assigned.
 * @param[in] y Value matrix.
 * @param[in] name Name of variable
 * @param[in] row_idx index for selecting rows
 * @param[in] col_idx multi index for selecting columns
 * @throw std::out_of_range If any of the indices are out of bounds.
 * @throw std::invalid_argument If the dimensions of the indexed
 *  matrix and value matrix do not match.
*/
// NOTE(review): template parameter list truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr,
          internal::require_var_matrix_or_arithmetic_eigen* = nullptr>
inline void assign(Mat1&& x, const Mat2& y, const char* name,
                   const Idx& row_idx, const index_multi& col_idx) {
  const auto assign_cols = col_idx.ns_.size();
  stan::math::check_size_match("matrix[..., multi] assign columns", name,
                               assign_cols, "right hand side columns",
                               y.cols());
  std::unordered_set x_set;
  const auto& y_eval = y.eval();
  x_set.reserve(assign_cols);
  // Need to remove duplicates for cases like {2, 3, 2, 2}; back-to-front so
  // the last duplicate occurrence wins, then delegate each column to the
  // single-column assign overload with the remaining row index.
  for (int j = assign_cols - 1; j >= 0; --j) {
    if (likely(x_set.insert(col_idx.ns_[j]).second)) {
      stan::math::check_range("matrix[..., multi] assign col", name, x.cols(),
                              col_idx.ns_[j]);
      assign(x.col(col_idx.ns_[j] - 1), y_eval.col(j), name, row_idx);
    }
  }
}

}  // namespace model
}  // namespace stan
#endif
StanHeaders/inst/include/src/stan/model/indexing/rvalue.hpp0000644000176200001440000006677614645137105023547 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_RVALUE_HPP
#define STAN_MODEL_INDEXING_RVALUE_HPP
// NOTE(review): the include targets below were lost in extraction
// (angle-bracket contents stripped) — restore from upstream rvalue.hpp.
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace stan {
namespace model {

/**
 * Indexing Notes:
 * The different index types:
 * index_uni - A single cell.
 * index_multi - Access multiple cells.
 * index_omni - A no-op for all indices along a dimension.
 * index_min - index from min:N
 * index_max - index from 1:max
 * index_min_max - index from min:max
 * nil_index_list - no-op
 * The order of the overloads are
 * vector / row_vector:
 *  - all index overloads
 * matrix:
 *  - all row index overloads
 *    - Return a subset of rows.
 *  - column/row overloads
 *    - overload on both the row and column indices.
 *  - column overloads
 *    - These take a subset of columns and then call the row slice rvalue
 *       over the column subset.
 * Std vector:
 *  - single element and elementwise overloads
 *  - General overload for nested std vectors.
 */

/**
 * Return the result of indexing a specified value with
 * a nil index list, which just returns the value.
 *
 * Types:  T[] : T
 *
 * @tparam T Scalar type.
 * @param[in] x Value to index.
 * @return Input value.
 */
// NOTE(review): template parameter lists truncated by extraction — confirm
// against upstream Stan sources. These three overloads forward the input
// unchanged (rvalue / mutable lvalue / const lvalue).
template
inline std::decay_t rvalue(T&& x, const char* /*name*/) {
  return std::forward(x);
}
template
inline T& rvalue(T& x, const char* /*name*/) {
  return x;
}
template
inline const T& rvalue(const T& x, const char* /*name*/) {
  return x;
}

/**
 * Return the result of indexing a type without taking a subset. Mostly used as
 * an intermediary rvalue function when doing multiple subsets.
 *
 * Types:  plain_type[omni] : plain_type
 *
 * @tparam T A type that is a plain object.
 * @param[in] x an object.
 * @return Result of indexing matrix.
 */
template
inline std::decay_t rvalue(T&& x, const char* /*name*/, index_omni /*idx*/) {
  return std::forward(x);
}
template
inline T& rvalue(T& x, const char* /*name*/, index_omni /*idx*/) {
  return x;
}
template
inline const T& rvalue(const T& x, const char* /*name*/, index_omni /*idx*/) {
  return x;
}

/**
 * Return the result of indexing a type without taking a subset
 *
 * Types:  type[omni, omni] : type
 *
 * @tparam T Any type.
 * @param[in] x an object.
 * @param[in] name String form of expression being evaluated.
 * @return Result of indexing matrix.
 */
template
inline std::decay_t rvalue(T&& x, const char* name, index_omni /*idx1*/,
                           index_omni /*idx2*/) {
  return std::forward(x);
}
template
inline T& rvalue(T& x, const char* name, index_omni /*idx1*/,
                 index_omni /*idx2*/) {
  return x;
}
template
inline const T& rvalue(const T& x, const char* name, index_omni /*idx1*/,
                       index_omni /*idx2*/) {
  return x;
}

/**
 * Return a single element of a Vector.
 *
 * Types:  vector[uni] : scaler
 *
 * @tparam Vec An Eigen vector or `var_value` where `T` is an eigen vector.
 * @param[in] v Vector being indexed.
 * @param[in] name String form of expression being evaluated.
 * @param[in] idx One single index.
 * @return Result of indexing vector.
*/
// NOTE(review): template parameter lists truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr,
          require_not_std_vector_t* = nullptr>
inline auto rvalue(Vec&& v, const char* name, index_uni idx) {
  using stan::math::to_ref;
  math::check_range("vector[uni] indexing", name, v.size(), idx.n_);
  // Stan indices are 1-based; Eigen's are 0-based.
  return v.coeff(idx.n_ - 1);
}

/**
 * Return a non-contiguous subset of elements in a vector.
 *
 * Types:  vector[multi] = vector
 *
 * @tparam EigVec Eigen type with either dynamic rows or columns, but not both.
 * @param[in] v Eigen vector type.
 * @param[in] name Name of variable
 * @param[in] idx Sequence of integers.
 * @throw std::out_of_range If any of the indices are out of bounds.
 * @throw std::invalid_argument If the value size isn't the same as
 * the indexed size.
 */
template * = nullptr>
inline auto rvalue(EigVec&& v, const char* name, const index_multi& idx) {
  // make_holder keeps the referenced vector alive for the lazy NullaryExpr;
  // range checking happens per element on evaluation.
  return stan::math::make_holder(
      [name, &idx](auto& v_ref) {
        return plain_type_t::NullaryExpr(
            idx.ns_.size(), [name, &idx, &v_ref](Eigen::Index i) {
              math::check_range("vector[multi] indexing", name, v_ref.size(),
                                idx.ns_[i]);
              return v_ref.coeff(idx.ns_[i] - 1);
            });
      },
      stan::math::to_ref(v));
}

/**
 * Return a range of a vector
 *
 * Types:  vector[min_max] = vector
 *
 * @tparam Vec An Eigen vector or `var_value` where `T` is an eigen vector.
 * @param[in] v Vector being indexed.
 * @param[in] name String form of expression being evaluated.
 * @param[in] idx An index to select from a minimum to maximum range.
 * @return Result of indexing vector.
 */
template * = nullptr,
          require_not_std_vector_t* = nullptr>
inline auto rvalue(Vec&& v, const char* name, index_min_max idx) {
  math::check_range("vector[min_max] min indexing", name, v.size(), idx.min_);
  const Eigen::Index slice_start = idx.min_ - 1;
  if (idx.max_ >= idx.min_) {
    math::check_range("vector[min_max] max indexing", name, v.size(), idx.max_);
    const Eigen::Index slice_size = idx.max_ - slice_start;
    return v.segment(slice_start, slice_size);
  } else {
    // Empty range (max < min) yields a zero-length segment.
    return v.segment(slice_start, 0);
  }
}

/**
 * Return a tail slice of a vector
 *
 * Types:  vector[min:N] = vector
 *
 * @tparam Vec An Eigen vector or `var_value` where `T` is an eigen vector.
 * @param[in] x vector
 * @param[in] name Name of variable
 * @param[in] idx An indexing from a specific minimum index to the end out
 *  of a bottom row of a matrix
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr,
          require_not_std_vector_t* = nullptr>
inline auto rvalue(Vec&& x, const char* name, index_min idx) {
  stan::math::check_range("vector[min] indexing", name, x.size(), idx.min_);
  return x.tail(x.size() - idx.min_ + 1);
}

/**
 * Return a head slice of a vector
 *
 * Types:  vector[1:max] <- vector
 *
 * @tparam Vec An Eigen vector or `var_value` where `T` is an eigen vector.
 * @param[in] x vector.
 * @param[in] name Name of variable
 * @param[in] idx An indexing from the start of the container up to
 * the specified maximum index (inclusive).
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr,
          require_not_std_vector_t* = nullptr>
inline auto rvalue(Vec&& x, const char* name, index_max idx) {
  if (idx.max_ > 0) {
    stan::math::check_range("vector[max] indexing", name, x.size(), idx.max_);
    return x.head(idx.max_);
  } else {
    // Non-positive max yields an empty head slice.
    return x.head(0);
  }
}

/**
 * Return the result of indexing the matrix with a
 * sequence consisting of one single index, returning a row vector.
 *
 * Types:  matrix[uni] : row vector
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x matrix.
 * @param[in] name String form of expression being evaluated.
 * @param[in] idx uni-index
 * @return Result of indexing matrix.
 */
// NOTE(review): template parameter lists truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, index_uni idx) {
  math::check_range("matrix[uni] indexing", name, x.rows(), idx.n_);
  return x.row(idx.n_ - 1);
}

/**
 * Return the specified Eigen matrix at the specified multi index.
 *
 * Types:  matrix[multi] = matrix
 *
 * @tparam EigMat Eigen type with dynamic rows and columns.
 * @param[in] x Eigen type
 * @param[in] name Name of variable
 * @param[in] idx A multi index for selecting a set of rows.
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr>
inline plain_type_t rvalue(EigMat&& x, const char* name,
                           const index_multi& idx) {
  // Check all row indices up front, then build a lazy row-gather expression.
  for (int i = 0; i < idx.ns_.size(); ++i) {
    math::check_range("matrix[multi] row indexing", name, x.rows(), idx.ns_[i]);
  }
  return stan::math::make_holder(
      [&idx](auto& x_ref) {
        return plain_type_t::NullaryExpr(
            idx.ns_.size(), x_ref.cols(),
            [&idx, &x_ref](Eigen::Index i, Eigen::Index j) {
              return x_ref.coeff(idx.ns_[i] - 1, j);
            });
      },
      stan::math::to_ref(x));
}

/**
 * Return the result of indexing the matrix with a min index
 * returning back a block of rows min:N and all cols
 *
 * Types:  matrix[min:N] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x matrix.
 * @param[in] name String form of expression being evaluated.
 * @param[in] idx index_min
 * @return Result of indexing matrix.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, index_min idx) {
  const auto row_size = x.rows() - (idx.min_ - 1);
  math::check_range("matrix[min] row indexing", name, x.rows(), idx.min_);
  return x.bottomRows(row_size);
}

/**
 * Return the 1:max rows of a matrix.
 *
 * Types:  matrix[:max] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x matrix
 * @param[in] name Name of variable
 * @param[in] idx An indexing from the start of the container up to
 * the specified maximum index (inclusive).
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, index_max idx) {
  if (idx.max_ > 0) {
    math::check_range("matrix[max] row indexing", name, x.rows(), idx.max_);
    return x.topRows(idx.max_);
  } else {
    // Non-positive max yields an empty block of rows.
    return x.topRows(0);
  }
}

/**
 * Return a range of rows for a matrix.
 *
 * Types:  matrix[min_max] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x Dense matrix
 * @param[in] name Name of variable
 * @param[in] idx A min max index to select a range of rows.
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, index_min_max idx) {
  math::check_range("matrix[min_max] min row indexing", name, x.rows(),
                    idx.min_);
  if (idx.max_ >= idx.min_) {
    math::check_range("matrix[min_max] max row indexing", name, x.rows(),
                      idx.max_);
    return x.middleRows(idx.min_ - 1, idx.max_ - idx.min_ + 1);
  } else {
    // Empty range (max < min) yields a zero-row block.
    return x.middleRows(idx.min_ - 1, 0);
  }
}

/**
 * Return the result of indexing a matrix with two min_max
 * indices, returning back a block of a matrix.
 *
 * Types:  matrix[min_max, min_max] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x Eigen matrix.
 * @param[in] name String form of expression being evaluated.
 * @param[in] row_idx Min max index for selecting rows.
 * @param[in] col_idx Min max index for selecting cols.
 * @return Result of indexing matrix.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, index_min_max row_idx,
                   index_min_max col_idx) {
  math::check_range("matrix[min_max, min_max] min row indexing", name, x.rows(),
                    row_idx.min_);
  math::check_range("matrix[min_max, min_max] min column indexing", name,
                    x.cols(), col_idx.min_);
  // Four cases: both ranges non-empty, only rows non-empty, only columns
  // non-empty, both empty; all return an Eigen block of the right shape.
  if (row_idx.max_ >= row_idx.min_ && col_idx.max_ >= col_idx.min_) {
    math::check_range("matrix[min_max, min_max] max row indexing", name,
                      x.rows(), row_idx.max_);
    math::check_range("matrix[min_max, min_max] max column indexing", name,
                      x.cols(), col_idx.max_);
    return x.block(row_idx.min_ - 1, col_idx.min_ - 1,
                   row_idx.max_ - (row_idx.min_ - 1),
                   col_idx.max_ - (col_idx.min_ - 1));
  } else if (row_idx.max_ >= row_idx.min_) {
    math::check_range("matrix[min_max, min_max] max row indexing", name,
                      x.rows(), row_idx.max_);
    return x.block(row_idx.min_ - 1, col_idx.min_ - 1,
                   row_idx.max_ - (row_idx.min_ - 1), 0);
  } else if (col_idx.max_ >= col_idx.min_) {
    math::check_range("matrix[min_max, min_max] max column indexing", name,
                      x.cols(), col_idx.max_);
    return x.block(row_idx.min_ - 1, col_idx.min_ - 1, 0,
                   col_idx.max_ - (col_idx.min_ - 1));
  } else {
    return x.block(row_idx.min_ - 1, col_idx.min_ - 1, 0, 0);
  }
}

/**
 * Return a scalar from a matrix
 *
 * Types:  matrix[uni,uni] : scalar
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x Matrix to index.
 * @param[in] name String form of expression being evaluated.
 * @param[in] row_idx uni index for selecting rows.
 * @param[in] col_idx uni index for selecting cols.
 * @return Result of indexing matrix.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, index_uni row_idx,
                   index_uni col_idx) {
  math::check_range("matrix[uni,uni] row indexing", name, x.rows(), row_idx.n_);
  math::check_range("matrix[uni,uni] column indexing", name, x.cols(),
                    col_idx.n_);
  return x.coeff(row_idx.n_ - 1, col_idx.n_ - 1);
}

/**
 * Return a row of an Eigen matrix with possibly unordered cells.
 *
 * Types:  matrix[uni, multi] = row vector
 *
 * @tparam EigMat Eigen type with dynamic rows and columns.
 * @param[in] x Matrix to index.
 * @param[in] name Name of variable
 * @param[in] row_idx uni index for selecting rows.
 * @param[in] col_idx multi index for selecting cols.
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
// NOTE(review): template parameter lists and the Eigen::Matrix scalar
// arguments below were truncated by extraction — confirm against upstream
// Stan sources.
template * = nullptr>
inline Eigen::Matrix, 1, Eigen::Dynamic> rvalue(
    EigMat&& x, const char* name, index_uni row_idx,
    const index_multi& col_idx) {
  math::check_range("matrix[uni, multi] row indexing", name, x.rows(),
                    row_idx.n_);
  // Lazy gather of the selected columns of one row; column range checks
  // happen per element on evaluation.
  return stan::math::make_holder(
      [name, row_idx, &col_idx](auto& x_ref) {
        return Eigen::Matrix, 1, Eigen::Dynamic>::
            NullaryExpr(col_idx.ns_.size(),
                        [name, row_i = row_idx.n_ - 1, &col_idx,
                         &x_ref](Eigen::Index i) {
                          math::check_range("matrix[uni, multi] column indexing",
                                            name, x_ref.cols(), col_idx.ns_[i]);
                          return x_ref.coeff(row_i, col_idx.ns_[i] - 1);
                        });
      },
      stan::math::to_ref(x));
}

/**
 * Return a column of an Eigen matrix that is a possibly non-contiguous subset
 * of the input Eigen matrix.
 *
 * Types:  matrix[multi, uni] = vector
 *
 * @tparam EigMat Eigen type with dynamic rows and columns.
 * @param[in] x Matrix to index.
 * @param[in] name Name of variable
 * @param[in] row_idx multi index for selecting rows.
 * @param[in] col_idx uni index for selecting cols.
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr>
inline Eigen::Matrix, Eigen::Dynamic, 1> rvalue(
    EigMat&& x, const char* name, const index_multi& row_idx,
    index_uni col_idx) {
  math::check_range("matrix[multi, uni] column indexing", name, x.cols(),
                    col_idx.n_);
  // Lazy gather of the selected rows of one column; row range checks happen
  // per element on evaluation.
  return stan::math::make_holder(
      [name, &row_idx, col_idx](auto& x_ref) {
        return Eigen::Matrix, Eigen::Dynamic, 1>::
            NullaryExpr(row_idx.ns_.size(),
                        [name, &row_idx, col_i = col_idx.n_ - 1,
                         &x_ref](Eigen::Index i) {
                          math::check_range("matrix[multi, uni] row indexing",
                                            name, x_ref.rows(), row_idx.ns_[i]);
                          return x_ref.coeff(row_idx.ns_[i] - 1, col_i);
                        });
      },
      stan::math::to_ref(x));
}

/**
 * Return an Eigen matrix that is a possibly non-contiguous subset of the input
 * Eigen matrix.
 *
 * Types:  matrix[multi, multi] = matrix
 *
 * @tparam EigMat An eigen matrix
 * @param[in] x Matrix to index.
 * @param[in] name String form of expression being evaluated.
 * @param[in] row_idx multi index for selecting rows.
 * @param[in] col_idx multi index for selecting cols.
 * @return Result of indexing matrix.
 */
template * = nullptr>
inline plain_type_t rvalue(EigMat&& x, const char* name,
                           const index_multi& row_idx,
                           const index_multi& col_idx) {
  const auto& x_ref = stan::math::to_ref(x);
  const Eigen::Index rows = row_idx.ns_.size();
  const Eigen::Index cols = col_idx.ns_.size();
  // Eager copy here (not lazy): each cell is range-checked as it is gathered.
  plain_type_t x_ret(rows, cols);
  for (Eigen::Index j = 0; j < cols; ++j) {
    for (Eigen::Index i = 0; i < rows; ++i) {
      const Eigen::Index m = row_idx.ns_[i];
      const Eigen::Index n = col_idx.ns_[j];
      math::check_range("matrix[multi,multi] row indexing", name, x_ref.rows(),
                        m);
      math::check_range("matrix[multi,multi] column indexing", name,
                        x_ref.cols(), n);
      x_ret.coeffRef(i, j) = x_ref.coeff(m - 1, n - 1);
    }
  }
  return x_ret;
}

/**
 * Return a column of a matrix with the range of the column specificed
 * by another index.
 *
 * Types:  matrix[Idx, uni] = vector
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x matrix.
 * @param[in] name String form of expression being evaluated.
 * @param[in] row_idx index for selecting rows.
 * @param[in] col_idx uni index for selecting cols.
 */
// NOTE(review): template parameter lists truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, const Idx& row_idx,
                   index_uni col_idx) {
  math::check_range("matrix[..., uni] column indexing", name, x.cols(),
                    col_idx.n_);
  // Select the single column, then delegate the row subsetting.
  return rvalue(x.col(col_idx.n_ - 1), name, row_idx);
}

/**
 * Return an Eigen matrix of possibly unordered columns with each column
 * range specified by another index.
 *
 * Types:  matrix[Idx, multi] = matrix
 *
 * @tparam EigMat An eigen matrix
 * @param[in] x Eigen matrix.
 * @param[in] name String form of expression being evaluated.
 * @param[in] row_idx index for selecting rows.
 * @param[in] col_idx multi index for selecting cols.
 * @return Result of indexing matrix.
 */
template * = nullptr,
          require_not_same_t, index_uni>* = nullptr>
inline plain_type_t rvalue(EigMat&& x, const char* name, const Idx& row_idx,
                           const index_multi& col_idx) {
  const auto& x_ref = stan::math::to_ref(x);
  const int rows = rvalue_index_size(row_idx, x_ref.rows());
  plain_type_t x_ret(rows, col_idx.ns_.size());
  // Gather each requested column, applying the row index per column.
  for (int j = 0; j < col_idx.ns_.size(); ++j) {
    const Eigen::Index n = col_idx.ns_[j];
    math::check_range("matrix[..., multi] column indexing", name, x_ref.cols(),
                      n);
    x_ret.col(j) = rvalue(x_ref.col(n - 1), name, row_idx);
  }
  return x_ret;
}

/**
 * Return the matrix with all columns and a slice of rows.
 *
 * Types:  matrix[Idx, omni] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @param[in] x type
 * @param[in] name Name of variable
 * @param[in] row_idx index for selecting rows.
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, const Idx& row_idx,
                   index_omni /*col_idx*/) {
  // omni on columns is a no-op: keep all columns, subset rows only.
  return rvalue(std::forward(x), name, row_idx);
}

/**
 * Return columns min:N of the matrix with the range of the columns
 * defined by another index.
 *
 * Types:  matrix[Idx, min] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @tparam Idx An index.
 * @param[in] x type
 * @param[in] name Name of variable
 * @param[in] row_idx index for selecting rows.
 * @param[in] col_idx min index for selecting cols.
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, const Idx& row_idx,
                   index_min col_idx) {
  const Eigen::Index col_size = x.cols() - (col_idx.min_ - 1);
  math::check_range("matrix[..., min] column indexing", name, x.cols(),
                    col_idx.min_);
  return rvalue(x.rightCols(col_size), name, row_idx);
}

/**
 * Return columns 1:max of input matrix with the range of the columns
 * defined by another index.
 *
 * Types:  matrix[Idx, max] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @tparam Idx An index.
 * @param[in] x Eigen type
 * @param[in] name Name of variable
 * @param[in] row_idx index for selecting rows.
 * @param[in] col_idx max index for selecting cols.
 * @throw std::out_of_range If any of the indices are out of bounds.
 */
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, const Idx& row_idx,
                   index_max col_idx) {
  if (col_idx.max_ > 0) {
    math::check_range("matrix[..., max] column indexing", name, x.cols(),
                      col_idx.max_);
    return rvalue(x.leftCols(col_idx.max_), name, row_idx);
  } else {
    // Non-positive max yields an empty column selection.
    return rvalue(x.leftCols(0), name, row_idx);
  }
}

/**
 * Return the result of indexing the specified matrix with a
 * min_max_index returning a block from min to max.
 *
 * Types:  matrix[Idx, min_max] = matrix
 *
 * @tparam Mat An eigen matrix or `var_value` whose inner type is an Eigen
 * matrix.
 * @tparam Idx Type of index.
 * @param[in] x Matrix to index.
 * @param[in] name String form of expression being evaluated.
 * @param[in] row_idx index for selecting rows.
 * @param[in] col_idx min max index for selecting cols.
 * @return Result of indexing matrix.
 */
// NOTE(review): template parameter lists truncated by extraction — confirm
// against upstream Stan sources.
template * = nullptr>
inline auto rvalue(Mat&& x, const char* name, const Idx& row_idx,
                   index_min_max col_idx) {
  math::check_range("matrix[..., min_max] min column indexing", name, x.cols(),
                    col_idx.min_);
  const Eigen::Index col_start = col_idx.min_ - 1;
  if (col_idx.max_ >= col_idx.min_) {
    math::check_range("matrix[..., min_max] max column indexing", name,
                      x.cols(), col_idx.max_);
    return rvalue(x.middleCols(col_start, col_idx.max_ - col_start), name,
                  row_idx);
  } else {
    // Empty range (max < min) yields a zero-column selection.
    return rvalue(x.middleCols(col_start, 0), name, row_idx);
  }
}

/**
 * Return the result of indexing the specified array with
 * a list of indexes beginning with a single index;  the result is
 * determined recursively.  Note that arrays are represented as
 * standard library vectors.
 *
 * Types:  std::vector[uni | Idx] : T[Idx]
 *
 * @tparam T Type of list elements.
 * @tparam Idx Index list type for indexes after first index.
 * @param[in] v Container of list elements.
 * @param[in] name String form of expression being evaluated.
 * @param[in] idx1 first index.
 * @param[in] idxs remaining indices.
 * @return Result of indexing array.
 */
template * = nullptr,
          require_not_t>* = nullptr>
inline auto rvalue(StdVec&& v, const char* name, index_uni idx1,
                   const Idxs&... idxs) {
  math::check_range("array[uni, ...] index", name, v.size(), idx1.n_);
  // rvalue-reference overload: the selected element may be moved from.
  return rvalue(std::move(v[idx1.n_ - 1]), name, idxs...);
}
template * = nullptr>
inline auto rvalue(StdVec& v, const char* name, index_uni idx1,
                   const Idxs&... idxs) {
  math::check_range("array[uni, ...] index", name, v.size(), idx1.n_);
  return rvalue(v[idx1.n_ - 1], name, idxs...);
}

/**
 * Return the result of indexing the specified array with
 * a single index.
 *
 * Types:  std::vector[uni] : T
 *
 * @tparam StdVec a standard vector
 * @param[in] v Container of list elements.
 * @param[in] name String form of expression being evaluated.
 * @param[in] idx single index
 * @return Result of indexing array.
 */
template * = nullptr>
inline const auto& rvalue(const StdVec& v, const char* name, index_uni idx) {
  math::check_range("array[uni, ...] index", name, v.size(), idx.n_);
  return v[idx.n_ - 1];
}
template * = nullptr>
inline auto& rvalue(StdVec& v, const char* name, index_uni idx) {
  math::check_range("array[uni, ...] index", name, v.size(), idx.n_);
  return v[idx.n_ - 1];
}
template * = nullptr,
          require_not_t>* = nullptr>
inline auto rvalue(StdVec&& v, const char* name, index_uni idx) {
  math::check_range("array[uni, ...] index", name, v.size(), idx.n_);
  // rvalue-reference overload: move the selected element out.
  return std::move(v[idx.n_ - 1]);
}

/**
 * Return the result of indexing the specified array with
 * a list of indexes beginning with a multiple index;  the result is
 * determined recursively.  Note that arrays are represented as
 * standard library vectors.
 *
 * Types:  std::vector[Idx1, Idx2] : std::vector[Idx2]
 *
 * @tparam T Type of list elements.
 * @tparam Idx1 Index list type for first index.
 * @tparam Idx2 Index list type for second index index.
 * @param[in] v Container of list elements.
 * @param[in] name String form of expression being evaluated.
 * @param[in] idx1 first index
 * @param[in] idxs remaining indices
 * @return Result of indexing array.
 */
template * = nullptr,
          require_not_same_t* = nullptr>
inline auto rvalue(StdVec&& v, const char* name, const Idx1& idx1,
                   const Idxs&... idxs) {
  using inner_type = plain_type_t;
  const auto index_size = rvalue_index_size(idx1, v.size());
  stan::math::check_greater_or_equal("array[..., ...] indexing", "size",
                                     index_size, 0);
  std::vector result(index_size);
  // Empty min_max / max ranges legitimately produce an empty result.
  if ((std::is_same, index_min_max>::value
       || std::is_same, index_max>::value)
      && index_size == 0) {
    return result;
  }
  for (int i = 0; i < index_size; ++i) {
    const int n = rvalue_at(i, idx1);
    math::check_range("array[..., ...] index", name, v.size(), n);
    // Moving is only safe when the index cannot repeat an element
    // (i.e. not index_multi) and v is an rvalue.
    if ((!std::is_same, index_multi>::value)
        && std::is_rvalue_reference::value) {
      result[i] = rvalue(std::move(v[n - 1]), name, idxs...);
    } else {
      result[i] = rvalue(v[n - 1], name, idxs...);
    }
  }
  return result;
}

}  // namespace model
}  // namespace stan
#endif
StanHeaders/inst/include/src/stan/model/indexing/deep_copy.hpp0000644000176200001440000000135414645137105024205 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_DEEP_COPY_HPP
#define STAN_MODEL_INDEXING_DEEP_COPY_HPP
// NOTE(review): include targets lost in extraction — restore from upstream.
#include
#include
#include

namespace stan {
namespace model {

/**
 * Return the specified argument as a constant reference.
 *
 *

Warning: because of the usage pattern of this class, this * function only needs to return value references, not actual * copies. The functions that call this overload recursively will * be doing the actual copies with assignment. * * @tparam T Any type. * @param x Input value. * @return Copy of input. */ template inline plain_type_t deep_copy(T&& x) { return std::forward(x); } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/indexing/index.hpp0000644000176200001440000000477414645137105023356 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_INDEX_HPP #define STAN_MODEL_INDEXING_INDEX_HPP #include #include namespace stan { namespace model { // SINGLE INDEXING (reduces dimensionality) /** * Structure for an indexing consisting of a single index. * Applying this index reduces the dimensionality of the container * to which it is applied by one. */ struct index_uni { int n_; /** * Construct a single indexing from the specified index. * * @param n single index. */ constexpr explicit index_uni(int n) noexcept : n_(n) {} }; // MULTIPLE INDEXING (does not reduce dimensionality) /** * Structure for an indexing consisting of multiple indexes. The * indexes do not need to be unique or in order. */ struct index_multi { std::vector ns_; /** * Construct a multiple indexing from the specified indexes. * * @param ns multiple indexes. */ template * = nullptr> explicit index_multi(T&& ns) noexcept : ns_(std::forward(ns)) {} }; /** * Structure for an indexing that consists of all indexes for a * container. Applying this index is a no-op. */ struct index_omni {}; /** * Structure for an indexing from a minimum index (inclusive) to * the end of a container. */ struct index_min { int min_; /** * Construct an indexing from the specified minimum index (inclusive). * * @param min minimum index (inclusive). 
*/ constexpr explicit index_min(int min) noexcept : min_(min) {} }; /** * Structure for an indexing from the start of a container to a * specified maximum index (inclusive). */ struct index_max { int max_; /** * Construct an indexing from the start of the container up to * the specified maximum index (inclusive). * * @param max maximum index (inclusive). */ constexpr explicit index_max(int max) noexcept : max_(max) {} }; /** * Structure for an indexing from a minimum index (inclusive) to a * maximum index (inclusive). */ struct index_min_max { int min_; int max_; /** * Return whether the index is positive or negative */ inline bool is_ascending() const noexcept { return min_ <= max_; } /** * Construct an indexing from the specified minimum index * (inclusive) and maximum index (inclusive). * * @param min minimum index (inclusive). * @param max maximum index (inclusive). */ constexpr index_min_max(int min, int max) noexcept : min_(min), max_(max) {} }; } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/indexing/assign_cl.hpp0000644000176200001440000003235614645137105024206 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_ASSIGN_CL_HPP #define STAN_MODEL_INDEXING_ASSIGN_CL_HPP #ifdef STAN_OPENCL #include #include #include #include #include namespace stan { namespace model { namespace internal { inline constexpr const char* print_index_type(stan::model::index_uni) { return "uni index"; } inline const char* print_index_type(stan::model::index_multi) { return "multi index"; } inline constexpr const char* print_index_type(stan::model::index_min) { return "min index"; } inline constexpr const char* print_index_type(stan::model::index_max) { return "max index"; } inline constexpr const char* print_index_type(stan::model::index_min_max) { return "min max index"; } inline constexpr const char* print_index_type(stan::model::index_omni) { return "omni index"; } #ifdef STAN_OPENCL inline const char* print_index_type(const 
stan::math::matrix_cl&) { return "multi index"; } #endif } // namespace internal // prim /** * Assign one primitive kernel generator expression to another, using given * index. * * @tparam ExprLhs type of the assignable prim expression on the left hand side * of the assignment * @tparam ExprRhs type of the prim expression on the right hand side of the * assignment * @tparam RowIndex type of index (a Stan index type or `matrix_cl` instad * of `index_multi`) * @param[in,out] expr_lhs expression on the left hand side of the assignment * @param expr_rhs expression on the right hand side of the assignment * @param name Name of lvalue variable * @param row_index index used for indexing `expr_lhs` * @throw std::out_of_range If the index is out of bounds. * @throw std::invalid_argument If the right hand side size isn't the same as * the indexed left hand side size. */ template * = nullptr, require_all_kernel_expressions_and_none_scalar_t* = nullptr> inline void assign(ExprLhs&& expr_lhs, ExprRhs&& expr_rhs, const char* name, const RowIndex& row_index) { decltype(auto) lhs = rvalue(expr_lhs, name, row_index); stan::math::check_size_match(internal::print_index_type(row_index), "left hand side rows", lhs.rows(), name, expr_rhs.rows()); stan::math::check_size_match(internal::print_index_type(row_index), "left hand side cols", lhs.cols(), name, expr_rhs.cols()); lhs = std::forward(expr_rhs); } /** * Assign one primitive kernel generator expression to another, using given * indices. 
* * @tparam ExprLhs type of the assignable prim expression on the left hand side * of the assignment * @tparam ExprRhs type of the prim expression on the right hand side of the * assignment * @tparam RowIndex type of row index (a Stan index type or `matrix_cl` * instad of `index_multi`) * @tparam ColIndex type of column index (a Stan index type or `matrix_cl` * instad of `index_multi`) * @param[in,out] expr_lhs expression on the left hand side of the assignment * @param expr_rhs expression on the right hand side of the assignment * @param name Name of lvalue variable * @param row_index index used for indexing rows of `expr_lhs` * @param col_index index used for indexing columns of `expr_lhs` * @throw std::out_of_range If the index is out of bounds. * @throw std::invalid_argument If the right hand side size isn't the same as * the indexed left hand side size. */ template * = nullptr, require_all_kernel_expressions_and_none_scalar_t* = nullptr, require_any_not_same_t* = nullptr> inline void assign(ExprLhs&& expr_lhs, ExprRhs&& expr_rhs, const char* name, const RowIndex& row_index, const ColIndex& col_index) { decltype(auto) lhs = rvalue(expr_lhs, name, row_index, col_index); stan::math::check_size_match(internal::print_index_type(row_index), "left hand side rows", lhs.rows(), name, expr_rhs.rows()); stan::math::check_size_match(internal::print_index_type(col_index), "left hand side cols", lhs.cols(), name, expr_rhs.cols()); lhs = std::forward(expr_rhs); } /** * Assign a scalar to a primitive kernel generator expression, using given uni * indices. 
* * @tparam ExprLhs type of the assignable prim expression on the left hand side * of the assignment * @tparam ScalRhs type of the prim scalar on the right hand side of the * assignment * @param[in,out] expr_lhs expression on the left hand side of the assignment * @param scal_rhs scalar on the right hand side of the assignment * @param name Name of lvalue variable * @param row_index index used for indexing rows of `expr_lhs` * @param col_index index used for indexing columns of `expr_lhs` * @throw std::out_of_range If the index is out of bounds. */ template * = nullptr, require_stan_scalar_t* = nullptr> inline void assign(ExprLhs&& expr_lhs, const ScalRhs& scal_rhs, const char* name, const index_uni row_index, const index_uni col_index) { math::block_zero_based(expr_lhs, row_index.n_ - 1, col_index.n_ - 1, 1, 1) = math::constant(scal_rhs, 1, 1); } // rev /** * Assign one primitive or reverse mode kernel generator expression to a reverse * mode one, using given index. * * @tparam ExprLhs type of the assignable rev expression on the left hand side * of the assignment * @tparam ExprRhs type of the prim or rev expression on the right hand side of * the assignment * @tparam RowIndex type of index * @param[in,out] expr_lhs expression on the left hand side of the assignment * @param expr_rhs expression on the right hand side of the assignment * @param name Name of lvalue variable * @param row_index index used for indexing `expr_lhs` (a Stan index type or * `matrix_cl` instad of `index_multi`) * @throw std::out_of_range If the index is out of bounds. * @throw std::invalid_argument If the right hand side size isn't the same as * the indexed left hand side size. 
*/ template * = nullptr, require_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline void assign(ExprLhs&& expr_lhs, const ExprRhs& expr_rhs, const char* name, RowIndex&& row_index) { decltype(auto) lhs_val = rvalue(expr_lhs.val_op(), name, row_index); stan::math::check_size_match(internal::print_index_type(row_index), "left hand side rows", lhs_val.rows(), name, expr_rhs.rows()); stan::math::check_size_match(internal::print_index_type(row_index), "left hand side columns", lhs_val.cols(), name, expr_rhs.cols()); math::arena_matrix_cl prev_vals = lhs_val; lhs_val = math::value_of(expr_rhs); // assign the values math::reverse_pass_callback( [expr_lhs, expr_rhs, name, prev_vals, row_index = math::to_arena(std::forward(row_index))]() mutable { auto&& lhs_val = rvalue(expr_lhs.val_op(), name, row_index); decltype(auto) lhs_adj = rvalue(expr_lhs.adj(), name, row_index); math::results(lhs_val, math::adjoint_of(expr_rhs), lhs_adj) = math::expressions( prev_vals, math::calc_if::value>( math::adjoint_of(expr_rhs) + lhs_adj), math::constant(0.0, lhs_adj.rows(), lhs_adj.cols())); }); } // the "forwarding" overload inline void assign(math::var_value>& expr_lhs, math::var_value>&& expr_rhs, const char* /*name*/, index_omni /*row_index*/) { expr_lhs.vi_ = expr_rhs.vi_; } /** * Assign one primitive or reverse mode kernel generator expression to a reverse * mode one, using given indices. 
* * @tparam ExprLhs type of the assignable rev expression on the left hand side * of the assignment * @tparam ExprRhs type of the prim or rev expression on the right hand side of * the assignment * @tparam RowIndex type of row index (a Stan index type or `matrix_cl` * instad of `index_multi`) * @tparam ColIndex type of column index (a Stan index type or `matrix_cl` * instad of `index_multi`) * @param[in,out] expr_lhs expression on the left hand side of the assignment * @param expr_rhs expression on the right hand side of the assignment * @param name Name of lvalue variable * @param row_index index used for indexing rows of `expr_lhs` * @param col_index index used for indexing columns of `expr_lhs` * @throw std::out_of_range If the index is out of bounds. * @throw std::invalid_argument If the right hand side size isn't the same as * the indexed left hand side size. */ template < typename ExprLhs, typename ExprRhs, typename RowIndex, typename ColIndex, require_rev_kernel_expression_t* = nullptr, require_all_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_any_not_same_t* = nullptr> inline void assign(ExprLhs&& expr_lhs, const ExprRhs& expr_rhs, const char* name, RowIndex&& row_index, ColIndex&& col_index) { decltype(auto) lhs = rvalue(expr_lhs.val_op(), name, row_index, col_index); stan::math::check_size_match(internal::print_index_type(row_index), "left hand side rows", lhs.rows(), name, expr_rhs.rows()); stan::math::check_size_match(internal::print_index_type(col_index), "left hand side cols", lhs.cols(), name, expr_rhs.cols()); math::arena_matrix_cl prev_vals = lhs; lhs = math::value_of(expr_rhs); // assign the values math::reverse_pass_callback( [expr_lhs, expr_rhs, name, prev_vals, row_index = math::to_arena(std::forward(row_index)), col_index = math::to_arena(std::forward(col_index))]() mutable { decltype(auto) lhs_val = rvalue(expr_lhs.val_op(), name, row_index, col_index); decltype(auto) lhs_adj = rvalue(expr_lhs.adj(), name, row_index, 
col_index); math::results(lhs_val, math::adjoint_of(expr_rhs), lhs_adj) = math::expressions( prev_vals, math::calc_if::value>( math::adjoint_of(expr_rhs) + lhs_adj), math::constant(0.0, lhs_adj.rows(), lhs_adj.cols())); }); } // the "forwarding" overload inline void assign(math::var_value>& expr_lhs, math::var_value>&& expr_rhs, const char* /*name*/, index_omni /*row_index*/, index_omni /*col_index*/) { expr_lhs.vi_ = expr_rhs.vi_; } /** * Assign a primitive or reverse mode scalar to a reverse mode kernel generator * expression, using given uni indices. * * @tparam ExprLhs type of the assignable rev expression on the left hand side * of the assignment * @tparam ScalRhs type of the prim or rev scalar on the right hand side of the * assignment * @param[in,out] expr_lhs expression on the left hand side of the assignment * @param scal_rhs scalar on the right hand side of the assignment * @param name Name of lvalue variable * @param row_index index used for indexing rows of `expr_lhs` * @param col_index index used for indexing columns of `expr_lhs` * @throw std::out_of_range If the index is out of bounds. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr> inline void assign(ExprLhs&& expr_lhs, const ScalRhs& scal_rhs, const char* name, const index_uni row_index, const index_uni col_index) { decltype(auto) lhs_val = math::block_zero_based( expr_lhs.val_op(), row_index.n_ - 1, col_index.n_ - 1, 1, 1); math::arena_matrix_cl prev_val = lhs_val; lhs_val = math::constant(math::value_of(scal_rhs), 1, 1); // assign the value math::reverse_pass_callback( [expr_lhs, scal_rhs, row_index, col_index, prev_val]() mutable { auto&& lhs_val = math::block_zero_based( expr_lhs.val_op(), row_index.n_ - 1, col_index.n_ - 1, 1, 1); decltype(auto) lhs_adj = math::block_zero_based( expr_lhs.adj(), row_index.n_ - 1, col_index.n_ - 1, 1, 1); if (!is_constant::value) { math::adjoint_of(scal_rhs) += math::from_matrix_cl(lhs_adj); } math::results(lhs_adj, lhs_val) = math::expressions(math::constant(0.0, 1, 1), prev_val); }); } } // namespace model } // namespace stan #endif #endif StanHeaders/inst/include/src/stan/model/indexing/access_helpers.hpp0000644000176200001440000001332514645137105025222 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_ACCESS_HELPERS_HPP #define STAN_MODEL_INDEXING_ACCESS_HELPERS_HPP #include #include #include #include namespace stan { namespace model { namespace internal { // Internal helpers so we can reuse min_max index assign for Eigen/var template * = nullptr> inline auto rowwise_reverse(const T& x) { return x.rowwise_reverse(); } template * = nullptr> inline auto rowwise_reverse(const T& x) { return x.rowwise().reverse(); } template * = nullptr> inline auto colwise_reverse(const T& x) { return x.colwise_reverse(); } template * = nullptr> inline auto colwise_reverse(const T& x) { return x.colwise().reverse(); } /** * Assign two Stan scalars * @tparam T1 A scalar * @tparam T2 A scalar * @param x The value to assign to * @param y The value to assign from. 
* @param name The name of the variable being assigned to (unused) */ template , is_stan_scalar>* = nullptr> void assign_impl(T1&& x, T2&& y, const char* name) { x = std::forward(y); } /** * Assign two var matrices * @tparam T1 A `var_value` with inner type derived from `Eigen::EigenBase` * @tparam T2 A `var_value` with inner type derived from `Eigen::EigenBase` * @param x The value to assign to * @param y The value to assign from. */ template * = nullptr> void assign_impl(T1&& x, T2&& y, const char* name) { // We are allowed to assign to fully uninitialized matrix if (!x.is_uninitialized()) { static constexpr const char* obj_type = is_vector::value ? "vector" : "matrix"; stan::math::check_size_match( (std::string(obj_type) + " assign columns").c_str(), name, x.cols(), "right hand side columns", y.cols()); stan::math::check_size_match( (std::string(obj_type) + " assign rows").c_str(), name, x.rows(), "right hand side rows", y.rows()); } x = std::forward(y); } /** * Assign an Eigen object to another Eigen object. * @tparam T1 A type derived from `Eigen::EigenBase` * @tparam T2 A type derived from `Eigen::EigenBase` * @param x The value to assign to * @param y The value to assign from. */ template * = nullptr> void assign_impl(T1&& x, T2&& y, const char* name) { // We are allowed to assign to fully uninitialized matrix if (x.size() != 0) { static constexpr const char* obj_type = is_vector::value ? "vector" : "matrix"; stan::math::check_size_match( (std::string(obj_type) + " assign columns").c_str(), name, x.cols(), "right hand side columns", y.cols()); stan::math::check_size_match( (std::string(obj_type) + " assign rows").c_str(), name, x.rows(), "right hand side rows", y.rows()); } x = std::forward(y); } /** * Assign one standard vector to another * @tparam T1 A standard vector * @tparam T2 A standard vector * @param x The value to assign to * @param y The value to assign from. 
*/ template * = nullptr> void assign_impl(T1&& x, T2&& y, const char* name) { // We are allowed to assign to fully uninitialized matrix if (x.size() != 0) { stan::math::check_size_match("assign array size", name, x.size(), "right hand side", y.size()); } x = std::forward(y); } /** * Assigning an `Eigen::Matrix` to a `var` * In this case we need to * 1. Store the previous values from `x` * 2. Assign the values from `y` to the values of `x` * 3. Setup a reverse pass callback that sets the `x` values to it's previous * values and then zero's out the adjoints. * * @tparam Mat1 A `var_value` with inner type derived from `EigenBase` * @tparam Mat2 A type derived from `EigenBase` with an arithmetic scalar. * @param x The var matrix to assign to * @param y The eigen matrix to assign from. * @param name The name of the variable being assigned to */ template * = nullptr, require_eigen_st* = nullptr> void assign_impl(Mat1&& x, Mat2&& y, const char* name) { if (!x.is_uninitialized()) { static constexpr const char* obj_type = is_vector::value ? 
"vector" : "matrix"; stan::math::check_size_match( (std::string(obj_type) + " assign columns").c_str(), name, x.cols(), "right hand side columns", y.cols()); stan::math::check_size_match( (std::string(obj_type) + " assign rows").c_str(), name, x.rows(), "right hand side rows", y.rows()); auto prev_vals = stan::math::to_arena(x.val()); x.vi_->val_ = std::forward(y); stan::math::reverse_pass_callback([x, prev_vals]() mutable { x.vi_->val_ = prev_vals; x.vi_->adj_.setZero(); }); } else { x = stan::math::var_value>(std::forward(y)); } } template struct is_tuple_impl : std::false_type {}; template struct is_tuple_impl> : std::true_type {}; template struct is_tuple : is_tuple_impl> {}; template , internal::is_tuple>* = nullptr> inline void assign_impl(Tuple1&& x, Tuple2&& y, const char* name) { x = std::forward(y); } } // namespace internal } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/indexing/rvalue_varmat.hpp0000644000176200001440000002603014645137105025104 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_RVALUE_VARMAT_HPP #define STAN_MODEL_INDEXING_RVALUE_VARMAT_HPP #include #include #include #include #include #include namespace stan { namespace model { /** * Indexing Notes: * The different index types: * index_uni - A single cell. * index_multi - Access multiple cells. * index_omni - A no-op for all indices along a dimension. * index_min - index from min:N * index_max - index from 1:max * index_min_max - index from min:max * nil_index_list - no-op * The order of the overloads are * vector / row_vector: * - all index overloads * matrix: * - all row index overloads * - Return a subset of rows. * - column/row overloads * - overload on both the row and column indices. * - column overloads * - These take a subset of columns and then call the row slice rvalue * over the column subset. * Std vector: * - single element and elementwise overloads * - General overload for nested std vectors. 
*/ /** * Return a non-contiguous subset of elements in a vector. * * Types: vector[multi] = vector * * @tparam Vec `var_value` with inner Eigen type with either dynamic rows or * columns, but not both. * @param[in] x `var_value` with inner Eigen vector type. * @param[in] name Name of variable * @param[in] idx Sequence of integers. * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the value size isn't the same as * the indexed size. */ template * = nullptr> inline auto rvalue(Vec&& x, const char* name, const index_multi& idx) { using stan::math::arena_allocator; using stan::math::check_range; using stan::math::reverse_pass_callback; using stan::math::var_value; using arena_std_vec = std::vector>; const Eigen::Index x_size = x.size(); const auto ret_size = idx.ns_.size(); arena_t> x_ret_vals(ret_size); arena_std_vec row_idx(ret_size); for (int i = 0; i < ret_size; ++i) { check_range("vector[multi] assign range", name, x_size, idx.ns_[i]); row_idx[i] = idx.ns_[i] - 1; x_ret_vals.coeffRef(i) = x.vi_->val_.coeff(row_idx[i]); } var_value>> x_ret(x_ret_vals); reverse_pass_callback([x, x_ret, row_idx]() mutable { for (Eigen::Index i = 0; i < row_idx.size(); ++i) { x.adj().coeffRef(row_idx[i]) += x_ret.adj().coeff(i); } }); return x_ret; } /** * Return a non-contiguous subset of elements in a matrix. * * Types: matrix[multi] = matrix * * @tparam VarMat `var_value` with inner Eigen type with dynamic rows and * columns. * @param[in] x `var_value` with inner Eigen type * @param[in] name Name of variable * @param[in] idx An index containing integers of rows to access. * @throw std::out_of_range If any of the indices are out of bounds. 
*/ template * = nullptr> inline auto rvalue(VarMat&& x, const char* name, const index_multi& idx) { using stan::math::arena_allocator; using stan::math::check_range; using stan::math::reverse_pass_callback; using stan::math::var_value; using arena_std_vec = std::vector>; const auto ret_rows = idx.ns_.size(); arena_t> x_ret_vals(ret_rows, x.cols()); arena_std_vec row_idx(ret_rows); for (int i = 0; i < ret_rows; ++i) { check_range("matrix[multi] subset range", name, x.rows(), idx.ns_[i]); row_idx[i] = idx.ns_[i] - 1; x_ret_vals.row(i) = x.val().row(row_idx[i]); } var_value>> x_ret(x_ret_vals); reverse_pass_callback([x, x_ret, row_idx]() mutable { for (Eigen::Index i = 0; i < row_idx.size(); ++i) { x.adj().row(row_idx[i]) += x_ret.adj().row(i); } }); return x_ret; } /** * Return a row of a matrix with possibly unordered cells. * * Types: matrix[uni, multi] = row vector * * @tparam VarMat `var_value` with inner Eigen type with dynamic rows and * columns. * @param[in] x `var_value` to index. * @param[in] name Name of variable * @param[in] row_idx uni index for selecting rows. * @param[in] col_idx multi index for selecting cols. * @throw std::out_of_range If any of the indices are out of bounds. 
*/ template * = nullptr> inline auto rvalue(VarMat&& x, const char* name, index_uni row_idx, const index_multi& col_idx) { using stan::math::arena_allocator; using stan::math::check_range; using stan::math::reverse_pass_callback; using stan::math::var_value; using arena_std_vec = std::vector>; check_range("matrix[uni, multi] index range", name, x.rows(), row_idx.n_); const auto ret_size = col_idx.ns_.size(); arena_t> x_ret_vals(ret_size); arena_std_vec col_idx_vals(ret_size); const int row_idx_val = row_idx.n_ - 1; for (int j = 0; j < ret_size; ++j) { check_range("matrix[multi] subset range", name, x.cols(), col_idx.ns_[j]); col_idx_vals[j] = col_idx.ns_[j] - 1; x_ret_vals.coeffRef(j) = x.val().coeff(row_idx_val, col_idx_vals[j]); } var_value> x_ret(x_ret_vals); reverse_pass_callback([x, x_ret, row_idx_val, col_idx_vals]() mutable { for (Eigen::Index i = 0; i < col_idx_vals.size(); ++i) { x.adj().coeffRef(row_idx_val, col_idx_vals[i]) += x_ret.adj().coeff(i); } }); return x_ret; } /** * Return a column of a matrix that is a possibly non-contiguous subset * of the input matrix. * * Types: matrix[multi, uni] = vector * * @tparam VarMat `var_value` with inner Eigen type with dynamic rows and * columns. * @param[in] x `var_value` to index. * @param[in] name Name of variable * @param[in] row_idx multi index for selecting rows. * @param[in] col_idx uni index for selecting cols. * @throw std::out_of_range If any of the indices are out of bounds. 
*/ template * = nullptr> inline auto rvalue(VarMat&& x, const char* name, const index_multi& row_idx, index_uni col_idx) { using stan::math::arena_allocator; using stan::math::check_range; using stan::math::reverse_pass_callback; using stan::math::var_value; using arena_std_vec = std::vector>; check_range("matrix[multi, uni] rvalue range", name, x.cols(), col_idx.n_); const auto ret_size = row_idx.ns_.size(); arena_t> x_ret_val(ret_size); arena_std_vec row_idx_vals(ret_size); const int col_idx_val = col_idx.n_ - 1; for (int i = 0; i < ret_size; ++i) { check_range("matrix[multi, uni] rvalue range", name, x.rows(), row_idx.ns_[i]); row_idx_vals[i] = row_idx.ns_[i] - 1; x_ret_val.coeffRef(i) = x.val().coeff(row_idx_vals[i], col_idx_val); } var_value> x_ret(x_ret_val); reverse_pass_callback([x, x_ret, col_idx_val, row_idx_vals]() mutable { for (Eigen::Index i = 0; i < row_idx_vals.size(); ++i) { x.adj().coeffRef(row_idx_vals[i], col_idx_val) += x_ret.adj().coeff(i); } }); return x_ret; } /** * Return a matrix that is a possibly non-contiguous subset of the input * matrix. * * Types: matrix[multi, multi] = matrix * * @tparam VarMat `var_value` with an inner eigen matrix * @param[in] x `var_value` to index. * @param[in] name String form of expression being evaluated. * @param[in] row_idx uni index for selecting rows. * @param[in] col_idx multi index for selecting cols. * @return Result of indexing matrix. 
*/ template * = nullptr> inline auto rvalue(VarMat&& x, const char* name, const index_multi& row_idx, const index_multi& col_idx) { using stan::math::arena_allocator; using stan::math::check_range; using stan::math::reverse_pass_callback; using stan::math::var_value; using arena_std_vec = std::vector>; const auto ret_rows = row_idx.ns_.size(); const auto ret_cols = col_idx.ns_.size(); const Eigen::Index x_rows = x.rows(); arena_t>> x_ret_val(ret_rows, ret_cols); arena_std_vec row_idx_vals(ret_rows); arena_std_vec col_idx_vals(ret_cols); // We only want to check these once for (int i = 0; i < ret_rows; ++i) { check_range("matrix[multi,multi] row index", name, x_rows, row_idx.ns_[i]); row_idx_vals[i] = row_idx.ns_[i] - 1; } for (int j = 0; j < ret_cols; ++j) { check_range("matrix[multi,multi] col index", name, x.cols(), col_idx.ns_[j]); col_idx_vals[j] = col_idx.ns_[j] - 1; for (int i = 0; i < ret_rows; ++i) { x_ret_val.coeffRef(i, j) = x.val().coeff(row_idx_vals[i], col_idx_vals[j]); } } var_value>> x_ret(x_ret_val); reverse_pass_callback([x, x_ret, col_idx_vals, row_idx_vals]() mutable { for (int j = 0; j < col_idx_vals.size(); ++j) { for (int i = 0; i < row_idx_vals.size(); ++i) { x.adj().coeffRef(row_idx_vals[i], col_idx_vals[j]) += x_ret.adj().coeff(i, j); } } }); return x_ret; } /** * Return a matrix of possibly unordered columns with each column * range specified by another index. * * Types: matrix[Idx, multi] = matrix * * @tparam VarMat `var_value` with inner an eigen matrix * @param[in] x `var_value` object. * @param[in] name String form of expression being evaluated. * @param[in] row_idx index for selecting rows. * @param[in] col_idx multi index for selecting cols. * @return Result of indexing matrix. 
*/ template * = nullptr> inline auto rvalue(VarMat&& x, const char* name, const Idx& row_idx, const index_multi& col_idx) { using stan::math::arena_allocator; using stan::math::check_range; using stan::math::reverse_pass_callback; using stan::math::var_value; using arena_std_vec = std::vector>; const auto ret_rows = rvalue_index_size(row_idx, x.rows()); const auto ret_cols = col_idx.ns_.size(); arena_t> x_ret_val(ret_rows, ret_cols); arena_std_vec col_idx_vals(ret_cols); for (int j = 0; j < ret_cols; ++j) { check_range("matrix[..., multi] col index", name, x.cols(), col_idx.ns_[j]); col_idx_vals[j] = col_idx.ns_[j] - 1; x_ret_val.col(j) = rvalue(x.val().col(col_idx_vals[j]), name, row_idx); } var_value> x_ret(x_ret_val); // index_multi is the only index with dynamic memory so row_idx is safe reverse_pass_callback([row_idx, x, x_ret, col_idx_vals]() mutable { for (size_t j = 0; j < col_idx_vals.size(); ++j) { for (size_t i = 0; i < x_ret.rows(); ++i) { const int n = rvalue_at(i, row_idx) - 1; x.adj().coeffRef(n, col_idx_vals[j]) += x_ret.adj().coeff(i, j); } } }); return x_ret; } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/indexing/rvalue_cl.hpp0000644000176200001440000003312014645137105024206 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_RVALUE_CL_HPP #define STAN_MODEL_INDEXING_RVALUE_CL_HPP #ifdef STAN_OPENCL #include #include #include #include namespace stan { namespace model { namespace internal { inline auto cl_row_index(index_uni i, int rows, const char* name) { math::check_range("uni indexing", name, rows, i.n_); return math::constant(i.n_ - 1, 1, -1); } template * = nullptr, require_all_vt_same* = nullptr> inline auto cl_row_index(const T& i, int rows, const char* name) { return math::rowwise_broadcast(i - 1); } inline auto cl_row_index(index_omni /*i*/, int rows, const char* name) { return math::row_index(rows, -1); } inline auto cl_row_index(index_min i, int rows, const char* name) { stan::math::check_range("min 
indexing", name, rows, i.min_); return math::row_index(rows - (i.min_ - 1), -1) + (i.min_ - 1); } inline auto cl_row_index(index_max i, int rows, const char* name) { stan::math::check_range("max indexing", name, rows, i.max_); return math::row_index(i.max_, -1); } inline auto cl_row_index(index_min_max i, int rows, const char* name) { math::check_range("min_max indexing min", name, rows, i.min_); math::check_range("min_max indexing max", name, rows, i.max_); if (i.min_ <= i.max_) { return 1 * math::row_index(i.max_ - (i.min_ - 1), -1) + (i.min_ - 1); } else { return 1 * math::row_index(0, -1) + (i.min_ - 1); } } inline auto cl_col_index(index_uni i, int cols, const char* name) { math::check_range("uni indexing", name, cols, i.n_); return math::constant(i.n_ - 1, -1, 1); } template * = nullptr, require_all_vt_same* = nullptr> inline auto cl_col_index(const T& i, int cols, const char* name) { return math::colwise_broadcast(math::transpose(i - 1)); } inline auto cl_col_index(index_omni i, int cols, const char* name) { return math::col_index(-1, cols); } inline auto cl_col_index(index_min i, int cols, const char* name) { stan::math::check_range("min indexing", name, cols, i.min_); return math::col_index(-1, cols - (i.min_ - 1)) + (i.min_ - 1); } inline auto cl_col_index(index_max i, int cols, const char* name) { stan::math::check_range("max indexing", name, cols, i.max_); return math::col_index(-1, i.max_); } inline auto cl_col_index(index_min_max i, int cols, const char* name) { math::check_range("min_max indexing min", name, cols, i.min_); math::check_range("min_max indexing max", name, cols, i.max_); if (i.min_ <= i.max_) { return 1 * math::col_index(-1, i.max_ - (i.min_ - 1)) + (i.min_ - 1); } else { return 1 * math::col_index(-1, 0) + (i.min_ - 1); } } template inline void index_check(Index i, const char* name, int dim) {} inline void index_check(const index_min_max& i, const char* name, int dim) { math::check_range("min_max indexing min", name, dim, i.min_); 
math::check_range("min_max indexing max", name, dim, i.max_); } inline void index_check(const index_max& i, const char* name, int dim) { math::check_range("min_max indexing min", name, dim, i.max_); } inline void index_check(const index_min& i, const char* name, int dim) { math::check_range("min_max indexing", name, dim, i.min_); } inline void index_check(const index_uni& i, const char* name, int dim) { math::check_range("uni indexing", name, dim, i.n_); } inline void index_check(const math::matrix_cl& i, const char* name, int dim) { try { math::check_cl("multi-indexing", name, i, "within range") = static_cast(stan::error_index::value) <= i && i < dim + static_cast(stan::error_index::value); } catch (const std::domain_error& e) { throw std::out_of_range(e.what()); } } } // namespace internal // prim /** * Index a prim kernel generator expression with one index. * * @tparam Expr type of the expression * @tparam RowIndex type of index * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index index * @return result of indexing */ template * = nullptr> inline auto rvalue(Expr&& expr, const char* name, const RowIndex& row_index) { internal::index_check(row_index, name, expr.rows()); return math::indexing(expr, internal::cl_row_index(row_index, expr.rows(), name), math::col_index(-1, expr.cols())); } /** * Index a prim kernel generator expression with two indices. 
* * @tparam Expr type of the expression * @tparam RowIndex type of row index * @tparam ColIndex type of column index * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index row index * @param col_index column index * @return result of indexing */ template * = nullptr, require_any_not_same_t* = nullptr> inline auto rvalue(Expr&& expr, const char* name, const RowIndex& row_index, const ColIndex& col_index) { internal::index_check(row_index, name, expr.rows()); internal::index_check(col_index, name, expr.cols()); return math::indexing(expr, internal::cl_row_index(row_index, expr.rows(), name), internal::cl_col_index(col_index, expr.cols(), name)); } /** * Index a prim kernel generator expression with two single indices. * * @tparam Expr type of the expression * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index row index * @param col_index column index * @return result of indexing (scalar) */ template * = nullptr> inline auto rvalue(Expr&& expr, const char* name, const index_uni row_index, const index_uni col_index) { using Val = stan::value_type_t; decltype(auto) expr_eval = expr.eval(); math::check_range("uni indexing", name, expr_eval.rows(), row_index.n_); math::check_range("uni indexing", name, expr_eval.cols(), col_index.n_); cl::CommandQueue queue = stan::math::opencl_context.queue(); Val res; try { cl::Event copy_event; queue.enqueueReadBuffer( expr_eval.buffer(), true, sizeof(Val) * (row_index.n_ - 1 + (col_index.n_ - 1) * expr_eval.rows()), sizeof(Val), &res, &expr_eval.write_events(), ©_event); copy_event.wait(); } catch (const cl::Error& e) { std::ostringstream m; m << "uni uni indexing of " << name; stan::math::check_opencl_error(m.str().c_str(), e); } return res; } // rev, without multi-index - no data races /** * Index a rev kernel generator expression 
with one (non multi-) index. * * @tparam Expr type of the expression * @tparam RowIndex type of index * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index index * @return result of indexing */ template * = nullptr, require_not_same_t>* = nullptr> inline auto rvalue(Expr&& expr, const char* name, const RowIndex row_index) { int rows = expr.rows(); internal::index_check(row_index, name, rows); auto res_vari = expr.vi_->index(internal::cl_row_index(row_index, expr.rows(), name), math::col_index(-1, expr.cols())); return math::var_value>( new decltype(res_vari)(std::move(res_vari))); } /** * Index a rev kernel generator expression with two (non-multi) indices. * * @tparam Expr type of the expression * @tparam RowIndex type of row index * @tparam ColIndex type of column index * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index row index * @param col_index column index * @return result of indexing */ template * = nullptr, require_not_same_t>* = nullptr, require_not_same_t>* = nullptr, require_any_not_same_t* = nullptr> inline auto rvalue(Expr&& expr, const char* name, const RowIndex row_index, const ColIndex col_index) { int rows = expr.rows(); int cols = expr.cols(); internal::index_check(row_index, name, rows); internal::index_check(col_index, name, cols); auto res_vari = expr.vi_->index(internal::cl_row_index(row_index, rows, name), internal::cl_col_index(col_index, expr.cols(), name)); return math::var_value>( new decltype(res_vari)(std::move(res_vari))); } /** * Index a rev kernel generator expression with two uni indices. 
* * @tparam Expr type of the expression * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index row index * @param col_index column index * @return result of indexing */ template * = nullptr> inline math::var rvalue(Expr&& expr, const char* name, const index_uni row_index, const index_uni col_index) { using Val = stan::value_type_t>; math::check_range("uni indexing", name, expr.rows(), row_index.n_); math::check_range("uni indexing", name, expr.cols(), col_index.n_); cl::CommandQueue queue = stan::math::opencl_context.queue(); Val res; try { cl::Event copy_event; queue.enqueueReadBuffer( expr.val().buffer(), true, sizeof(Val) * (row_index.n_ - 1 + (col_index.n_ - 1) * expr.rows()), sizeof(Val), &res, &expr.val().write_events(), ©_event); copy_event.wait(); } catch (const cl::Error& e) { std::ostringstream m; m << "uni uni indexing of " << name; stan::math::check_opencl_error(m.str().c_str(), e); } return math::make_callback_var( res, [expr, row_index, col_index](math::vari res_vari) mutable { block(expr.adj(), row_index.n_, col_index.n_, 1, 1) += math::constant(res_vari.adj(), 1, 1); }); } // rev, with multi-index - possible data races in rev, needs special kernel /** * Index a rev kernel generator expression with one multi-index. 
* * @tparam Expr type of the expression * @tparam RowIndex type of index * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index index * @return result of indexing */ template * = nullptr> inline auto rvalue(Expr&& expr, const char* name, const math::matrix_cl& row_index) { internal::index_check(row_index, name, expr.rows()); auto row_idx_expr = math::rowwise_broadcast(row_index - 1); auto col_idx_expr = math::col_index(-1, expr.cols()); auto res_expr = math::indexing(expr.val_op(), row_idx_expr, col_idx_expr); auto lin_idx_expr = row_idx_expr + col_idx_expr * static_cast(expr.rows()); math::matrix_cl res; math::arena_matrix_cl lin_idx; math::results(res, lin_idx) = math::expressions(res_expr, lin_idx_expr); return make_callback_var( res, [expr, lin_idx]( math::vari_value>& res_vari) mutable { math::indexing_rev(expr.adj(), lin_idx, res_vari.adj()); }); } /** * Index a rev kernel generator expression with two indices, at least one of * which is multi-index. 
* * @tparam Expr type of the expression * @tparam RowIndex type of row index * @tparam ColIndex type of column index * @param expr a prim kernel generator expression to index * @param name name of value being indexed (if named, otherwise an empty string) * @param row_index row index * @param col_index column index * @return result of indexing */ template < typename Expr, typename RowIndex, typename ColIndex, require_rev_kernel_expression_t* = nullptr, require_any_t>, std::is_same>>* = nullptr> inline auto rvalue(Expr&& expr, const char* name, const RowIndex& row_index, const ColIndex& col_index) { int rows = expr.rows(); int cols = expr.cols(); internal::index_check(row_index, name, rows); internal::index_check(col_index, name, cols); auto row_idx_expr = internal::cl_row_index(row_index, rows, name); auto col_idx_expr = internal::cl_col_index(col_index, cols, name); auto res_expr = math::indexing(expr.val_op(), row_idx_expr, col_idx_expr); auto lin_idx_expr = row_idx_expr + col_idx_expr * rows; math::matrix_cl res; math::arena_matrix_cl lin_idx; math::results(res, lin_idx) = math::expressions(res_expr, lin_idx_expr); return make_callback_var( res, [expr, lin_idx]( math::vari_value>& res_vari) mutable { math::indexing_rev(expr.adj(), lin_idx, res_vari.adj()); }); } } // namespace model } // namespace stan #endif #endif StanHeaders/inst/include/src/stan/model/indexing/rvalue_index_size.hpp0000644000176200001440000000406114645137105025753 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_RVALUE_INDEX_SIZE_HPP #define STAN_MODEL_INDEXING_RVALUE_INDEX_SIZE_HPP #include #ifdef STAN_OPENCL #include #endif namespace stan { namespace model { // no error checking /** * Return size of specified multi-index. * * @param[in] idx Input index (from 1). * @param[in] size Size of container (ignored here). * @return Size of result. 
*/ inline int rvalue_index_size(const index_multi& idx, int size) noexcept { return idx.ns_.size(); } inline constexpr int rvalue_index_size(const index_uni& idx, int size) noexcept { return 1; } /** * Return size of specified omni-index for specified size of * input. * * @param[in] idx Input index (from 1). * @param[in] size Size of container. * @return Size of result. */ inline int rvalue_index_size(const index_omni& idx, int size) noexcept { return size; } /** * Return size of specified min index for specified size of * input. * * @param[in] idx Input index (from 1). * @param[in] size Size of container. * @return Size of result. */ inline int rvalue_index_size(const index_min& idx, int size) noexcept { return size - idx.min_ + 1; } /** * Return size of specified max index. * * @param[in] idx Input index (from 1). * @param[in] size Size of container (ignored). * @return Size of result. */ inline int rvalue_index_size(const index_max& idx, int size) noexcept { if (idx.max_ > 0) { return idx.max_; } else { return 0; } } /** * Return size of specified min - max index. * * @param[in] idx Input index (from 1). * @param[in] size Size of container (ignored). * @return Size of result. */ inline int rvalue_index_size(const index_min_max& idx, int size) noexcept { return (idx.max_ < idx.min_) ? 
0 : (idx.max_ - idx.min_ + 1); } #ifdef STAN_OPENCL inline int rvalue_index_size(const stan::math::matrix_cl& idx, int size) noexcept { return idx.size(); } #endif } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/indexing/rvalue_at.hpp0000644000176200001440000000415714645137105024224 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_RVALUE_AT_HPP #define STAN_MODEL_INDEXING_RVALUE_AT_HPP #include namespace stan { namespace model { // relative indexing from 0; multi-indexing and return from 1 // no error checking from these methods, just indexing /** * Return the index in the underlying array corresponding to the * specified position in the specified multi-index. * * @param[in] n Relative index position (from 0). * @param[in] idx Index (from 1). * @return Underlying index position (from 1). */ inline int rvalue_at(int n, const index_multi& idx) { return idx.ns_[n]; } /** * Return the index in the underlying array corresponding to the * specified position in the specified omni-index. * * @param[in] n Relative index position (from 0). * @param[in] idx Index (from 1). * @return Underlying index position (from 1). */ inline int rvalue_at(int n, const index_omni& idx) { return n + 1; } /** * Return the index in the underlying array corresponding to the * specified position in the specified min-index. * * All indexing begins from 1. * * @param[in] n Relative index position (from 0). * @param[in] idx Index (from 1) * @return Underlying index position (from 1). */ inline int rvalue_at(int n, const index_min& idx) { return idx.min_ + n; } /** * Return the index in the underlying array corresponding to the * specified position in the specified max-index. * * All indexing begins from 1. * * @param[in] n Relative index position (from 0). * @param[in] idx Index (from 1). * @return Underlying index position (from 1). 
*/ inline int rvalue_at(int n, const index_max& idx) { return n + 1; } /** * Return the index in the underlying array corresponding to the * specified position in the specified min-max-index. * * All indexing begins from 1. * * @param[in] n Relative index position (from 0). * @param[in] idx Index (from 1). * @return Underlying index position (from 1). */ inline int rvalue_at(int n, const index_min_max& idx) { if (idx.min_ <= idx.max_) { return idx.min_ + n; } else { return 0; } } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/indexing/assign.hpp0000644000176200001440000011207614645137105023526 0ustar liggesusers#ifndef STAN_MODEL_INDEXING_ASSIGN_HPP #define STAN_MODEL_INDEXING_ASSIGN_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace model { /** * Indexing Notes: * The different index types: * index_uni - A single cell. * index_multi - Access multiple cells. * index_omni - A no-op for all indices along a dimension. * index_min - index from min:N * index_max - index from 1:max * index_min_max - index from min:max * nil_index_list - no-op * The order of the overloads are * vector / row_vector: * - all index overloads * matrix: * - all row index overloads * - Assigns a subset of rows. * - column/row overloads * - overload on both the row and column indices. * - column overloads * - These take a subset of columns and then call the row slice assignment * over the column subset. * Std vector: * - single element and elementwise overloads * - General overload for nested std vectors. */ /** * Assign one object to another. 
* * @tparam T lvalue variable type * @tparam U rvalue variable type, which must be assignable to `T` * @param[in,out] x lvalue * @param[in] y rvalue * @param[in] name Name of lvalue variable */ template < typename T, typename U, require_t&, std::decay_t>>* = nullptr, require_all_not_t, internal::is_tuple>* = nullptr> inline void assign(T&& x, U&& y, const char* name) { internal::assign_impl(x, std::forward(y), name); } /** * Assign to a single element of an Eigen Vector. * * Types: vector[uni] <- scalar * * @tparam Vec Eigen type with either dynamic rows or columns, but not both. * @tparam U Type of value (must be assignable to T). * @param[in] x Vector variable to be assigned. * @param[in] y Value to assign. * @param[in] name Name of variable * @param[in] idx index to assign to. * @throw std::out_of_range If the index is out of bounds. */ template * = nullptr, require_stan_scalar_t* = nullptr> inline void assign(Vec&& x, const U& y, const char* name, index_uni idx) { stan::math::check_range("vector[uni] assign", name, x.size(), idx.n_); x.coeffRef(idx.n_ - 1) = y; } /** * Assign to a non-contiguous subset of elements in a vector. * * Types: vector[multi] <- vector * * @tparam Vec1 Eigen type with either dynamic rows or columns, but not both. * @tparam Vec2 Eigen type with either dynamic rows or columns, but not both. * @param[in] x Vector to be assigned. * @param[in] y Value vector. * @param[in] name Name of variable * @param[in] idx Index holding an `std::vector` of cells to assign to. * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the value size isn't the same as * the indexed size. 
*/ template * = nullptr> inline void assign(Vec1&& x, const Vec2& y, const char* name, const index_multi& idx) { const auto& y_ref = stan::math::to_ref(y); stan::math::check_size_match("vector[multi] assign", name, idx.ns_.size(), "right hand side", y_ref.size()); const auto x_size = x.size(); for (int n = 0; n < y_ref.size(); ++n) { stan::math::check_range("vector[multi] assign", name, x_size, idx.ns_[n]); x.coeffRef(idx.ns_[n] - 1) = y_ref.coeff(n); } } /** * Assign to a range of an Eigen vector * * Types: vector[min_max] <- vector * * @tparam Vec1 A type with either dynamic rows or columns, but not both. * @tparam Vec2 A type with either dynamic rows or columns, but not both. * @param[in] x vector variable to be assigned. * @param[in] y Value vector. * @param[in] name Name of variable * @param[in] idx `index_min_max`. * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the value size isn't the same as * the indexed size. */ template * = nullptr, require_all_not_std_vector_t* = nullptr> inline void assign(Vec1&& x, const Vec2& y, const char* name, index_min_max idx) { if (likely(idx.max_ >= idx.min_)) { stan::math::check_range("vector[min_max] min assign", name, x.size(), idx.min_); stan::math::check_range("vector[min_max] max assign", name, x.size(), idx.max_); const auto slice_start = idx.min_ - 1; const auto slice_size = idx.max_ - slice_start; stan::math::check_size_match("vector[min_max] assign", name, slice_size, "right hand side", y.size()); internal::assign_impl(x.segment(slice_start, slice_size), y, name); } else { stan::math::check_size_match("vector[negative_min_max] assign", name, 0, "right hand side", y.size()); } } /** * Assign to a tail slice of a vector * * Types: vector[min:N] <- vector * * @tparam Vec1 A type with either dynamic rows or columns, but not both. * @tparam Vec2 A type with either dynamic rows or columns, but not both. * @param[in] x vector to be assigned to. 
* @param[in] y Value vector. * @param[in] name Name of variable * @param[in] idx An index. * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the value size isn't the same as * the indexed size. */ template * = nullptr, require_all_not_std_vector_t* = nullptr> inline void assign(Vec1&& x, const Vec2& y, const char* name, index_min idx) { stan::math::check_range("vector[min] assign", name, x.size(), idx.min_); stan::math::check_size_match("vector[min] assign", name, x.size() - idx.min_ + 1, "right hand side", y.size()); internal::assign_impl(x.tail(x.size() - idx.min_ + 1), y, name); } /** * Assign to a head slice of the assignee vector * * Types: vector[1:max] <- vector * * @tparam Vec1 A type with either dynamic rows or columns, but not both. * @tparam Vec2 A type with either dynamic rows or columns, but not both. * @param[in] x vector to be assigned to. * @param[in] y Value vector. * @param[in] name Name of variable * @param[in] idx An index. * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the value size isn't the same as * the indexed size. */ template * = nullptr, require_all_not_std_vector_t* = nullptr> inline void assign(Vec1&& x, const Vec2& y, const char* name, index_max idx) { if (likely(idx.max_ > 0)) { stan::math::check_range("vector[max] assign", name, x.size(), idx.max_); stan::math::check_size_match("vector[max] assign", name, idx.max_, "right hand side", y.size()); internal::assign_impl(x.head(idx.max_), y, name); } else { stan::math::check_size_match("vector[max < 1] assign", name, 0, "right hand side", y.size()); } } /** * Assign a vector to another vector. * * Types: vector[omni] <- vector * * @tparam Vec1 A type with either dynamic rows or columns, but not both. * @tparam Vec2 A type with either dynamic rows or columns, but not both. * @param[in] x vector to be assigned to. * @param[in] y Value vector. 
* @param[in] name Name of variable * @throw std::invalid_argument If the value size isn't the same as * the indexed size. */ template * = nullptr, require_all_not_std_vector_t* = nullptr> inline void assign(Vec1&& x, Vec2&& y, const char* name, index_omni /* idx */) { stan::math::check_size_match("vector[omni] assign", name, x.size(), "right hand side", y.size()); internal::assign_impl(x, std::forward(y), name); } /** * Assign a row vector to a row of an eigen matrix. * * Types: mat[uni] = row_vector * * @tparam Mat A type with dynamic rows and columns. * @tparam RowVec A type with dynamic columns and a compile time rows equal * to 1. * @param[in] x Matrix variable to be assigned. * @param[in] y Value row vector. * @param[in] name Name of variable * @param[in] idx An index holding the row to be assigned to. * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the number of columns in the row * vector and matrix do not match. */ template * = nullptr, require_row_vector_t* = nullptr> inline void assign(Mat&& x, const RowVec& y, const char* name, index_uni idx) { stan::math::check_range("matrix[uni] assign row", name, x.rows(), idx.n_); stan::math::check_size_match("matrix[uni] assign columns", name, x.cols(), "right hand side size", y.size()); internal::assign_impl(x.row(idx.n_ - 1), y, name); } /** * Assign to a non-contiguous subset of a matrice's rows. * * Types: mat[multi] = mat * * @tparam Mat An Eigen type with dynamic rows and columns. * @tparam Mat2 An Eigen type with dynamic rows and columns. * @param[in] x Matrix variable to be assigned. * @param[in] y Value matrix. * @param[in] name Name of variable * @param[in] idx multi index * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. 
*/ template * = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, const index_multi& idx) { const auto& y_ref = stan::math::to_ref(y); stan::math::check_size_match("matrix[multi] assign rows", name, idx.ns_.size(), "right hand side rows", y.rows()); stan::math::check_size_match("matrix[multi] assign columns", name, x.cols(), "right hand side columns", y.cols()); for (int i = 0; i < idx.ns_.size(); ++i) { const int n = idx.ns_[i]; stan::math::check_range("matrix[multi] assign row", name, x.rows(), n); x.row(n - 1) = y_ref.row(i); } } /** * Assign a matrix to another matrix * * Types: mat[omni] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type with dynamic rows and columns. * @param[in] x Matrix variable to be assigned. * @param[in] y Value matrix. * @param[in] name Name of variable * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. */ template * = nullptr> inline void assign(Mat1&& x, Mat2&& y, const char* name, index_omni /* idx */) { stan::math::check_size_match("matrix[omni] assign rows", name, x.rows(), "right hand side rows", y.rows()); stan::math::check_size_match("matrix[omni] assign columns", name, x.cols(), "right hand side columns", y.cols()); internal::assign_impl(x, std::forward(y), name); } /** * Assign a matrix to the bottom rows of the assignee matrix. * * Types: mat[min] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type with dynamic rows and columns. * @param[in] x Matrix variable to be assigned. * @param[in] y Value matrix. * @param[in] name Name of variable * @param[in] idx min index * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. 
*/ template * = nullptr, require_matrix_t* = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, index_min idx) { const auto row_size = x.rows() - (idx.min_ - 1); stan::math::check_range("matrix[min] assign row", name, x.rows(), idx.min_); stan::math::check_size_match("matrix[min] assign rows", name, row_size, "right hand side rows", y.rows()); stan::math::check_size_match("matrix[min] assign columns", name, x.cols(), "right hand side columns", y.cols()); internal::assign_impl(x.bottomRows(row_size), y, name); } /** * Assign a matrix to the top rows of the assignee matrix. * * Types: mat[max] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type with dynamic rows and columns. * @param[in] x Matrix variable to be assigned. * @param[in] y Value matrix. * @param[in] name Name of variable * @param[in] idx max index * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. */ template * = nullptr, require_matrix_t* = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, index_max idx) { stan::math::check_size_match("matrix[max] assign columns", name, x.cols(), "right hand side columns", y.cols()); if (likely(idx.max_ > 0)) { stan::math::check_range("matrix[max] assign row", name, x.rows(), idx.max_); stan::math::check_size_match("matrix[max] assign rows", name, idx.max_, "right hand side rows", y.rows()); internal::assign_impl(x.topRows(idx.max_), y, name); } else { stan::math::check_size_match("matrix[max < 1] assign rows", name, 0, "right hand side rows", y.rows()); } } /** * Assign a matrix to a range of rows of the assignee matrix. * * Types: mat[min_max] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type with dynamic rows and columns. * @param[in] x Matrix variable to be assigned. * @param[in] y Value matrix. 
* @param[in] name Name of variable * @param[in] idx An index for a min_max range of rows * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. */ template * = nullptr, require_matrix_t* = nullptr> inline void assign(Mat1&& x, Mat2&& y, const char* name, index_min_max idx) { stan::math::check_size_match("matrix[min_max] assign columns", name, x.cols(), "right hand side columns", y.cols()); if (likely(idx.max_ >= idx.min_)) { stan::math::check_range("matrix[min_max] min row indexing", name, x.rows(), idx.min_); stan::math::check_range("matrix[min_max] max row indexing", name, x.rows(), idx.max_); const auto row_size = idx.max_ - idx.min_ + 1; stan::math::check_size_match("matrix[min_max] assign rows", name, row_size, "right hand side rows", y.rows()); internal::assign_impl(x.middleRows(idx.min_ - 1, row_size), y, name); } else { stan::math::check_size_match("matrix[negative_min_max] assign rows", name, 0, "right hand side", y.rows()); } } /** * Assign to a block of an Eigen matrix. * * Types: mat[min_max, min_max] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type with dynamic rows and columns. * @param[in] x Matrix variable to be assigned. * @param[in] y Matrix variable to assign from. * @param[in] name Name of variable * @param[in] row_idx min_max index for selecting rows * @param[in] col_idx min_max index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. 
*/ template * = nullptr> inline void assign(Mat1&& x, Mat2&& y, const char* name, index_min_max row_idx, index_min_max col_idx) { if (likely((row_idx.max_ >= row_idx.min_) && (col_idx.max_ >= col_idx.min_))) { stan::math::check_range("matrix[min_max, min_max] assign min row", name, x.rows(), row_idx.min_); stan::math::check_range("matrix[min_max, min_max] assign min column", name, x.cols(), col_idx.min_); stan::math::check_range("matrix[min_max, min_max] assign max row", name, x.rows(), row_idx.max_); stan::math::check_range("matrix[min_max, min_max] assign max column", name, x.cols(), col_idx.max_); auto row_size = row_idx.max_ - (row_idx.min_ - 1); auto col_size = col_idx.max_ - (col_idx.min_ - 1); stan::math::check_size_match("matrix[min_max, min_max] assign rows", name, row_size, "right hand side rows", y.rows()); stan::math::check_size_match("matrix[min_max, min_max] assign columns", name, col_size, "right hand side columns", y.cols()); internal::assign_impl( x.block(row_idx.min_ - 1, col_idx.min_ - 1, row_size, col_size), y, name); } else { // Check the indexing is valid but don't actually do assignment if (row_idx.max_ >= row_idx.min_) { stan::math::check_range( "matrix[min_max, negative_min_max] assign min row", name, x.rows(), row_idx.min_); stan::math::check_range( "matrix[min_max, negative_min_max] assign max row", name, x.rows(), row_idx.max_); stan::math::check_size_match("matrix[min_max, negative_min_max] assign", name, 0, "right hand side columns", y.cols()); } else if (col_idx.max_ >= col_idx.min_) { stan::math::check_range( "matrix[negative_min_max, min_max] assign min column", name, x.cols(), col_idx.min_); stan::math::check_range( "matrix[negative_min_max, min_max] assign max column", name, x.cols(), col_idx.max_); stan::math::check_size_match("matrix[min_max, min_max] assign rows", name, 0, "right hand side", y.rows()); } else { stan::math::check_size_match( "matrix[negative_min_max, negative_min_max] assign rows", name, 0, "right hand side", 
y.rows()); stan::math::check_size_match( "matrix[negative_min_max, negative_min_max] assign cols", name, 0, "right hand side", y.cols()); } } } /** * Assign to a cell of an Eigen Matrix. * * Types: mat[single, single] = scalar * * @tparam Mat Eigen type with dynamic rows and columns. * @tparam U Scalar type. * @param[in] x Matrix variable to be assigned. * @param[in] y Value scalar. * @param[in] name Name of variable * @param[in] row_idx uni index for selecting rows * @param[in] col_idx uni index for selecting columns * @throw std::out_of_range If either of the indices are out of bounds. */ template * = nullptr> inline void assign(Mat&& x, const U& y, const char* name, index_uni row_idx, index_uni col_idx) { stan::math::check_range("matrix[uni,uni] assign row", name, x.rows(), row_idx.n_); stan::math::check_range("matrix[uni,uni] assign column", name, x.cols(), col_idx.n_); x.coeffRef(row_idx.n_ - 1, col_idx.n_ - 1) = y; } /** * Assign multiple possibly unordered cells of row vector to a row of an eigen * matrix. * * Types: mat[uni, multi] = row_vector * * @tparam Mat1 Eigen type with dynamic rows and columns. * @tparam Vec Eigen type with dynamic columns and compile time rows of 1. * @param[in] x Matrix variable to be assigned. * @param[in] y Row vector. * @param[in] name Name of variable * @param[in] row_idx uni index for selecting rows * @param[in] col_idx multi index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and value matrix do not match. 
*/ template * = nullptr, require_eigen_row_vector_t* = nullptr> inline void assign(Mat1&& x, const Vec& y, const char* name, index_uni row_idx, const index_multi& col_idx) { const auto& y_ref = stan::math::to_ref(y); stan::math::check_range("matrix[uni, multi] assign row", name, x.rows(), row_idx.n_); stan::math::check_size_match("matrix[uni, multi] assign", name, col_idx.ns_.size(), "right hand side", y_ref.size()); for (int i = 0; i < col_idx.ns_.size(); ++i) { stan::math::check_range("matrix[uni, multi] assign column", name, x.cols(), col_idx.ns_[i]); x.coeffRef(row_idx.n_ - 1, col_idx.ns_[i] - 1) = y_ref.coeff(i); } } /** * Assign to multiple possibly unordered cell's of a matrix from an input * matrix. * * Types: mat[multi, multi] = mat * * @tparam Mat1 Eigen type with dynamic rows and columns. * @tparam Mat2 Eigen type with dynamic rows and columns. * @param[in] x Matrix variable to be assigned. * @param[in] y Value matrix. * @param[in] name Name of variable * @param[in] row_idx multi index for selecting rows * @param[in] col_idx multi index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and value matrix do not match. 
*/ template * = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, const index_multi& row_idx, const index_multi& col_idx) { const auto& y_ref = stan::math::to_ref(y); stan::math::check_size_match("matrix[multi,multi] assign rows", name, row_idx.ns_.size(), "right hand side rows", y_ref.rows()); stan::math::check_size_match("matrix[multi,multi] assign columns", name, col_idx.ns_.size(), "right hand side columns", y_ref.cols()); for (int j = 0; j < y_ref.cols(); ++j) { const int n = col_idx.ns_[j]; stan::math::check_range("matrix[multi,multi] assign column", name, x.cols(), n); for (int i = 0; i < y_ref.rows(); ++i) { const int m = row_idx.ns_[i]; stan::math::check_range("matrix[multi,multi] assign row", name, x.rows(), m); x.coeffRef(m - 1, n - 1) = y_ref.coeff(i, j); } } } /** * Assign to any rows of a single column of a matrix. * * Types: mat[Idx, uni] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type that's assignable to the indexed matrix. * @tparam Idx The row index type * @param[in] x Matrix variable to be assigned. * @param[in] y Matrix variable to assign from. * @param[in] name Name of variable * @param[in] row_idx index for selecting rows * @param[in] col_idx uni index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. */ template * = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, const Idx& row_idx, index_uni col_idx) { stan::math::check_range("matrix[..., uni] assign column", name, x.cols(), col_idx.n_); assign(x.col(col_idx.n_ - 1), y, name, row_idx); } /** * Assign to a non-contiguous set of columns of a matrix. * * Types: mat[Idx, multi] = mat * * @tparam Mat1 Eigen type with dynamic rows and columns. * @tparam Mat2 Eigen type * @tparam Idx The row index type * @param[in] x Matrix variable to be assigned. 
* @param[in] y Value matrix. * @param[in] name Name of variable * @param[in] row_idx index for selecting rows * @param[in] col_idx multi index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and value matrix do not match. */ template * = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, const Idx& row_idx, const index_multi& col_idx) { const auto& y_ref = stan::math::to_ref(y); stan::math::check_size_match("matrix[..., multi] assign column sizes", name, col_idx.ns_.size(), "right hand side columns", y_ref.cols()); for (int j = 0; j < col_idx.ns_.size(); ++j) { const int n = col_idx.ns_[j]; stan::math::check_range("matrix[..., multi] assign column", name, x.cols(), n); assign(x.col(n - 1), y_ref.col(j), name, row_idx); } } /** * Assign to any rows of a matrix. * * Types: mat[Idx, omni] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type assignable to the slice of the matrix. * @tparam Idx The row index type * @param[in] x Matrix variable to be assigned. * @param[in] y Value matrix. * @param[in] name Name of variable * @param[in] row_idx index for selecting rows * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and value matrix do not match. */ template * = nullptr> inline void assign(Mat1&& x, Mat2&& y, const char* name, const Idx& row_idx, index_omni /* idx */) { assign(x, std::forward(y), name, row_idx); } /** * Assign any rows and min:N columns of a matrix. * * Types: mat[Idx, min] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type assignable to the slice of the matrix. * @tparam Idx The row index type * @param[in] x Matrix variable to be assigned. * index (inclusive) to the end of a container. * @param[in] y Value matrix. 
* @param[in] name Name of variable * @param[in] row_idx index for selecting rows * @param[in] col_idx min index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. */ template * = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, const Idx& row_idx, index_min col_idx) { const auto start_col = col_idx.min_ - 1; const auto col_size = x.cols() - start_col; stan::math::check_range("matrix[..., min] assign column", name, x.cols(), col_idx.min_); stan::math::check_size_match("matrix[..., min] assign columns", name, col_size, "right hand side columns", y.cols()); assign(x.rightCols(col_size), y, name, row_idx); } /** * Assign to any rows and 1:max columns of a matrix. * * Types: mat[Idx, max] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type assignable to the slice of the matrix. * @tparam Idx The row index type * @param[in] x Matrix variable to be assigned. * container up to the specified maximum index (inclusive). * @param[in] y Value matrix. * @param[in] name Name of variable * @param[in] row_idx index for selecting rows * @param[in] col_idx max index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. 
*/ template * = nullptr> inline void assign(Mat1&& x, const Mat2& y, const char* name, const Idx& row_idx, index_max col_idx) { if (likely(col_idx.max_ > 0)) { stan::math::check_range("matrix[..., max] assign", name, x.cols(), col_idx.max_); stan::math::check_size_match("matrix[..., max] assign columns", name, col_idx.max_, "right hand side columns", y.cols()); assign(x.leftCols(col_idx.max_), y, name, row_idx); } else { stan::math::check_size_match("matrix[..., max] assign columns", name, 0, "right hand side columns", y.cols()); } } /** * Assign to any rows and a range of columns. * * Types: mat[Idx, min_max] = mat * * @tparam Mat1 A type with dynamic rows and columns. * @tparam Mat2 A type assignable to the slice of the matrix. * @tparam Idx The row index type * @param[in] x Matrix variable to be assigned. * @param[in] y Matrix variable to assign from. * @param[in] name Name of variable * @param[in] row_idx index for selecting rows * @param[in] col_idx max index for selecting columns * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions of the indexed * matrix and right-hand side matrix do not match. 
*/ template * = nullptr> inline void assign(Mat1&& x, Mat2&& y, const char* name, const Idx& row_idx, index_min_max col_idx) { if (likely(col_idx.max_ >= col_idx.min_)) { stan::math::check_range("matrix[..., min_max] assign min column", name, x.cols(), col_idx.min_); stan::math::check_range("matrix[..., min_max] assign max column", name, x.cols(), col_idx.max_); const auto col_start = col_idx.min_ - 1; const auto col_size = col_idx.max_ - col_start; stan::math::check_size_match("matrix[..., min_max] assign column size", name, col_size, "right hand side", y.cols()); assign(x.middleCols(col_start, col_size), y, name, row_idx); } else { stan::math::check_size_match("matrix[..., negative_min_max] assign columns", name, 0, "right hand side columns", y.cols()); } } /** * Assign the elements of one standard vector to another. * * std_vector = std_vector * * @tparam T Type of Std vector to be assigned to. * @tparam U Type of Std vector to be assigned from. * @param[in] x lvalue variable * @param[in] y rvalue variable * @param[in] name name of lvalue variable */ template * = nullptr, require_not_t< std::is_assignable&, std::decay_t>>* = nullptr> inline void assign(T&& x, U&& y, const char* name) { if (unlikely(x.size() != 0)) { stan::math::check_size_match("assign array size", name, x.size(), "right hand side", y.size()); } if (std::is_rvalue_reference::value) { for (size_t i = 0; i < y.size(); ++i) { assign(x[i], std::move(y[i]), name); } } else { for (size_t i = 0; i < y.size(); ++i) { assign(x[i], y[i], name); } } } /** * Assign to a single element of an std vector with additional subsetting on * that element. * * Types: std_vector[uni | Idx] = T * * @tparam StdVec A standard vector * @tparam Idx Type of tail of index list. * @tparam U A type assignable to the value type of `StdVec` * @param[in] x Array variable to be assigned. * @param[in] y Value. 
* @param[in] name Name of variable * @param[in] idx1 uni index * @param[in] idxs Remaining indices * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions do not match in the * tail assignment. */ template * = nullptr> inline void assign(StdVec&& x, U&& y, const char* name, index_uni idx1, const Idxs&... idxs) { stan::math::check_range("array[uni,...] assign", name, x.size(), idx1.n_); assign(x[idx1.n_ - 1], std::forward(y), name, idxs...); } /** * Assign to a single element of an std vector. * * Types: x[uni] = y * * @tparam StdVec A standard vector * @tparam Idx Type of tail of index list. * @tparam U A type assignable to the value type of `StdVec` * @param[in] x Array variable to be assigned. * @param[in] y Value. * @param[in] name Name of variable * @param[in] idx uni index * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the dimensions do not match in the * tail assignment. */ template * = nullptr, require_t&, U>>* = nullptr> inline void assign(StdVec&& x, U&& y, const char* name, index_uni idx) { stan::math::check_range("array[uni,...] assign", name, x.size(), idx.n_); x[idx.n_ - 1] = std::forward(y); } /** * Assign to the elements of an std vector with additional subsetting on each * element. * * Types: x[Idx1 | Idx2] = y * * @tparam T A standard vector. * @tparam Idx1 Type of multiple index heading index list. * @tparam Idx2 Type of tail of index list. * @tparam U A standard vector * @param[in] x Array variable to be assigned. * @param[in] y Value. * @param[in] name Name of variable * @param[in] idx1 first index * @param[in] idxs Remaining indices * @throw std::out_of_range If any of the indices are out of bounds. * @throw std::invalid_argument If the size of the multiple indexing * and size of first dimension of value do not match, or any of * the recursive tail assignment dimensions do not match. 
*/ template * = nullptr, require_not_same_t* = nullptr> inline void assign(T&& x, U&& y, const char* name, const Idx1& idx1, const Idxs&... idxs) { int x_idx_size = rvalue_index_size(idx1, x.size()); // If there is a reverse min_max index or negative max index if (std::is_same, index_min_max>::value || std::is_same, index_max>::value) { if (x_idx_size == 0) { if (std::is_same, index_min_max>::value) { stan::math::check_size_match("array[negative_min_max, ...] assign", name, 0, "right hand side", y.size()); } else { stan::math::check_size_match("array[max < 1, ...] assign", name, 0, "right hand side", y.size()); } return; } } stan::math::check_size_match("array[multi, ...] assign", name, x_idx_size, "right hand side size", y.size()); for (size_t n = 0; n < y.size(); ++n) { size_t i = rvalue_at(n, idx1); stan::math::check_range("array[multi, ...] assign", name, x.size(), i); if (std::is_rvalue_reference::value) { assign(x[i - 1], std::move(y[n]), name, idxs...); } else { assign(x[i - 1], y[n], name, idxs...); } } } namespace internal { template inline constexpr auto make_tuple_seq(std::integer_sequence) { return std::make_tuple(I...); } } // namespace internal /** * Assign one tuple to another * @tparam Tuple1 Tuple with the same number of elements as `Tuple2` * @tparam Tuple2 Tuple with the same number of elements as `Tuple1` * @param x A tuple with elements to be assigned to * @param y A tuple with elements to be assigned from * @param name The name of the tuple to assign to */ template , internal::is_tuple>* = nullptr> inline void assign(Tuple1&& x, Tuple2&& y, const char* name) { constexpr auto t1_size = std::tuple_size>::value; stan::math::for_each( [name](auto&& x_sub, auto&& y_sub, auto idx_name) mutable { assign(std::forward(x_sub), std::forward(y_sub), (std::string(name) + "." 
+ std::to_string(idx_name)).c_str()); }, std::forward(x), std::forward(y), internal::make_tuple_seq(std::make_index_sequence())); } } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/model/model_functional.hpp0000644000176200001440000000130714645137105023751 0ustar liggesusers#ifndef STAN_MODEL_MODEL_FUNCTIONAL_HPP #define STAN_MODEL_MODEL_FUNCTIONAL_HPP #include #include namespace stan { namespace model { // Interface for automatic differentiation of models template struct model_functional { const M& model; std::ostream* o; model_functional(const M& m, std::ostream* out) : model(m), o(out) {} template T operator()(const Eigen::Matrix& x) const { // log_prob() requires non-const but doesn't modify its argument return model.template log_prob( const_cast&>(x), o); } }; } // namespace model } // namespace stan #endif StanHeaders/inst/include/src/stan/services/0000755000176200001440000000000014645137105020440 5ustar liggesusersStanHeaders/inst/include/src/stan/services/util/0000755000176200001440000000000014645137105021415 5ustar liggesusersStanHeaders/inst/include/src/stan/services/util/inv_metric.hpp0000644000176200001440000000054714645137105024273 0ustar liggesusers#include #include #include #include #include #include StanHeaders/inst/include/src/stan/services/util/create_rng.hpp0000644000176200001440000000234414645137105024242 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_CREATE_RNG_HPP #define STAN_SERVICES_UTIL_CREATE_RNG_HPP #include namespace stan { namespace services { namespace util { /** * Creates a pseudo random number generator from a random seed * and a chain id by initializing the PRNG with the seed and * then advancing past pow(2, 50) times the chain ID draws to * ensure different chains sample from different segments of the * pseudo random number sequence. * * Chain IDs should be kept to larger values than one to ensure * that the draws used to initialized transformed data are not * duplicated. 
* * @param[in] seed the random seed * @param[in] chain the chain id * @return a boost::ecuyer1988 instance */ inline boost::ecuyer1988 create_rng(unsigned int seed, unsigned int chain) { using boost::uintmax_t; static constexpr uintmax_t DISCARD_STRIDE = static_cast(1) << 50; boost::ecuyer1988 rng(seed); // always discard at least 1 to avoid issue with small seeds for certain RNG // distributions. See stan#3167 and boostorg/random#92 rng.discard(std::max(static_cast(1), DISCARD_STRIDE * chain)); return rng; } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/validate_dense_inv_metric.hpp0000644000176200001440000000174114645137105027317 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_VALIDATE_DENSE_INV_METRIC_HPP #define STAN_SERVICES_UTIL_VALIDATE_DENSE_INV_METRIC_HPP #include #include namespace stan { namespace services { namespace util { /** * Validate that dense inverse Euclidean metric is positive definite * * @param[in] inv_metric inverse Euclidean metric * @param[in,out] logger Logger for messages * @throws std::domain_error if matrix is not positive definite */ inline void validate_dense_inv_metric(const Eigen::MatrixXd& inv_metric, callbacks::logger& logger) { try { stan::math::check_pos_definite("check_pos_definite", "inv_metric", inv_metric); } catch (const std::domain_error& e) { logger.error("Inverse Euclidean metric not positive definite."); throw std::domain_error("Initialization failure"); } } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/validate_diag_inv_metric.hpp0000644000176200001440000000177114645137105027130 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_VALIDATE_DIAG_INV_METRIC_HPP #define STAN_SERVICES_UTIL_VALIDATE_DIAG_INV_METRIC_HPP #include #include namespace stan { namespace services { namespace util { /** * Validate that diag inverse Euclidean metric is positive definite * * @param[in] inv_metric 
inverse Euclidean metric * @param[in,out] logger Logger for messages * @throws std::domain_error if matrix is not positive definite */ inline void validate_diag_inv_metric(const Eigen::VectorXd& inv_metric, callbacks::logger& logger) { try { stan::math::check_finite("check_finite", "inv_metric", inv_metric); stan::math::check_positive("check_positive", "inv_metric", inv_metric); } catch (const std::domain_error& e) { logger.error("Inverse Euclidean metric not positive definite."); throw std::domain_error("Initialization failure"); } } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/create_unit_e_dense_inv_metric.hpp0000644000176200001440000000201114645137105030323 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_CREATE_UNIT_E_DENSE_INV_METRIC_HPP #define STAN_SERVICES_UTIL_CREATE_UNIT_E_DENSE_INV_METRIC_HPP #include #include #include namespace stan { namespace services { namespace util { /** * Create a stan::dump object which contains vector "metric" * of specified size where all elements are ones. 
* * @param[in] num_params expected number of dense elements * @return var_context */ inline stan::io::dump create_unit_e_dense_inv_metric(size_t num_params) { auto num_params_str = std::to_string(num_params); std::string dims("),.Dim=c(" + num_params_str + ", " + num_params_str + "))"); Eigen::IOFormat RFmt(Eigen::StreamPrecision, Eigen::DontAlignCols, ", ", ",", "", "", "inv_metric <- structure(c(", dims); std::stringstream txt; txt << Eigen::MatrixXd::Identity(num_params, num_params).format(RFmt); return stan::io::dump(txt); } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/read_dense_inv_metric.hpp0000644000176200001440000000273714645137105026447 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_READ_DENSE_INV_METRIC_HPP #define STAN_SERVICES_UTIL_READ_DENSE_INV_METRIC_HPP #include #include #include #include #include #include #include namespace stan { namespace services { namespace util { /** * Extract dense inverse Euclidean metric from a var_context object. 
* * @param[in] init_context a var_context with array of initial values * @param[in] num_params expected number of row, column elements * @param[in,out] logger Logger for messages * @throws std::domain_error if cannot read the Euclidean metric * @return inv_metric */ inline Eigen::MatrixXd read_dense_inv_metric( const stan::io::var_context& init_context, size_t num_params, callbacks::logger& logger) { Eigen::MatrixXd inv_metric; try { init_context.validate_dims("read dense inv metric", "inv_metric", "matrix", init_context.to_vec(num_params, num_params)); std::vector dense_vals = init_context.vals_r("inv_metric"); inv_metric = stan::math::to_matrix(dense_vals, num_params, num_params); } catch (const std::exception& e) { logger.error("Cannot get inverse metric from input file."); logger.error("Caught exception: "); logger.error(e.what()); throw std::domain_error("Initialization failure"); } return inv_metric; } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/run_adaptive_sampler.hpp0000644000176200001440000001017314645137105026334 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_RUN_ADAPTIVE_SAMPLER_HPP #define STAN_SERVICES_UTIL_RUN_ADAPTIVE_SAMPLER_HPP #include #include #include #include #include #include #include namespace stan { namespace services { namespace util { /** * Runs the sampler with adaptation. * * @tparam Sampler Type of adaptive sampler. * @tparam Model Type of model * @tparam RNG Type of random number generator * @param[in,out] sampler the mcmc sampler to use on the model * @param[in] model the model concept to use for computing log probability * @param[in] cont_vector initial parameter values * @param[in] num_warmup number of warmup draws * @param[in] num_samples number of post warmup draws * @param[in] num_thin number to thin the draws. Must be greater than * or equal to 1. 
* @param[in] refresh controls output to the logger * @param[in] save_warmup indicates whether the warmup draws should be * sent to the sample writer * @param[in,out] rng random number generator * @param[in,out] interrupt interrupt callback * @param[in,out] logger logger for messages * @param[in,out] sample_writer writer for draws * @param[in,out] diagnostic_writer writer for diagnostic information * @param[in] chain_id The id for a given chain. * @param[in] num_chains The number of chains used in the program. This * is used in generate transitions to print out the chain number. */ template void run_adaptive_sampler(Sampler& sampler, Model& model, std::vector& cont_vector, int num_warmup, int num_samples, int num_thin, int refresh, bool save_warmup, RNG& rng, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer, size_t chain_id = 1, size_t num_chains = 1) { Eigen::Map cont_params(cont_vector.data(), cont_vector.size()); sampler.engage_adaptation(); try { sampler.z().q = cont_params; sampler.init_stepsize(logger); } catch (const std::exception& e) { logger.info("Exception initializing step size."); logger.info(e.what()); return; } services::util::mcmc_writer writer(sample_writer, diagnostic_writer, logger); stan::mcmc::sample s(cont_params, 0, 0); // Headers writer.write_sample_names(s, sampler, model); writer.write_diagnostic_names(s, sampler, model); auto start_warm = std::chrono::steady_clock::now(); util::generate_transitions(sampler, num_warmup, 0, num_warmup + num_samples, num_thin, refresh, save_warmup, true, writer, s, model, rng, interrupt, logger, chain_id, num_chains); auto end_warm = std::chrono::steady_clock::now(); double warm_delta_t = std::chrono::duration_cast( end_warm - start_warm) .count() / 1000.0; sampler.disengage_adaptation(); writer.write_adapt_finish(sampler); sampler.write_sampler_state(sample_writer); auto start_sample = std::chrono::steady_clock::now(); 
util::generate_transitions(sampler, num_samples, num_warmup, num_warmup + num_samples, num_thin, refresh, true, false, writer, s, model, rng, interrupt, logger, chain_id, num_chains); auto end_sample = std::chrono::steady_clock::now(); double sample_delta_t = std::chrono::duration_cast( end_sample - start_sample) .count() / 1000.0; writer.write_timing(warm_delta_t, sample_delta_t); } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/gq_writer.hpp0000644000176200001440000000550114645137105024132 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_GQ_WRITER_HPP #define STAN_SERVICES_UTIL_GQ_WRITER_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace util { /** * gq_writer writes out * * @tparam Model Model class */ class gq_writer { private: callbacks::writer& sample_writer_; callbacks::logger& logger_; int num_constrained_params_; public: /** * Constructor. * * @param[in,out] sample_writer samples are "written" to this stream * @param[in,out] logger messages are written through the logger * @param[in] num_constrained_params offset into write_array gqs */ gq_writer(callbacks::writer& sample_writer, callbacks::logger& logger, int num_constrained_params) : sample_writer_(sample_writer), logger_(logger), num_constrained_params_(num_constrained_params) {} /** * Write names of variables declared in the generated quantities block * to stream `sample_writer_`. 
* * @tparam M model class */ template void write_gq_names(const Model& model) { static const bool include_tparams = false; static const bool include_gqs = true; std::vector names; model.constrained_param_names(names, include_tparams, include_gqs); std::vector gq_names(names.begin() + num_constrained_params_, names.end()); sample_writer_(gq_names); } /** * Calls model's `write_array` method and writes values of * variables defined in the generated quantities block * to stream `sample_writer_`. * * @tparam M model class * @tparam RNG pseudo random number generator class * @param[in] model instantiated model * @param[in] rng instantiated RNG * @param[in] draw sequence unconstrained parameters values. */ template void write_gq_values(const Model& model, RNG& rng, const std::vector& draw) { std::vector values; std::vector params_i; // unused - no discrete params std::stringstream ss; try { model.write_array(rng, const_cast&>(draw), params_i, values, false, true, &ss); if (ss.str().length() > 0) logger_.info(ss); } catch (const std::exception& e) { if (ss.str().length() > 0) logger_.info(ss); logger_.info(e.what()); } std::vector gq_values(values.begin() + num_constrained_params_, values.end()); sample_writer_(gq_values); } }; } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/run_sampler.hpp0000644000176200001440000000641214645137105024460 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_RUN_SAMPLER_HPP #define STAN_SERVICES_UTIL_RUN_SAMPLER_HPP #include #include #include #include #include namespace stan { namespace services { namespace util { /** * Runs the sampler without adaptation. 
* * @tparam Model Type of model * @tparam RNG Type of random number generator * @param[in,out] sampler the mcmc sampler to use on the model * @param[in] model the model concept to use for computing log probability * @param[in] cont_vector initial parameter values * @param[in] num_warmup number of warmup draws * @param[in] num_samples number of post warmup draws * @param[in] num_thin number to thin the draws. Must be greater than or * equal to 1. * @param[in] refresh controls output to the logger * @param[in] save_warmup indicates whether the warmup draws should be * sent to the sample writer * @param[in,out] rng random number generator * @param[in,out] interrupt interrupt callback * @param[in,out] logger logger for messages * @param[in,out] sample_writer writer for draws * @param[in,out] diagnostic_writer writer for diagnostic information */ template void run_sampler(stan::mcmc::base_mcmc& sampler, Model& model, std::vector& cont_vector, int num_warmup, int num_samples, int num_thin, int refresh, bool save_warmup, RNG& rng, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { Eigen::Map cont_params(cont_vector.data(), cont_vector.size()); services::util::mcmc_writer writer(sample_writer, diagnostic_writer, logger); stan::mcmc::sample s(cont_params, 0, 0); // Headers writer.write_sample_names(s, sampler, model); writer.write_diagnostic_names(s, sampler, model); auto start_warm = std::chrono::steady_clock::now(); util::generate_transitions(sampler, num_warmup, 0, num_warmup + num_samples, num_thin, refresh, save_warmup, true, writer, s, model, rng, interrupt, logger); auto end_warm = std::chrono::steady_clock::now(); double warm_delta_t = std::chrono::duration_cast( end_warm - start_warm) .count() / 1000.0; writer.write_adapt_finish(sampler); sampler.write_sampler_state(sample_writer); auto start_sample = std::chrono::steady_clock::now(); util::generate_transitions(sampler, num_samples, 
num_warmup, num_warmup + num_samples, num_thin, refresh, true, false, writer, s, model, rng, interrupt, logger); auto end_sample = std::chrono::steady_clock::now(); double sample_delta_t = std::chrono::duration_cast( end_sample - start_sample) .count() / 1000.0; writer.write_timing(warm_delta_t, sample_delta_t); } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/mcmc_writer.hpp0000644000176200001440000001652314645137105024450 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_MCMC_WRITER_HPP #define STAN_SERVICES_UTIL_MCMC_WRITER_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace util { /** * mcmc_writer writes out headers and samples * * @tparam Model Model class */ class mcmc_writer { private: callbacks::writer& sample_writer_; callbacks::writer& diagnostic_writer_; callbacks::logger& logger_; public: size_t num_sample_params_; size_t num_sampler_params_; size_t num_model_params_; /** * Constructor. * * @param[in,out] sample_writer samples are "written" to this stream * @param[in,out] diagnostic_writer diagnostic info is "written" to this * stream * @param[in,out] logger messages are written through the logger */ mcmc_writer(callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer, callbacks::logger& logger) : sample_writer_(sample_writer), diagnostic_writer_(diagnostic_writer), logger_(logger), num_sample_params_(0), num_sampler_params_(0), num_model_params_(0) {} /** * Outputs parameter string names. First outputs the names stored in * the sample object (stan::mcmc::sample), then uses the sampler * provided to output sampler specific names, then adds the model * constrained parameter names. * * The names are written to the sample_stream as comma separated values * with a newline at the end. 
* * @param[in] sample a sample (unconstrained) that works with the model * @param[in] sampler a stan::mcmc::base_mcmc object * @param[in] model the model */ template void write_sample_names(stan::mcmc::sample& sample, stan::mcmc::base_mcmc& sampler, Model& model) { std::vector names; sample.get_sample_param_names(names); num_sample_params_ = names.size(); sampler.get_sampler_param_names(names); num_sampler_params_ = names.size() - num_sample_params_; model.constrained_param_names(names, true, true); num_model_params_ = names.size() - num_sample_params_ - num_sampler_params_; sample_writer_(names); } /** * Outputs samples. First outputs the values of the sample params * from a stan::mcmc::sample, then outputs the values of the sampler * params from a stan::mcmc::base_mcmc, then finally outputs the values * of the model. * * The samples are written to the sample_stream as comma separated * values with a newline at the end. * * @param[in,out] rng random number generator (used by * model.write_array()) * @param[in] sample the sample in constrained space * @param[in] sampler the sampler * @param[in] model the model */ template void write_sample_params(RNG& rng, stan::mcmc::sample& sample, stan::mcmc::base_mcmc& sampler, Model& model) { std::vector values; sample.get_sample_params(values); sampler.get_sampler_params(values); std::vector model_values; std::vector params_i; std::stringstream ss; try { std::vector cont_params( sample.cont_params().data(), sample.cont_params().data() + sample.cont_params().size()); model.write_array(rng, cont_params, params_i, model_values, true, true, &ss); } catch (const std::exception& e) { if (ss.str().length() > 0) logger_.info(ss); ss.str(""); logger_.info(e.what()); } if (ss.str().length() > 0) logger_.info(ss); if (model_values.size() > 0) values.insert(values.end(), model_values.begin(), model_values.end()); if (model_values.size() < num_model_params_) values.insert(values.end(), num_model_params_ - model_values.size(), 
std::numeric_limits::quiet_NaN()); sample_writer_(values); } /** * Prints additional info to the streams * * Prints to the sample stream * * @param[in] sampler sampler */ void write_adapt_finish(stan::mcmc::base_mcmc& sampler) { sample_writer_("Adaptation terminated"); } /** * Print diagnostic names * * @param[in] sample unconstrained sample * @param[in] sampler sampler * @param[in] model model */ template void write_diagnostic_names(stan::mcmc::sample sample, stan::mcmc::base_mcmc& sampler, Model& model) { std::vector names; sample.get_sample_param_names(names); sampler.get_sampler_param_names(names); std::vector model_names; model.unconstrained_param_names(model_names, false, false); sampler.get_sampler_diagnostic_names(model_names, names); diagnostic_writer_(names); } /** * Print diagnostic params to the diagnostic stream. * * @param[in] sample unconstrained sample * @param[in] sampler sampler */ void write_diagnostic_params(stan::mcmc::sample& sample, stan::mcmc::base_mcmc& sampler) { std::vector values; sample.get_sample_params(values); sampler.get_sampler_params(values); sampler.get_sampler_diagnostics(values); diagnostic_writer_(values); } /** * Internal method * * Prints timing information * * @param[in] warmDeltaT warmup time in seconds * @param[in] sampleDeltaT sample time in seconds * @param[in,out] writer output stream */ void write_timing(double warmDeltaT, double sampleDeltaT, callbacks::writer& writer) { std::string title(" Elapsed Time: "); writer(); std::stringstream ss1; ss1 << title << warmDeltaT << " seconds (Warm-up)"; writer(ss1.str()); std::stringstream ss2; ss2 << std::string(title.size(), ' ') << sampleDeltaT << " seconds (Sampling)"; writer(ss2.str()); std::stringstream ss3; ss3 << std::string(title.size(), ' ') << warmDeltaT + sampleDeltaT << " seconds (Total)"; writer(ss3.str()); writer(); } /** * Internal method * * Logs timing information * * @param[in] warmDeltaT warmup time in seconds * @param[in] sampleDeltaT sample time in seconds 
*/ void log_timing(double warmDeltaT, double sampleDeltaT) { std::string title(" Elapsed Time: "); logger_.info(""); std::stringstream ss1; ss1 << title << warmDeltaT << " seconds (Warm-up)"; logger_.info(ss1); std::stringstream ss2; ss2 << std::string(title.size(), ' ') << sampleDeltaT << " seconds (Sampling)"; logger_.info(ss2); std::stringstream ss3; ss3 << std::string(title.size(), ' ') << warmDeltaT + sampleDeltaT << " seconds (Total)"; logger_.info(ss3); logger_.info(""); } /** * Print timing information to all streams * * @param[in] warmDeltaT warmup time (sec) * @param[in] sampleDeltaT sample time (sec) */ void write_timing(double warmDeltaT, double sampleDeltaT) { write_timing(warmDeltaT, sampleDeltaT, sample_writer_); write_timing(warmDeltaT, sampleDeltaT, diagnostic_writer_); log_timing(warmDeltaT, sampleDeltaT); } }; } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/read_diag_inv_metric.hpp0000644000176200001440000000303414645137105026244 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_READ_DIAG_INV_METRIC_HPP #define STAN_SERVICES_UTIL_READ_DIAG_INV_METRIC_HPP #include #include #include #include #include #include #include namespace stan { namespace services { namespace util { /** * Extract diagonal values for an inverse Euclidean metric * from a var_context object. 
* * @param[in] init_context a var_context with initial values * @param[in] num_params expected number of diagonal elements * @param[in,out] logger Logger for messages * @throws std::domain_error if the Euclidean metric is invalid * @return inv_metric vector of diagonal values */ inline Eigen::VectorXd read_diag_inv_metric( const stan::io::var_context& init_context, size_t num_params, callbacks::logger& logger) { Eigen::VectorXd inv_metric(num_params); try { init_context.validate_dims("read diag inv metric", "inv_metric", "vector_d", init_context.to_vec(num_params)); std::vector diag_vals = init_context.vals_r("inv_metric"); for (size_t i = 0; i < num_params; i++) { inv_metric(i) = diag_vals[i]; } } catch (const std::exception& e) { logger.error("Cannot get inverse Euclidean metric from input file."); logger.error("Caught exception: "); logger.error(e.what()); throw std::domain_error("Initialization failure"); } return inv_metric; } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/experimental_message.hpp0000644000176200001440000000171014645137105026326 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_EXPERIMENTAL_MESSAGE_HPP #define STAN_SERVICES_UTIL_EXPERIMENTAL_MESSAGE_HPP #include namespace stan { namespace services { namespace util { /** * Writes an experimental message to the writer. * All experimental algorithms should call this function. * * @param[in,out] logger logger for experimental algorithm message */ inline void experimental_message(stan::callbacks::logger& logger) { logger.info( "------------------------------" "------------------------------"); logger.info("EXPERIMENTAL ALGORITHM:"); logger.info( " This procedure has not been thoroughly tested" " and may be unstable"); logger.info(" or buggy. 
The interface is subject to change."); logger.info( "------------------------------" "------------------------------"); logger.info(""); logger.info(""); } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/create_unit_e_diag_inv_metric.hpp0000644000176200001440000000162114645137105030137 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_CREATE_UNIT_E_DIAG_INV_METRIC_HPP #define STAN_SERVICES_UTIL_CREATE_UNIT_E_DIAG_INV_METRIC_HPP #include #include namespace stan { namespace services { namespace util { /** * Create a stan::dump object which contains vector "metric" * of specified size where all elements are ones. * * @param[in] num_params expected number of diagonal elements * @return var_context */ inline stan::io::dump create_unit_e_diag_inv_metric(size_t num_params) { std::string dims("),.Dim=c(" + std::to_string(num_params) + "))"); Eigen::IOFormat RFmt(Eigen::StreamPrecision, Eigen::DontAlignCols, ", ", ",", "", "", "inv_metric <- structure(c(", dims); std::stringstream txt; txt << Eigen::VectorXd::Ones(num_params).format(RFmt); return stan::io::dump(txt); } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/initialize.hpp0000644000176200001440000001772614645137105024304 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_INITIALIZE_HPP #define STAN_SERVICES_UTIL_INITIALIZE_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace util { /** * Returns a valid initial value of the parameters of the model * on the unconstrained scale. * * For identical inputs (model, init, rng, init_radius), this * function will produce the same initialization. * * Initialization first tries to use the provided * stan::io::var_context, then it will generate * random uniform values from -init_radius to +init_radius for missing * parameters. 
* * When the var_context provides all variables or * the init_radius is 0, this function will only evaluate the * log probability of the model with the unconstrained * parameters once to see if it's valid. * * When at least some of the initialization is random, it will * randomly initialize until it finds a set of unconstrained * parameters that are valid or it hits MAX_INIT_TRIES = * 100 (hard-coded). * * Valid initialization is defined as a finite, non-NaN value for the * evaluation of the log probability density function and all its * gradients. * * @tparam Jacobian indicates whether to include the Jacobian term when * evaluating the log density function * @tparam Model the type of the model class * @tparam RNG the type of the random number generator * * @param[in] model the model * @param[in] init a var_context with initial values * @param[in,out] rng random number generator * @param[in] init_radius the radius for generating random values. * A value of 0 indicates that the unconstrained parameters (not * provided by init) should be initialized with 0. 
* @param[in] print_timing indicates whether a timing message should * be printed to the logger * @param[in,out] logger logger for messages * @param[in,out] init_writer init writer (on the unconstrained scale) * @throws exception passed through from the model if the model has a * fatal error (not a std::domain_error) * @throws std::domain_error if the model can not be initialized and * the model does not have a fatal error (only allows for * std::domain_error) * @return valid unconstrained parameters for the model */ template std::vector initialize(Model& model, const InitContext& init, RNG& rng, double init_radius, bool print_timing, stan::callbacks::logger& logger, stan::callbacks::writer& init_writer) { std::vector unconstrained; std::vector disc_vector; bool is_fully_initialized = true; bool any_initialized = false; std::vector param_names; model.get_param_names(param_names, false, false); for (size_t n = 0; n < param_names.size(); n++) { is_fully_initialized &= init.contains_r(param_names[n]); any_initialized |= init.contains_r(param_names[n]); } bool is_initialized_with_zero = init_radius == 0.0; int MAX_INIT_TRIES = is_fully_initialized || is_initialized_with_zero ? 
1 : 100; int num_init_tries = 0; for (; num_init_tries < MAX_INIT_TRIES; num_init_tries++) { std::stringstream msg; try { stan::io::random_var_context random_context(model, rng, init_radius, is_initialized_with_zero); if (!any_initialized) { unconstrained = random_context.get_unconstrained(); } else { stan::io::chained_var_context context(init, random_context); model.transform_inits(context, disc_vector, unconstrained, &msg); } } catch (std::domain_error& e) { if (msg.str().length() > 0) logger.info(msg); logger.info("Rejecting initial value:"); logger.info( " Error evaluating the log probability" " at the initial value."); logger.info(e.what()); continue; } catch (std::exception& e) { if (msg.str().length() > 0) logger.info(msg); logger.info( "Unrecoverable error evaluating the log probability" " at the initial value."); logger.info(e.what()); throw; } msg.str(""); double log_prob(0); try { // we evaluate the log_prob function with propto=false // because we're evaluating with `double` as the type of // the parameters. log_prob = model.template log_prob(unconstrained, disc_vector, &msg); if (msg.str().length() > 0) logger.info(msg); } catch (std::domain_error& e) { if (msg.str().length() > 0) logger.info(msg); logger.info("Rejecting initial value:"); logger.info( " Error evaluating the log probability" " at the initial value."); logger.info(e.what()); continue; } catch (std::exception& e) { if (msg.str().length() > 0) logger.info(msg); logger.info( "Unrecoverable error evaluating the log probability" " at the initial value."); logger.info(e.what()); throw; } if (!std::isfinite(log_prob)) { logger.info("Rejecting initial value:"); logger.info( " Log probability evaluates to log(0)," " i.e. 
negative infinity."); logger.info( " Stan can't start sampling from this" " initial value."); continue; } std::stringstream log_prob_msg; std::vector gradient; auto start = std::chrono::steady_clock::now(); try { // we evaluate this with propto=true since we're // evaluating with autodiff variables log_prob = stan::model::log_prob_grad( model, unconstrained, disc_vector, gradient, &log_prob_msg); } catch (const std::exception& e) { if (log_prob_msg.str().length() > 0) logger.info(log_prob_msg); logger.info(e.what()); throw; } auto end = std::chrono::steady_clock::now(); double deltaT = std::chrono::duration_cast(end - start) .count() / 1000000.0; if (log_prob_msg.str().length() > 0) logger.info(log_prob_msg); bool gradient_ok = std::isfinite(stan::math::sum(gradient)); if (!gradient_ok) { logger.info("Rejecting initial value:"); logger.info( " Gradient evaluated at the initial value" " is not finite."); logger.info( " Stan can't start sampling from this" " initial value."); } if (gradient_ok && print_timing) { logger.info(""); std::stringstream msg1; msg1 << "Gradient evaluation took " << deltaT << " seconds"; logger.info(msg1); std::stringstream msg2; msg2 << "1000 transitions using 10 leapfrog steps" << " per transition would take" << " " << 1e4 * deltaT << " seconds."; logger.info(msg2); logger.info("Adjust your expectations accordingly!"); logger.info(""); logger.info(""); } if (gradient_ok) { init_writer(unconstrained); return unconstrained; } } if (!is_initialized_with_zero) { logger.info(""); std::stringstream msg; msg << "Initialization between (-" << init_radius << ", " << init_radius << ") failed after" << " " << MAX_INIT_TRIES << " attempts. 
"; logger.info(msg); logger.info( " Try specifying initial values," " reducing ranges of constrained values," " or reparameterizing the model."); } throw std::domain_error("Initialization failed."); } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/util/generate_transitions.hpp0000644000176200001440000000653514645137105026366 0ustar liggesusers#ifndef STAN_SERVICES_UTIL_GENERATE_TRANSITIONS_HPP #define STAN_SERVICES_UTIL_GENERATE_TRANSITIONS_HPP #include #include #include #include #include namespace stan { namespace services { namespace util { /** * Generates MCMC transitions. * * @tparam Model model class * @tparam RNG random number generator class * @param[in,out] sampler MCMC sampler used to generate transitions * @param[in] num_iterations number of MCMC transitions * @param[in] start starting iteration number used for printing messages * @param[in] finish end iteration number used for printing messages * @param[in] num_thin when save is true, a draw will be written to the * mcmc_writer every num_thin iterations * @param[in] refresh number of iterations to print a message. If * refresh is zero, iteration number messages will not be printed * @param[in] save if save is true, the transitions will be written * to the mcmc_writer. If false, transitions will not be written * @param[in] warmup indicates whether these transitions are warmup. Used * for printing iteration number messages * @param[in,out] mcmc_writer writer to handle mcmc output * @param[in,out] init_s starts as the initial unconstrained parameter * values. When the function completes, this will have the final * iteration's unconstrained parameter values * @param[in] model model * @param[in,out] base_rng random number generator * @param[in,out] callback interrupt callback called once an iteration * @param[in,out] logger logger for messages * @param[in] chain_id The id of the current chain, used in output. 
* @param[in] num_chains The number of chains used in the program. This * is used in generate transitions to print out the chain number. */ template void generate_transitions(stan::mcmc::base_mcmc& sampler, int num_iterations, int start, int finish, int num_thin, int refresh, bool save, bool warmup, util::mcmc_writer& mcmc_writer, stan::mcmc::sample& init_s, Model& model, RNG& base_rng, callbacks::interrupt& callback, callbacks::logger& logger, size_t chain_id = 1, size_t num_chains = 1) { for (int m = 0; m < num_iterations; ++m) { callback(); if (refresh > 0 && (start + m + 1 == finish || m == 0 || (m + 1) % refresh == 0)) { int it_print_width = std::ceil(std::log10(static_cast(finish))); std::stringstream message; if (num_chains != 1) { message << "Chain [" << chain_id << "] "; } message << "Iteration: "; message << std::setw(it_print_width) << m + 1 + start << " / " << finish; message << " [" << std::setw(3) << static_cast((100.0 * (start + m + 1)) / finish) << "%] "; message << (warmup ? " (Warmup)" : " (Sampling)"); logger.info(message); } init_s = sampler.transition(init_s, logger); if (save && ((m % num_thin) == 0)) { mcmc_writer.write_sample_params(base_rng, init_s, sampler, model); mcmc_writer.write_diagnostic_params(init_s, sampler); } } } } // namespace util } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/diagnose/0000755000176200001440000000000014645137105022231 5ustar liggesusersStanHeaders/inst/include/src/stan/services/diagnose/defaults.hpp0000644000176200001440000000321514645137105024552 0ustar liggesusers#ifndef STAN_SERVICES_DIAGNOSE_DEFAULTS_HPP #define STAN_SERVICES_DIAGNOSE_DEFAULTS_HPP #include #include namespace stan { namespace services { namespace diagnose { /** * Epsilon is the finite differences stepsize. */ struct epsilon { /** * Return the string description of epsilon. 
* * @return description */ static std::string description() { return "Finite difference stepsize."; } /** * Validates epsilon; epsilon must be greater than 0. * * @param[in] epsilon argument to validate * @throw std::invalid_argument unless epsilon is greater than zero */ static void validate(double epsilon) { if (!(epsilon > 0)) throw std::invalid_argument("epsilon must be greater than 0."); } /** * Return the default epsilon value. * * @return 1e-6 */ static double default_value() { return 1e-6; } }; /** * Error is the absolute error threshold for evaluating * gradients relative to the finite differences calculation. */ struct error { /** * Return the string description of error. * * @return description */ static std::string description() { return "Absolute error threshold."; } /** * Validates error; error must be greater than 0. * * @throw std::invalid_argument unless error is greater than zero * equal to 0. */ static void validate(double error) { if (!(error > 0)) throw std::invalid_argument("error must be greater than 0."); } /** * Return the default error value. * * @return 1e-6 */ static double default_value() { return 1e-6; } }; } // namespace diagnose } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/diagnose/diagnose.hpp0000644000176200001440000000453714645137105024544 0ustar liggesusers#ifndef STAN_SERVICES_DIAGNOSE_DIAGNOSE_HPP #define STAN_SERVICES_DIAGNOSE_DIAGNOSE_HPP #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace diagnose { /** * Checks the gradients of the model computed using reverse mode * autodiff against finite differences. * * This will test the first order gradients using reverse mode * at the value specified in cont_params. This method only * outputs to the logger. 
* * @tparam Model A model implementation * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] epsilon epsilon to use for finite differences * @param[in] error amount of absolute error to allow * @param[in,out] interrupt interrupt callback * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] parameter_writer Writer callback for file output * @return the number of parameters that are not within epsilon * of the finite difference calculation */ template int diagnose(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, double epsilon, double error, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& parameter_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, false, logger, init_writer); logger.info("TEST GRADIENT MODE"); int num_failed = stan::model::test_gradients( model, cont_vector, disc_vector, epsilon, error, interrupt, logger, parameter_writer); return num_failed; } } // namespace diagnose } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/experimental/0000755000176200001440000000000014645137105023135 5ustar liggesusersStanHeaders/inst/include/src/stan/services/experimental/advi/0000755000176200001440000000000014645137105024060 5ustar liggesusersStanHeaders/inst/include/src/stan/services/experimental/advi/fullrank.hpp0000644000176200001440000000704514645137105026415 0ustar liggesusers#ifndef STAN_SERVICES_EXPERIMENTAL_ADVI_FULLRANK_HPP 
#define STAN_SERVICES_EXPERIMENTAL_ADVI_FULLRANK_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace experimental { namespace advi { /** * Runs full rank ADVI. * * @tparam Model A model implementation * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the random number generator * @param[in] init_radius radius to initialize * @param[in] grad_samples number of samples for Monte Carlo estimate * of gradients * @param[in] elbo_samples number of samples for Monte Carlo estimate * of ELBO * @param[in] max_iterations maximum number of iterations * @param[in] tol_rel_obj convergence tolerance on the relative norm of * the objective * @param[in] eta stepsize scaling parameter for variational inference * @param[in] adapt_engaged adaptation engaged? 
* @param[in] adapt_iterations number of iterations for eta adaptation * @param[in] eval_elbo evaluate ELBO every Nth iteration * @param[in] output_samples number of posterior samples to draw and * save * @param[in,out] interrupt callback to be called every iteration * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] parameter_writer output for parameter values * @param[in,out] diagnostic_writer output for diagnostic values * @return error_codes::OK if successful */ template int fullrank(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int grad_samples, int elbo_samples, int max_iterations, double tol_rel_obj, double eta, bool adapt_engaged, int adapt_iterations, int eval_elbo, int output_samples, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& parameter_writer, callbacks::writer& diagnostic_writer) { util::experimental_message(logger); boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); std::vector names; names.push_back("lp__"); names.push_back("log_p__"); names.push_back("log_g__"); model.constrained_param_names(names, true, true); parameter_writer(names); Eigen::VectorXd cont_params = Eigen::Map(&cont_vector[0], cont_vector.size(), 1); stan::variational::advi cmd_advi(model, cont_params, rng, grad_samples, elbo_samples, eval_elbo, output_samples); cmd_advi.run(eta, adapt_engaged, adapt_iterations, tol_rel_obj, max_iterations, logger, parameter_writer, diagnostic_writer); return 0; } } // namespace advi } // namespace experimental } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/experimental/advi/meanfield.hpp0000644000176200001440000000706214645137105026522 0ustar 
liggesusers#ifndef STAN_SERVICES_EXPERIMENTAL_ADVI_MEANFIELD_HPP #define STAN_SERVICES_EXPERIMENTAL_ADVI_MEANFIELD_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace experimental { namespace advi { /** * Runs mean field ADVI. * * @tparam Model A model implementation * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the random number generator * @param[in] init_radius radius to initialize * @param[in] grad_samples number of samples for Monte Carlo estimate * of gradients * @param[in] elbo_samples number of samples for Monte Carlo estimate * of ELBO * @param[in] max_iterations maximum number of iterations * @param[in] tol_rel_obj convergence tolerance on the relative norm * of the objective * @param[in] eta stepsize scaling parameter for variational inference * @param[in] adapt_engaged adaptation engaged? 
* @param[in] adapt_iterations number of iterations for eta adaptation * @param[in] eval_elbo evaluate ELBO every Nth iteration * @param[in] output_samples number of posterior samples to draw and * save * @param[in,out] interrupt callback to be called every iteration * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] parameter_writer output for parameter values * @param[in,out] diagnostic_writer output for diagnostic values * @return error_codes::OK if successful */ template int meanfield(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int grad_samples, int elbo_samples, int max_iterations, double tol_rel_obj, double eta, bool adapt_engaged, int adapt_iterations, int eval_elbo, int output_samples, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& parameter_writer, callbacks::writer& diagnostic_writer) { util::experimental_message(logger); boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); std::vector names; names.push_back("lp__"); names.push_back("log_p__"); names.push_back("log_g__"); model.constrained_param_names(names, true, true); parameter_writer(names); Eigen::VectorXd cont_params = Eigen::Map(&cont_vector[0], cont_vector.size(), 1); stan::variational::advi cmd_advi(model, cont_params, rng, grad_samples, elbo_samples, eval_elbo, output_samples); cmd_advi.run(eta, adapt_engaged, adapt_iterations, tol_rel_obj, max_iterations, logger, parameter_writer, diagnostic_writer); return 0; } } // namespace advi } // namespace experimental } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/experimental/advi/defaults.hpp0000644000176200001440000001575614645137105026416 0ustar 
liggesusers#ifndef STAN_SERVICES_EXPERIMENTAL_ADVI_DEFAULTS_HPP #define STAN_SERVICES_EXPERIMENTAL_ADVI_DEFAULTS_HPP #include #include namespace stan { namespace services { namespace experimental { namespace advi { /** * Number of samples for Monte Carlo estimate of gradients. */ struct gradient_samples { /** * Return the string description of gradient_samples. * * @return description */ static std::string description() { return "Number of Monte Carlo draws for computing the gradient."; } /** * Validates gradient_samples; must be greater than 0. * * @param[in] gradient_samples argument to validate * @throw std::invalid_argument unless gradient_samples is greater * than zero */ static void validate(int gradient_samples) { if (!(gradient_samples > 0)) throw std::invalid_argument( "gradient_samples must be greater" " than 0."); } /** * Return the default number of gradient_samples. * * @return 1 */ static int default_value() { return 1; } }; /** * Number of Monte Carlo samples for estimate of ELBO. */ struct elbo_samples { /** * Return the string description of elbo_samples. * * @return description */ static std::string description() { return "Number of Monte Carlo draws for estimate of ELBO."; } /** * Validates elbo_samples; must be greater than 0. * * @param[in] elbo_samples argument to validate * @throw std::invalid_argument unless elbo_samples is greater than * zero */ static void validate(double elbo_samples) { if (!(elbo_samples > 0)) throw std::invalid_argument( "elbo_samples must be greater" " than 0."); } /** * Return the default elbo_samples value. * * @return 100 */ static int default_value() { return 100; } }; /** * Maximum number of iterations to run ADVI. */ struct max_iterations { /** * Return the string description of max_iterations. * * @return description */ static std::string description() { return "Maximum number of ADVI iterations."; } /** * Validates max_iterations; max_iterations must be greater than 0. 
* * @param[in] max_iterations argument to validate * @throw std::invalid_argument unless max_iterations is greater * than zero */ static void validate(int max_iterations) { if (!(max_iterations > 0)) throw std::invalid_argument( "max_iterations must be greater" " than 0."); } /** * Return the default max_iterations value. * * @return 10000 */ static int default_value() { return 10000; } }; /** * Relative tolerance parameter for convergence. */ struct tol_rel_obj { /** * Return the string description of tol_rel_obj. * * @return description */ static std::string description() { return "Relative tolerance parameter for convergence."; } /** * Validates tol_rel_obj; must be greater than 0. * * @param[in] tol_rel_obj argument to validate * @throw std::invalid_argument unless tol_rel_obj is greater than * zero */ static void validate(double tol_rel_obj) { if (!(tol_rel_obj > 0)) throw std::invalid_argument( "tol_rel_obj must be greater" " than 0."); } /** * Return the default tol_rel_obj value. * * @return 0.01 */ static double default_value() { return 0.01; } }; /** * Stepsize scaling parameter for variational inference */ struct eta { /** * Return the string description of eta. * * @return description */ static std::string description() { return "Stepsize scaling parameter."; } /** * Validates eta; must be greater than 0. * * @param[in] eta argument to validate * @throw std::invalid_argument unless eta is greater than zero */ static void validate(double eta) { if (!(eta > 0)) throw std::invalid_argument("eta must be greater than 0."); } /** * Return the default eta value. * * @return 1.0 */ static double default_value() { return 1.0; } }; /** * Flag for eta adaptation. */ struct adapt_engaged { /** * Return the string description of adapt_engaged. * * @return description */ static std::string description() { return "Boolean flag for eta adaptation."; } /** * Validates adapt_engaged. This is a no-op. 
* * @param[in] adapt_engaged argument to validate */ static void validate(bool adapt_engaged) {} /** * Return the default adapt_engaged value. * * @return true */ static bool default_value() { return true; } }; /** * Number of iterations for eta adaptation. */ struct adapt_iterations { /** * Return the string description of adapt_iterations. * * @return description */ static std::string description() { return "Number of iterations for eta adaptation."; } /** * Validates adapt_iterations; must be greater than 0. * * @param[in] adapt_iterations argument to validate * @throw std::invalid_argument unless adapt_iterations is * greater than zero */ static void validate(int adapt_iterations) { if (!(adapt_iterations > 0)) throw std::invalid_argument( "adapt_iterations must be greater" " than 0."); } /** * Return the default adapt_iterations value. * * @return 50 */ static int default_value() { return 50; } }; /** * Evaluate ELBO every Nth iteration */ struct eval_elbo { /** * Return the string description of eval_elbo. Evaluate * ELBO at every eval_elbo iterations. * * @return description */ static std::string description() { return "Number of iterations between ELBO evaluations"; } /** * Validates eval_elbo; must be greater than 0. * * @param[in] eval_elbo argument to validate * @throw std::invalid_argument unless eval_elbo is greater than zero */ static void validate(int eval_elbo) { if (!(eval_elbo > 0)) throw std::invalid_argument("eval_elbo must be greater than 0."); } /** * Return the default eval_elbo value. * * @return 100 */ static int default_value() { return 100; } }; /** * Number of approximate posterior output draws to save. */ struct output_draws { /** * Return the string description of output_draws. * * @return description */ static std::string description() { return "Number of approximate posterior output draws to save."; } /** * Validates output_draws; must be greater than or equal to 0. 
* * @param[in] output_draws argument to validate * @throw std::invalid_argument unless output_draws is greater than * or equal to zero */ static void validate(int output_draws) { if (!(output_draws >= 0)) throw std::invalid_argument( "output_draws must be greater than" " or equal to 0."); } /** * Return the default output_samples value. * * @return 1000 */ static int default_value() { return 1000; } }; } // namespace advi } // namespace experimental } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/0000755000176200001440000000000014645137105021721 5ustar liggesusersStanHeaders/inst/include/src/stan/services/sample/fixed_param.hpp0000644000176200001440000000637114645137105024720 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_FIXED_PARAM_HPP #define STAN_SERVICES_SAMPLE_FIXED_PARAM_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs the fixed parameter sampler. * * The fixed parameter sampler sets the parameters randomly once * on the unconstrained scale, then runs the model for the number * of iterations specified with the parameters fixed. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] refresh Controls the output * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int fixed_param(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_samples, int num_thin, int refresh, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, false, logger, init_writer); stan::mcmc::fixed_param_sampler sampler; util::mcmc_writer writer(sample_writer, diagnostic_writer, logger); Eigen::VectorXd cont_params(cont_vector.size()); for (size_t i = 0; i < cont_vector.size(); i++) cont_params[i] = cont_vector[i]; stan::mcmc::sample s(cont_params, 0, 0); // Headers writer.write_sample_names(s, sampler, model); writer.write_diagnostic_names(s, sampler, model); auto start = std::chrono::steady_clock::now(); util::generate_transitions(sampler, num_samples, 0, num_samples, num_thin, refresh, true, false, writer, s, model, rng, interrupt, logger); auto end = std::chrono::steady_clock::now(); double sample_delta_t = 
std::chrono::duration_cast(end - start) .count() / 1000.0; writer.write_timing(0.0, sample_delta_t); return error_codes::OK; } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_nuts_unit_e_adapt.hpp0000644000176200001440000000671214645137105026774 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_NUTS_UNIT_E_ADAPT_HPP #define STAN_SERVICES_SAMPLE_HMC_NUTS_UNIT_E_ADAPT_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs HMC with NUTS with unit Euclidean * metric with adaptation. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int 
hmc_nuts_unit_e_adapt( Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); stan::mcmc::adapt_unit_e_nuts sampler(model, rng); sampler.set_nominal_stepsize(stepsize); sampler.set_stepsize_jitter(stepsize_jitter); sampler.set_max_depth(max_depth); sampler.get_stepsize_adaptation().set_mu(log(10 * stepsize)); sampler.get_stepsize_adaptation().set_delta(delta); sampler.get_stepsize_adaptation().set_gamma(gamma); sampler.get_stepsize_adaptation().set_kappa(kappa); sampler.get_stepsize_adaptation().set_t0(t0); util::run_adaptive_sampler( sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_static_unit_e_adapt.hpp0000644000176200001440000000701614645137105027270 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_STATIC_UNIT_E_ADAPT_HPP #define STAN_SERVICES_SAMPLE_HMC_STATIC_UNIT_E_ADAPT_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs static HMC with unit Euclidean * metric with adaptation. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_unit_e_adapt( Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, double delta, double gamma, double kappa, double t0, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); 
stan::mcmc::adapt_unit_e_static_hmc sampler(model, rng); sampler.set_nominal_stepsize_and_T(stepsize, int_time); sampler.set_stepsize_jitter(stepsize_jitter); sampler.get_stepsize_adaptation().set_mu(log(10 * stepsize)); sampler.get_stepsize_adaptation().set_delta(delta); sampler.get_stepsize_adaptation().set_gamma(gamma); sampler.get_stepsize_adaptation().set_kappa(kappa); sampler.get_stepsize_adaptation().set_t0(t0); util::run_adaptive_sampler( sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_nuts_dense_e_adapt.hpp0000644000176200001440000004252214645137105027112 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_NUTS_DENSE_E_ADAPT_HPP #define STAN_SERVICES_SAMPLE_HMC_NUTS_DENSE_E_ADAPT_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs HMC with NUTS with adaptation using dense Euclidean metric * with a pre-specified Euclidean metric. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial dense inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_dense_e_adapt( Model& model, const stan::io::var_context& init, const stan::io::var_context& init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned 
int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); Eigen::MatrixXd inv_metric; try { inv_metric = util::read_dense_inv_metric(init_inv_metric, model.num_params_r(), logger); util::validate_dense_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::adapt_dense_e_nuts sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize(stepsize); sampler.set_stepsize_jitter(stepsize_jitter); sampler.set_max_depth(max_depth); sampler.get_stepsize_adaptation().set_mu(log(10 * stepsize)); sampler.get_stepsize_adaptation().set_delta(delta); sampler.get_stepsize_adaptation().set_gamma(gamma); sampler.get_stepsize_adaptation().set_kappa(kappa); sampler.get_stepsize_adaptation().set_t0(t0); sampler.set_window_params(num_warmup, init_buffer, term_buffer, window, logger); util::run_adaptive_sampler( sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs HMC with NUTS with adaptation using dense Euclidean metric, * with identity matrix as initial inv_metric. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_dense_e_adapt( Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, 
callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump dmp = util::create_unit_e_dense_inv_metric(model.num_params_r()); stan::io::var_context& unit_e_metric = dmp; return hmc_nuts_dense_e_adapt( model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } /** * Runs multiple chains of NUTS with adaptation using dense Euclidean metric * with a pre-specified Euclidean metric. * * @tparam Model Model class * @tparam InitContextPtr A pointer with underlying type derived from `stan::io::var_context` * @tparam InitInvContextPtr A pointer with underlying type derived from `stan::io::var_context` * @tparam SamplerWriter A type derived from `stan::callbacks::writer` * @tparam DiagnosticWriter A type derived from `stan::callbacks::writer` * @tparam InitWriter A type derived from `stan::callbacks::writer` * @param[in] model Input model to test (with data already instantiated) * @param[in] num_chains The number of chains to run in parallel. `init`, * `init_inv_metric`, `init_writer`, `sample_writer`, and `diagnostic_writer` must * be the same length as this value. * @param[in] init An std vector of init var contexts for initialization of each * chain. * @param[in] init_inv_metric An std vector of var contexts exposing an initial * diagonal inverse Euclidean metric for each chain (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] init_chain_id first chain id. 
The pseudo random number generator * will advance by for each chain by an integer sequence from `init_chain_id` to * `init_chain_id+num_chains-1` * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer std vector of Writer callbacks for unconstrained inits of each chain. * @param[in,out] sample_writer std vector of Writers for draws of each chain. * @param[in,out] diagnostic_writer std vector of Writers for diagnostic * information of each chain. 
* @return error_codes::OK if successful */ template int hmc_nuts_dense_e_adapt( Model& model, size_t num_chains, const std::vector& init, const std::vector& init_inv_metric, unsigned int random_seed, unsigned int init_chain_id, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, std::vector& init_writer, std::vector& sample_writer, std::vector& diagnostic_writer) { if (num_chains == 1) { return hmc_nuts_dense_e_adapt( model, *init[0], *init_inv_metric[0], random_seed, init_chain_id, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer[0], sample_writer[0], diagnostic_writer[0]); } using sample_t = stan::mcmc::adapt_dense_e_nuts; std::vector rngs; rngs.reserve(num_chains); std::vector> cont_vectors; cont_vectors.reserve(num_chains); std::vector samplers; samplers.reserve(num_chains); try { for (int i = 0; i < num_chains; ++i) { rngs.emplace_back(util::create_rng(random_seed, init_chain_id + i)); cont_vectors.emplace_back(util::initialize( model, *init[i], rngs[i], init_radius, true, logger, init_writer[i])); Eigen::MatrixXd inv_metric = util::read_dense_inv_metric( *init_inv_metric[i], model.num_params_r(), logger); util::validate_dense_inv_metric(inv_metric, logger); samplers.emplace_back(model, rngs[i]); samplers[i].set_metric(inv_metric); samplers[i].set_nominal_stepsize(stepsize); samplers[i].set_stepsize_jitter(stepsize_jitter); samplers[i].set_max_depth(max_depth); samplers[i].get_stepsize_adaptation().set_mu(log(10 * stepsize)); samplers[i].get_stepsize_adaptation().set_delta(delta); 
samplers[i].get_stepsize_adaptation().set_gamma(gamma); samplers[i].get_stepsize_adaptation().set_kappa(kappa); samplers[i].get_stepsize_adaptation().set_t0(t0); samplers[i].set_window_params(num_warmup, init_buffer, term_buffer, window, logger); } } catch (const std::domain_error& e) { return error_codes::CONFIG; } tbb::parallel_for( tbb::blocked_range(0, num_chains, 1), [num_warmup, num_samples, num_thin, refresh, save_warmup, num_chains, init_chain_id, &samplers, &model, &rngs, &interrupt, &logger, &sample_writer, &cont_vectors, &diagnostic_writer](const tbb::blocked_range& r) { for (size_t i = r.begin(); i != r.end(); ++i) { util::run_adaptive_sampler(samplers[i], model, cont_vectors[i], num_warmup, num_samples, num_thin, refresh, save_warmup, rngs[i], interrupt, logger, sample_writer[i], diagnostic_writer[i], init_chain_id + i, num_chains); } }, tbb::simple_partitioner()); return error_codes::OK; } /** * Runs multiple chains of NUTS with adaptation using dense Euclidean metric, * with identity matrix as initial inv_metric. * * @tparam Model Model class * @tparam InitContextPtr A pointer with underlying type derived from * `stan::io::var_context` * @tparam InitWriter A type derived from `stan::callbacks::writer` * @tparam SamplerWriter A type derived from `stan::callbacks::writer` * @tparam DiagnosticWriter A type derived from `stan::callbacks::writer` * @param[in] model Input model to test (with data already instantiated) * @param[in] num_chains The number of chains to run in parallel. `init`, * `init_writer`, `sample_writer`, and `diagnostic_writer` must be the same * length as this value. * @param[in] init An std vector of init var contexts for initialization of each * chain. * @param[in] random_seed random seed for the random number generator * @param[in] init_chain_id first chain id. 
The pseudo random number generator * will advance by for each chain by an integer sequence from `init_chain_id` to * `init_chain_id+num_chains-1` * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer std vector of Writer callbacks for unconstrained * inits of each chain. * @param[in,out] sample_writer std vector of Writers for draws of each chain. * @param[in,out] diagnostic_writer std vector of Writers for diagnostic * information of each chain. 
* @return error_codes::OK if successful */ template int hmc_nuts_dense_e_adapt( Model& model, size_t num_chains, const std::vector& init, unsigned int random_seed, unsigned int init_chain_id, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, std::vector& init_writer, std::vector& sample_writer, std::vector& diagnostic_writer) { if (num_chains == 1) { return hmc_nuts_dense_e_adapt( model, *init[0], random_seed, init_chain_id, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer[0], sample_writer[0], diagnostic_writer[0]); } std::vector> unit_e_metrics; unit_e_metrics.reserve(num_chains); for (size_t i = 0; i < num_chains; ++i) { unit_e_metrics.emplace_back(std::make_unique( util::create_unit_e_dense_inv_metric(model.num_params_r()))); } return hmc_nuts_dense_e_adapt( model, num_chains, init, unit_e_metrics, random_seed, init_chain_id, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_static_diag_e_adapt.hpp0000644000176200001440000001624714645137105027223 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_STATIC_DIAG_E_ADAPT_HPP #define STAN_SERVICES_SAMPLE_HMC_STATIC_DIAG_E_ADAPT_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { 
namespace sample { /** * Runs static HMC with adaptation using diagonal Euclidean metric * with a pre-specified Euclidean metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial diagonal inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_diag_e_adapt( Model& model, const stan::io::var_context& init, const stan::io::var_context& init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, 
int refresh, double stepsize, double stepsize_jitter, double int_time, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); Eigen::VectorXd inv_metric; try { inv_metric = util::read_diag_inv_metric(init_inv_metric, model.num_params_r(), logger); util::validate_diag_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::adapt_diag_e_static_hmc sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize_and_T(stepsize, int_time); sampler.set_stepsize_jitter(stepsize_jitter); sampler.get_stepsize_adaptation().set_mu(log(10 * stepsize)); sampler.get_stepsize_adaptation().set_delta(delta); sampler.get_stepsize_adaptation().set_gamma(gamma); sampler.get_stepsize_adaptation().set_kappa(kappa); sampler.get_stepsize_adaptation().set_t0(t0); sampler.set_window_params(num_warmup, init_buffer, term_buffer, window, logger); util::run_adaptive_sampler( sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs static HMC with adaptation using diagonal Euclidean metric, * with identity matrix as initial inv_metric. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_diag_e_adapt( Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, 
callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump dmp = util::create_unit_e_diag_inv_metric(model.num_params_r()); stan::io::var_context& unit_e_metric = dmp; return hmc_static_diag_e_adapt( model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, int_time, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_nuts_unit_e.hpp0000644000176200001440000000604414645137105025621 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_NUTS_UNIT_E_HPP #define STAN_SERVICES_SAMPLE_HMC_NUTS_UNIT_E_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs HMC with NUTS with unit Euclidean * metric without adaptation. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_unit_e(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); stan::mcmc::unit_e_nuts sampler(model, rng); sampler.set_nominal_stepsize(stepsize); sampler.set_stepsize_jitter(stepsize_jitter); sampler.set_max_depth(max_depth); util::run_sampler(sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, 
interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_static_dense_e.hpp0000644000176200001440000001325514645137105026240 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_STATIC_DENSE_E_HPP #define STAN_SERVICES_SAMPLE_HMC_STATIC_DENSE_E_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs static HMC without adaptation using dense Euclidean metric * with a pre-specified Euclidean metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial diagonal inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_dense_e( Model& model, const stan::io::var_context& init, const stan::io::var_context& 
init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); Eigen::MatrixXd inv_metric; try { inv_metric = util::read_dense_inv_metric(init_inv_metric, model.num_params_r(), logger); util::validate_dense_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::dense_e_static_hmc sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize_and_T(stepsize, int_time); sampler.set_stepsize_jitter(stepsize_jitter); util::run_sampler(sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs static HMC without adaptation using dense Euclidean metric, * with identity matrix as initial inv_metric. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_dense_e( Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump dmp = util::create_unit_e_dense_inv_metric(model.num_params_r()); stan::io::var_context& unit_e_metric = dmp; return hmc_static_dense_e(model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, int_time, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif 
StanHeaders/inst/include/src/stan/services/sample/defaults.hpp0000644000176200001440000002374014645137105024247 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_DEFAULTS_HPP #define STAN_SERVICES_SAMPLE_DEFAULTS_HPP #include #include namespace stan { namespace services { namespace sample { /** * Number of sampling iterations. */ struct num_samples { /** * Return the string description of num_samples. * * @return description */ static std::string description() { return "Number of sampling iterations."; } /** * Validates num_samples; num_samples must be greater than or * equal to 0. * * @param[in] num_samples argument to validate * @throw std::invalid_argument unless num_samples is greater * than or equal to zero */ static void validate(int num_samples) { if (!(num_samples >= 0)) throw std::invalid_argument( "num_samples must be greater" " than or equal to 0."); } /** * Return the default num_samples value. * * @return 1000 */ static int default_value() { return 1000; } }; /** * Number of warmup iterations. */ struct num_warmup { /** * Return the string description of num_warmup. * * @return description */ static std::string description() { return "Number of warmup iterations."; } /** * Validates num_warmup; num_warmup must be greater than or * equal to 0. * * @param[in] num_warmup argument to validate * @throw std::invalid_argument unless num_warmup is greater than * or equal to zero */ static void validate(int num_warmup) { if (!(num_warmup >= 0)) throw std::invalid_argument( "num_warmup must be greater" " than or equal to 0."); } /** * Return the default num_warmup value. * * @return 1000 */ static int default_value() { return 1000; } }; /** * Save warmup iterations to output. */ struct save_warmup { /** * Return the string description of save_warmup. * * @return description */ static std::string description() { return "Save warmup iterations to output."; } /** * Validates save_warmup. This is a no-op. 
* * @param[in] save_warmup argument to validate */ static void validate(bool save_warmup) {} /** * Return the default save_warmup value. * * @return false */ static bool default_value() { return false; } }; /** * Period between saved samples. */ struct thin { /** * Return the string description of thin. * * @return description */ static std::string description() { return "Period between saved samples."; } /** * Validates thin; thin must be greater than 0. * * @param[in] thin argument to validate * @throw std::invalid_argument unless thin is greater than zero */ static void validate(int thin) { if (!(thin > 0)) throw std::invalid_argument("thin must be greater than 0."); } /** * Return the default thin value. * * @return 1 */ static int default_value() { return 1; } }; /** * Indicates whether adaptation is engaged. */ struct adaptation_engaged { /** * Return the string description of adaptation_engaged. * * @return description */ static std::string description() { return "Indicates whether adaptation is engaged."; } /** * Validates adaptation_engaged. This is a no op. * * @param[in] adaptation_engaged argument to validate */ static void validate(bool adaptation_engaged) {} /** * Return the default adaptation_engaged value. * * @return true */ static bool default_value() { return true; } }; /** * Adaptation regularization scale. */ struct gamma { /** * Return the string description of gamma. * * @return description */ static std::string description() { return "Adaptation regularization scale."; } /** * Validates gamma; gamma must be greater than 0. * * @param[in] gamma argument to validate * @throw std::invalid_argument unless gamma is greater than zero */ static void validate(double gamma) { if (!(gamma > 0)) throw std::invalid_argument("gamma must be greater than 0."); } /** * Return the default gamma value. * * @return 0.05 */ static double default_value() { return 0.05; } }; /** * Adaptation relaxation exponent. 
*/ struct kappa { /** * Return the string description of kappa. * * @return description */ static std::string description() { return "Adaptation relaxation exponent."; } /** * Validates kappa; kappa must be greater than 0. * * @param[in] kappa argument to validate * @throw std::invalid_argument unless kappa is greater than zero */ static void validate(double kappa) { if (!(kappa > 0)) throw std::invalid_argument("kappa must be greater than 0."); } /** * Return the default kappa value. * * @return 0.75 */ static double default_value() { return 0.75; } }; /** * Adaptation iteration offset. */ struct t0 { /** * Return the description of t0. * * @return description */ static std::string description() { return "Adaptation iteration offset."; } /** * Validates t0; t0 must be greater than 0. * * @param[in] t0 argument to validate * @throw std::invalid_argument unless t0 is greater than zero */ static void validate(double t0) { if (!(t0 > 0)) throw std::invalid_argument("t0 must be greater than 0."); } /** * Return the default t0 value. * * @return 10 */ static double default_value() { return 10; } }; /** * Width of initial fast adaptation interval. */ struct init_buffer { /** * Return the string description of init_buffer. * * @return description */ static std::string description() { return "Width of initial fast adaptation interval."; } /** * Validates init_buffer. This is a no op. * * @param[in] init_buffer argument to validate */ static void validate(unsigned int init_buffer) {} /** * Return the default init_buffer value. * * @return 75 */ static unsigned int default_value() { return 75; } }; /** * Width of final fast adaptation interval. */ struct term_buffer { /** * Return the string description of term_buffer. * * @return description */ static std::string description() { return "Width of final fast adaptation interval."; } /** * Validates term_buffer. This is a no-op. 
* * @param[in] term_buffer argument to validate */ static void validate(unsigned int term_buffer) {} /** * Return the default term_buffer value. * * @return 50 */ static unsigned int default_value() { return 50; } }; /** * Initial width of slow adaptation interval. */ struct window { /** * Return the string description of window. * * @return description */ static std::string description() { return "Initial width of slow adaptation interval."; } /** * Validates window. This is a no op. * * @param[in] window argument to validate */ static void validate(unsigned int window) {} /** * Return the default window value. * * @return 25 */ static unsigned int default_value() { return 25; } }; /** * Total integration time for Hamiltonian evolution. */ struct int_time { /** * Return the string description of int_time. * * @return description */ static std::string description() { return "Total integration time for Hamiltonian evolution."; } /** * Validates int_time. int_time must be greater than 0. * * @param[in] int_time argument to validate * @throw std::invalid_argument unless int_time is greater than zero */ static void validate(double int_time) { if (!(int_time > 0)) throw std::invalid_argument("int_time must be greater than 0."); } /** * Return the default int_time value. * * @return 2 * pi */ static double default_value() { return 6.28318530717959; } }; /** * Maximum tree depth. */ struct max_depth { /** * Return the string description of max_depth. * * @return description */ static std::string description() { return "Maximum tree depth."; } /** * Validates max_depth; max_depth must be greater than 0. * * @param[in] max_depth argument to validate * @throw std::invalid_argument unless max_depth is greater than zero */ static void validate(int max_depth) { if (!(max_depth > 0)) throw std::invalid_argument("max_depth must be greater than 0."); } /** * Return the default max_depth value. 
* * @return 10 */ static int default_value() { return 10; } }; /** * Step size for discrete evolution */ struct stepsize { /** * Return the string description of stepsize. * * @return description */ static std::string description() { return "Step size for discrete evolution."; } /** * Validates stepsize; stepsize must be greater than 0. * * @param[in] stepsize argument to validate * @throw std::invalid_argument unless stepsize is greater than zero */ static void validate(double stepsize) { if (!(stepsize > 0)) throw std::invalid_argument("stepsize must be greater than 0."); } /** * Return the default stepsize value. * * @return 1 */ static double default_value() { return 1; } }; /** * Uniformly random jitter of the stepsize, in percent. */ struct stepsize_jitter { /** * Return the string description of stepsize_jitter. * * @return description */ static std::string description() { return "Uniformly random jitter of the stepsize, in percent."; } /** * Validates stepsize_jitter; stepsize_jitter must be between 0 and 1. * * @param[in] stepsize_jitter argument to validate * @throw std::invalid_argument unless stepsize_jitter is between 0 and * 1, inclusive */ static void validate(double stepsize_jitter) { if (!(stepsize_jitter >= 0 && stepsize_jitter <= 1)) throw std::invalid_argument( "stepsize_jitter must be between" " 0 and 1."); } /** * Return the default stepsize_jitter value. 
* * @return 0 */ static double default_value() { return 0; } }; } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/standalone_gqs.hpp0000644000176200001440000001044214645137105025435 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_STANDALONE_GQS_HPP #define STAN_SERVICES_SAMPLE_STANDALONE_GQS_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { /** * Given a set of draws from a fitted model, generate corresponding * quantities of interest which are written to callback writer. * Matrix of draws consists of one row per draw, one column per parameter. * Draws are processed one row at a time. * Return code indicates success or type of error. * * @tparam Model model class * @param[in] model instantiated model * @param[in] draws sequence of draws of constrained parameters * @param[in] seed seed to use for randomization * @param[in, out] interrupt called every iteration * @param[in, out] logger logger to which to write warning and error messages * @param[in, out] sample_writer writer to which draws are written * @return error code */ template int standalone_generate(const Model &model, const Eigen::MatrixXd &draws, unsigned int seed, callbacks::interrupt &interrupt, callbacks::logger &logger, callbacks::writer &sample_writer) { if (draws.size() == 0) { logger.error("Empty set of draws from fitted model."); return error_codes::DATAERR; } std::vector p_names; model.constrained_param_names(p_names, false, false); std::vector gq_names; model.constrained_param_names(gq_names, false, true); if (!(p_names.size() < gq_names.size())) { logger.error("Model doesn't generate any quantities of interest."); return error_codes::CONFIG; } std::stringstream msg; if (p_names.size() != draws.cols()) { msg << "Wrong number of parameter values in draws from fitted model. 
"; msg << "Expecting " << p_names.size() << " columns, "; msg << "found " << draws.cols() << " columns."; std::string msgstr = msg.str(); logger.error(msgstr); return error_codes::DATAERR; } util::gq_writer writer(sample_writer, logger, p_names.size()); writer.write_gq_names(model); boost::ecuyer1988 rng = util::create_rng(seed, 1); std::vector unconstrained_params_r; std::vector row(draws.cols()); for (size_t i = 0; i < draws.rows(); ++i) { Eigen::Map(&row[0], draws.cols()) = draws.row(i); try { model.unconstrain_array(row, unconstrained_params_r, &msg); } catch (const std::exception &e) { if (msg.str().length() > 0) logger.error(msg); logger.error(e.what()); return error_codes::DATAERR; } interrupt(); // call out to interrupt and fail writer.write_gq_values(model, rng, unconstrained_params_r); } return error_codes::OK; } /** * DEPRECATED: This function assumes dimensions are rectangular, * a restriction which the Stan language may soon relax. * * Find the names, dimensions of the model parameters. * Assembles vectors of name, dimensions for the variables * declared in the parameters block. 
* * @tparam Model type of model * @param[in] model model to query * @param[in, out] param_names sequence of parameter names * @param[in, out] param_dimss sequence of variable dimensionalities */ template #if defined(__GNUC__) || defined(__clang__) __attribute__((deprecated)) #elif defined(_MSC_VER) __declspec(deprecated) #endif void get_model_parameters(const Model &model, std::vector ¶m_names, std::vector> ¶m_dimss) { std::vector all_param_names; model.get_param_names(all_param_names, false, false); std::vector> dimss; model.get_dims(dimss, false, false); // remove zero-size for (size_t i = 0; i < all_param_names.size(); i++) { auto &v = dimss[i]; if (std::find(v.begin(), v.end(), 0) == v.end()) { param_names.emplace_back(all_param_names[i]); param_dimss.emplace_back(dimss[i]); } } } } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_static_unit_e.hpp0000644000176200001440000000606014645137105026115 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_STATIC_UNIT_E_HPP #define STAN_SERVICES_SAMPLE_HMC_STATIC_UNIT_E_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs static HMC with unit Euclidean * metric without adaptation. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_unit_e(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); stan::mcmc::unit_e_static_hmc sampler(model, rng); sampler.set_nominal_stepsize_and_T(stepsize, int_time); sampler.set_stepsize_jitter(stepsize_jitter); util::run_sampler(sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, 
logger, sample_writer, diagnostic_writer); return error_codes::OK; } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_static_dense_e_adapt.hpp0000644000176200001440000001625514645137105027414 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_STATIC_DENSE_E_ADAPT_HPP #define STAN_SERVICES_SAMPLE_HMC_STATIC_DENSE_E_ADAPT_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs static HMC with adaptation using dense Euclidean metric * with a pre-specified Euclidean metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial diagonal inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for 
interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_dense_e_adapt( Model& model, const stan::io::var_context& init, const stan::io::var_context& init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); Eigen::MatrixXd inv_metric; try { inv_metric = util::read_dense_inv_metric(init_inv_metric, model.num_params_r(), logger); util::validate_dense_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::adapt_dense_e_static_hmc sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize_and_T(stepsize, int_time); sampler.set_stepsize_jitter(stepsize_jitter); sampler.get_stepsize_adaptation().set_mu(log(10 * stepsize)); sampler.get_stepsize_adaptation().set_delta(delta); sampler.get_stepsize_adaptation().set_gamma(gamma); sampler.get_stepsize_adaptation().set_kappa(kappa); sampler.get_stepsize_adaptation().set_t0(t0); sampler.set_window_params(num_warmup, init_buffer, term_buffer, window, logger); util::run_adaptive_sampler( sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, 
rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs static HMC with adaptation using dense Euclidean metric. * with identity matrix as initial inv_metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_dense_e_adapt( Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, double delta, double gamma, 
double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump dmp = util::create_unit_e_dense_inv_metric(model.num_params_r()); stan::io::var_context& unit_e_metric = dmp; return hmc_static_dense_e_adapt( model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, int_time, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_nuts_diag_e_adapt.hpp0000644000176200001440000004311114645137105026713 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_NUTS_DIAG_E_ADAPT_HPP #define STAN_SERVICES_SAMPLE_HMC_NUTS_DIAG_E_ADAPT_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs HMC with NUTS with adaptation using diagonal Euclidean metric * with a pre-specified Euclidean metric. 
* * @tparam Model Model class * @tparam InitContextPtr A type derived from `stan::io::var_context` * @tparam InitMetricContext A type derived from `stan::io::var_context` * @tparam SamplerWriter A type derived from `stan::callbacks::writer` * @tparam DiagnosticWriter A type derived from `stan::callbacks::writer` * @tparam InitWriter A type derived from `stan::callbacks::writer` * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial diagonal inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_diag_e_adapt( Model& 
model, const stan::io::var_context& init, const stan::io::var_context& init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); Eigen::VectorXd inv_metric; try { inv_metric = util::read_diag_inv_metric(init_inv_metric, model.num_params_r(), logger); util::validate_diag_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::adapt_diag_e_nuts sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize(stepsize); sampler.set_stepsize_jitter(stepsize_jitter); sampler.set_max_depth(max_depth); sampler.get_stepsize_adaptation().set_mu(log(10 * stepsize)); sampler.get_stepsize_adaptation().set_delta(delta); sampler.get_stepsize_adaptation().set_gamma(gamma); sampler.get_stepsize_adaptation().set_kappa(kappa); sampler.get_stepsize_adaptation().set_t0(t0); sampler.set_window_params(num_warmup, init_buffer, term_buffer, window, logger); util::run_adaptive_sampler( sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs HMC with NUTS with adaptation using diagonal Euclidean metric. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_diag_e_adapt( Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, 
callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump unit_e_metric = util::create_unit_e_diag_inv_metric(model.num_params_r()); return hmc_nuts_diag_e_adapt( model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } /** * Runs multiple chains of HMC with NUTS with adaptation using diagonal * Euclidean metric with a pre-specified Euclidean metric. * * @tparam Model Model class * @tparam InitContextPtr A pointer with underlying type derived from `stan::io::var_context` * @tparam InitInvContextPtr A pointer with underlying type derived from `stan::io::var_context` * @tparam SamplerWriter A type derived from `stan::callbacks::writer` * @tparam DiagnosticWriter A type derived from `stan::callbacks::writer` * @tparam InitWriter A type derived from `stan::callbacks::writer` * @param[in] model Input model to test (with data already instantiated) * @param[in] num_chains The number of chains to run in parallel. `init`, * `init_inv_metric`, `init_writer`, `sample_writer`, and `diagnostic_writer` must * be the same length as this value. * @param[in] init An std vector of init var contexts for initialization of each * chain. * @param[in] init_inv_metric An std vector of var contexts exposing an initial diagonal inverse Euclidean metric for each chain (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] init_chain_id first chain id. 
The pseudo random number generator * will advance for each chain by an integer sequence from `init_chain_id` to * `init_chain_id + num_chains - 1` * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer std vector of Writer callbacks for unconstrained * inits of each chain. * @param[in,out] sample_writer std vector of Writers for draws of each chain. * @param[in,out] diagnostic_writer std vector of Writers for diagnostic * information of each chain. 
* @return error_codes::OK if successful */ template int hmc_nuts_diag_e_adapt( Model& model, size_t num_chains, const std::vector& init, const std::vector& init_inv_metric, unsigned int random_seed, unsigned int init_chain_id, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, std::vector& init_writer, std::vector& sample_writer, std::vector& diagnostic_writer) { if (num_chains == 1) { return hmc_nuts_diag_e_adapt( model, *init[0], *init_inv_metric[0], random_seed, init_chain_id, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer[0], sample_writer[0], diagnostic_writer[0]); } using sample_t = stan::mcmc::adapt_diag_e_nuts; std::vector rngs; rngs.reserve(num_chains); std::vector> cont_vectors; cont_vectors.reserve(num_chains); std::vector samplers; samplers.reserve(num_chains); try { for (int i = 0; i < num_chains; ++i) { rngs.emplace_back(util::create_rng(random_seed, init_chain_id + i)); cont_vectors.emplace_back(util::initialize( model, *init[i], rngs[i], init_radius, true, logger, init_writer[i])); samplers.emplace_back(model, rngs[i]); Eigen::VectorXd inv_metric = util::read_diag_inv_metric( *init_inv_metric[i], model.num_params_r(), logger); util::validate_diag_inv_metric(inv_metric, logger); samplers[i].set_metric(inv_metric); samplers[i].set_nominal_stepsize(stepsize); samplers[i].set_stepsize_jitter(stepsize_jitter); samplers[i].set_max_depth(max_depth); samplers[i].get_stepsize_adaptation().set_mu(log(10 * stepsize)); samplers[i].get_stepsize_adaptation().set_delta(delta); samplers[i].get_stepsize_adaptation().set_gamma(gamma); 
samplers[i].get_stepsize_adaptation().set_kappa(kappa); samplers[i].get_stepsize_adaptation().set_t0(t0); samplers[i].set_window_params(num_warmup, init_buffer, term_buffer, window, logger); } } catch (const std::domain_error& e) { return error_codes::CONFIG; } tbb::parallel_for( tbb::blocked_range(0, num_chains, 1), [num_warmup, num_samples, num_thin, refresh, save_warmup, num_chains, init_chain_id, &samplers, &model, &rngs, &interrupt, &logger, &sample_writer, &cont_vectors, &diagnostic_writer](const tbb::blocked_range& r) { for (size_t i = r.begin(); i != r.end(); ++i) { util::run_adaptive_sampler(samplers[i], model, cont_vectors[i], num_warmup, num_samples, num_thin, refresh, save_warmup, rngs[i], interrupt, logger, sample_writer[i], diagnostic_writer[i], init_chain_id + i, num_chains); } }, tbb::simple_partitioner()); return error_codes::OK; } /** * Runs multiple chains of HMC with NUTS with adaptation using diagonal * Euclidean metric. * * @tparam Model Model class * @tparam InitContextPtr A pointer with underlying type derived from * `stan::io::var_context` * @tparam SamplerWriter A type derived from `stan::callbacks::writer` * @tparam DiagnosticWriter A type derived from `stan::callbacks::writer` * @tparam InitWriter A type derived from `stan::callbacks::writer` * @param[in] model Input model to test (with data already instantiated) * @param[in] num_chains The number of chains to run in parallel. `init`, * `init_writer`, `sample_writer`, and `diagnostic_writer` must be the same * length as this value. * @param[in] init An std vector of init var contexts for initialization of each * chain. * @param[in] random_seed random seed for the random number generator * @param[in] init_chain_id first chain id. 
The pseudo random number generator * will advance by for each chain by an integer sequence from `init_chain_id` to * `init_chain_id+num_chains-1` * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in] delta adaptation target acceptance statistic * @param[in] gamma adaptation regularization scale * @param[in] kappa adaptation relaxation exponent * @param[in] t0 adaptation iteration offset * @param[in] init_buffer width of initial fast adaptation interval * @param[in] term_buffer width of final fast adaptation interval * @param[in] window initial width of slow adaptation interval * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer std vector of Writer callbacks for unconstrained * inits of each chain. * @param[in,out] sample_writer std vector of Writers for draws of each chain. * @param[in,out] diagnostic_writer std vector of Writers for diagnostic * information of each chain. 
* @return error_codes::OK if successful */ template int hmc_nuts_diag_e_adapt( Model& model, size_t num_chains, const std::vector& init, unsigned int random_seed, unsigned int init_chain_id, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, double delta, double gamma, double kappa, double t0, unsigned int init_buffer, unsigned int term_buffer, unsigned int window, callbacks::interrupt& interrupt, callbacks::logger& logger, std::vector& init_writer, std::vector& sample_writer, std::vector& diagnostic_writer) { if (num_chains == 1) { return hmc_nuts_diag_e_adapt( model, *init[0], random_seed, init_chain_id, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer[0], sample_writer[0], diagnostic_writer[0]); } std::vector> unit_e_metrics; unit_e_metrics.reserve(num_chains); for (size_t i = 0; i < num_chains; ++i) { unit_e_metrics.emplace_back(std::make_unique( util::create_unit_e_diag_inv_metric(model.num_params_r()))); } return hmc_nuts_diag_e_adapt( model, num_chains, init, unit_e_metrics, random_seed, init_chain_id, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, delta, gamma, kappa, t0, init_buffer, term_buffer, window, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_nuts_diag_e.hpp0000644000176200001440000001367714645137105025560 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_NUTS_DIAG_E_HPP #define STAN_SERVICES_SAMPLE_HMC_NUTS_DIAG_E_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs 
HMC with NUTS without adaptation using diagonal Euclidean metric * with a pre-specified Euclidean metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial diagonal inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_diag_e(Model& model, const stan::io::var_context& init, const stan::io::var_context& init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); 
Eigen::VectorXd inv_metric; try { inv_metric = util::read_diag_inv_metric(init_inv_metric, model.num_params_r(), logger); util::validate_diag_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::diag_e_nuts sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize(stepsize); sampler.set_stepsize_jitter(stepsize_jitter); sampler.set_max_depth(max_depth); util::run_sampler(sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs HMC with NUTS without adaptation using diagonal Euclidean metric, * with identity matrix as initial inv_metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_diag_e(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, 
int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump dmp = util::create_unit_e_diag_inv_metric(model.num_params_r()); stan::io::var_context& unit_e_metric = dmp; return hmc_nuts_diag_e(model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_nuts_dense_e.hpp0000644000176200001440000001373214645137105025742 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_NUTS_DENSE_E_HPP #define STAN_SERVICES_SAMPLE_HMC_NUTS_DENSE_E_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs HMC with NUTS without adaptation using dense Euclidean metric * with a pre-specified Euclidean metric. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial dense inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_nuts_dense_e(Model& model, const stan::io::var_context& init, const stan::io::var_context& init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, int max_depth, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); Eigen::MatrixXd inv_metric; try { inv_metric = util::read_dense_inv_metric(init_inv_metric, 
model.num_params_r(), logger); util::validate_dense_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::dense_e_nuts sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize(stepsize); sampler.set_stepsize_jitter(stepsize_jitter); sampler.set_max_depth(max_depth); util::run_sampler(sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs HMC with NUTS without adaptation using dense Euclidean metric, * with identity matrix as initial inv_metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] max_depth Maximum tree depth * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful * */ template int hmc_nuts_dense_e(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double 
stepsize_jitter, int max_depth, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump dmp = util::create_unit_e_dense_inv_metric(model.num_params_r()); stan::io::var_context& unit_e_metric = dmp; return hmc_nuts_dense_e(model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, max_depth, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/sample/hmc_static_diag_e.hpp0000644000176200001440000001374514645137105026052 0ustar liggesusers#ifndef STAN_SERVICES_SAMPLE_HMC_STATIC_DIAG_E_HPP #define STAN_SERVICES_SAMPLE_HMC_STATIC_DIAG_E_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace sample { /** * Runs static HMC without adaptation using diagonal Euclidean metric * with a pre-specified Euclidean metric. 
* * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] init_inv_metric var context exposing an initial diagonal inverse Euclidean metric (must be positive definite) * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_diag_e(Model& model, const stan::io::var_context& init, const stan::io::var_context& init_inv_metric, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, double int_time, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, true, logger, init_writer); Eigen::VectorXd inv_metric; try { inv_metric = util::read_diag_inv_metric(init_inv_metric, 
model.num_params_r(), logger); util::validate_diag_inv_metric(inv_metric, logger); } catch (const std::domain_error& e) { return error_codes::CONFIG; } stan::mcmc::diag_e_static_hmc sampler(model, rng); sampler.set_metric(inv_metric); sampler.set_nominal_stepsize_and_T(stepsize, int_time); sampler.set_stepsize_jitter(stepsize_jitter); util::run_sampler(sampler, model, cont_vector, num_warmup, num_samples, num_thin, refresh, save_warmup, rng, interrupt, logger, sample_writer, diagnostic_writer); return error_codes::OK; } /** * Runs static HMC without adaptation using diagonal Euclidean metric. * with identity matrix as initial inv_metric. * * @tparam Model Model class * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_warmup Number of warmup samples * @param[in] num_samples Number of samples * @param[in] num_thin Number to thin the samples * @param[in] save_warmup Indicates whether to save the warmup iterations * @param[in] refresh Controls the output * @param[in] stepsize initial stepsize for discrete evolution * @param[in] stepsize_jitter uniform random jitter of stepsize * @param[in] int_time integration time * @param[in,out] interrupt Callback for interrupts * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] sample_writer Writer for draws * @param[in,out] diagnostic_writer Writer for diagnostic information * @return error_codes::OK if successful */ template int hmc_static_diag_e(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_warmup, int num_samples, int num_thin, bool save_warmup, int refresh, double stepsize, double stepsize_jitter, 
double int_time, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& sample_writer, callbacks::writer& diagnostic_writer) { stan::io::dump dmp = util::create_unit_e_diag_inv_metric(model.num_params_r()); stan::io::var_context& unit_e_metric = dmp; return hmc_static_diag_e(model, init, unit_e_metric, random_seed, chain, init_radius, num_warmup, num_samples, num_thin, save_warmup, refresh, stepsize, stepsize_jitter, int_time, interrupt, logger, init_writer, sample_writer, diagnostic_writer); } } // namespace sample } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/optimize/0000755000176200001440000000000014645137105022300 5ustar liggesusersStanHeaders/inst/include/src/stan/services/optimize/lbfgs.hpp0000644000176200001440000001466214645137105024117 0ustar liggesusers#ifndef STAN_SERVICES_OPTIMIZE_LBFGS_HPP #define STAN_SERVICES_OPTIMIZE_LBFGS_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace optimize { /** * Runs the L-BFGS algorithm for a model. 
* * @tparam Model A model implementation * @tparam jacobian `true` to include Jacobian adjustment (default `false`) * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] history_size amount of history to keep for L-BFGS * @param[in] init_alpha line search step size for first iteration * @param[in] tol_obj convergence tolerance on absolute changes in * objective function value * @param[in] tol_rel_obj convergence tolerance on relative changes * in objective function value * @param[in] tol_grad convergence tolerance on the norm of the gradient * @param[in] tol_rel_grad convergence tolerance on the relative norm of * the gradient * @param[in] tol_param convergence tolerance on changes in parameter * value * @param[in] num_iterations maximum number of iterations * @param[in] save_iterations indicates whether all the iterations should * be saved to the parameter_writer * @param[in] refresh how often to write output to logger * @param[in,out] interrupt callback to be called every iteration * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] parameter_writer output for parameter values * @return error_codes::OK if successful */ template int lbfgs(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int history_size, double init_alpha, double tol_obj, double tol_rel_obj, double tol_grad, double tol_rel_grad, double tol_param, int num_iterations, bool save_iterations, int refresh, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& parameter_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); 
std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, false, logger, init_writer); std::stringstream lbfgs_ss; typedef stan::optimization::BFGSLineSearch, double, Eigen::Dynamic, jacobian> Optimizer; Optimizer lbfgs(model, cont_vector, disc_vector, &lbfgs_ss); lbfgs.get_qnupdate().set_history_size(history_size); lbfgs._ls_opts.alpha0 = init_alpha; lbfgs._conv_opts.tolAbsF = tol_obj; lbfgs._conv_opts.tolRelF = tol_rel_obj; lbfgs._conv_opts.tolAbsGrad = tol_grad; lbfgs._conv_opts.tolRelGrad = tol_rel_grad; lbfgs._conv_opts.tolAbsX = tol_param; lbfgs._conv_opts.maxIts = num_iterations; double lp = lbfgs.logp(); std::stringstream initial_msg; initial_msg << "Initial log joint probability = " << lp; logger.info(initial_msg); std::vector names; names.push_back("lp__"); model.constrained_param_names(names, true, true); parameter_writer(names); if (save_iterations) { std::vector values; std::stringstream msg; model.write_array(rng, cont_vector, disc_vector, values, true, true, &msg); if (msg.str().length() > 0) logger.info(msg); values.insert(values.begin(), lp); parameter_writer(values); } int ret = 0; while (ret == 0) { interrupt(); if (refresh > 0 && (lbfgs.iter_num() == 0 || ((lbfgs.iter_num() + 1) % refresh == 0))) logger.info( " Iter" " log prob" " ||dx||" " ||grad||" " alpha" " alpha0" " # evals" " Notes "); ret = lbfgs.step(); lp = lbfgs.logp(); lbfgs.params_r(cont_vector); if (refresh > 0 && (ret != 0 || !lbfgs.note().empty() || lbfgs.iter_num() == 0 || ((lbfgs.iter_num() + 1) % refresh == 0))) { std::stringstream msg; msg << " " << std::setw(7) << lbfgs.iter_num() << " "; msg << " " << std::setw(12) << std::setprecision(6) << lp << " "; msg << " " << std::setw(12) << std::setprecision(6) << lbfgs.prev_step_size() << " "; msg << " " << std::setw(12) << std::setprecision(6) << lbfgs.curr_g().norm() << " "; msg << " " << std::setw(10) << std::setprecision(4) << lbfgs.alpha() << " "; msg << " " << std::setw(10) << 
std::setprecision(4) << lbfgs.alpha0() << " "; msg << " " << std::setw(7) << lbfgs.grad_evals() << " "; msg << " " << lbfgs.note() << " "; logger.info(msg); } if (lbfgs_ss.str().length() > 0) { logger.info(lbfgs_ss); lbfgs_ss.str(""); } if (save_iterations) { std::vector values; std::stringstream msg; model.write_array(rng, cont_vector, disc_vector, values, true, true, &msg); if (msg.str().length() > 0) logger.info(msg); values.insert(values.begin(), lp); parameter_writer(values); } } if (!save_iterations) { std::vector values; std::stringstream msg; model.write_array(rng, cont_vector, disc_vector, values, true, true, &msg); if (msg.str().length() > 0) logger.info(msg); values.insert(values.begin(), lp); parameter_writer(values); } int return_code; if (ret >= 0) { logger.info("Optimization terminated normally: "); return_code = error_codes::OK; } else { logger.info("Optimization terminated with error: "); return_code = error_codes::SOFTWARE; } logger.info(" " + lbfgs.get_code_string(ret)); return return_code; } } // namespace optimize } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/optimize/defaults.hpp0000644000176200001440000001600514645137105024622 0ustar liggesusers#ifndef STAN_SERVICES_OPTIMIZE_DEFAULTS_HPP #define STAN_SERVICES_OPTIMIZE_DEFAULTS_HPP #include #include namespace stan { namespace services { namespace optimize { /** * Line search step size for first iteration. */ struct init_alpha { /** * Return the string description of init_alpha. * * @return description */ static std::string description() { return "Line search step size for first iteration."; } /** * Validates init_alpha; init_alpha must be greater than 0. * * @param[in] init_alpha argument to validate * @throw std::invalid_argument unless init_alpha is greater than zero */ static void validate(double init_alpha) { if (!(init_alpha > 0)) throw std::invalid_argument("init_alpha must be greater than 0."); } /** * Return the default init_alpha value. 
* * @return 0.001 */ static double default_value() { return 0.001; } }; /** * Convergence tolerance on absolute changes in objective function value. */ struct tol_obj { /** * Return the string description of tol_obj. * * @return description */ static std::string description() { return "Convergence tolerance on absolute changes in objective" " function value."; } /** * Validates tol_obj; tol_obj must be greater than or equal to 0. * * @param[in] tol_obj argument to validate * @throw std::invalid_argument unless tol_obj is greater than or equal * to zero */ static void validate(double tol_obj) { if (!(tol_obj >= 0)) throw std::invalid_argument( "tol_obj must be greater" " than or equal to 0."); } /** * Return the default tol_obj value. * * @return 1e-12 */ static double default_value() { return 1e-12; } }; /** * Convergence tolerance on relative changes in objective function value. */ struct tol_rel_obj { /** * Return the string description of tol_rel_obj. * * @return description */ static std::string description() { return "Convergence tolerance on relative changes in" " objective function value."; } /** * Validates tol_rel_obj; tol_rel_obj must be greater than or equal * to 0. * * @param[in] tol_rel_obj argument to validate * @throw std::invalid_argument unless tol_rel_obj is greater than or * equal to zero */ static void validate(double tol_rel_obj) { if (!(tol_rel_obj >= 0)) throw std::invalid_argument( "tol_rel_obj must be greater" " than or equal to 0"); } /** * Return the default tol_rel_obj value. * * @return 10000 */ static double default_value() { return 10000; } }; /** * Convergence tolerance on the norm of the gradient. */ struct tol_grad { /** * Return the string description of tol_grad. * * @return description */ static std::string description() { return "Convergence tolerance on the norm of the gradient."; } /** * Validates tol_grad; tol_grad must be greater than or equal to 0. 
* * @param[in] tol_grad argument to validate * @throw std::invalid_argument unless tol_grad is greater than or * equal to zero */ static void validate(double tol_grad) { if (!(tol_grad >= 0)) throw std::invalid_argument( "tol_grad must be greater" " than or equal to 0"); } /** * Return the default tol_grad value. * * @return 1e-8 */ static double default_value() { return 1e-8; } }; /** * Convergence tolerance on the relative norm of the gradient. */ struct tol_rel_grad { /** * Return the string description of tol_rel_grad. * * @return description */ static std::string description() { return "Convergence tolerance on the relative norm of the gradient."; } /** * Validates tol_rel_grad; tol_rel_grad must be greater than * or equal to 0. * * @param[in] tol_rel_grad argument to validate * @throw std::invalid_argument unless tol_rel_grad is greater than or * equal to zero */ static void validate(double tol_rel_grad) { if (!(tol_rel_grad >= 0)) throw std::invalid_argument( "tol_rel_grad must be greater" " than or equal to 0."); } /** * Return the default tol_rel_grad value. * * @return 10000000 */ static double default_value() { return 10000000; } }; /** * Convergence tolerance on changes in parameter value. */ struct tol_param { /** * Return the string description of tol_param. * * @return description */ static std::string description() { return "Convergence tolerance on changes in parameter value."; } /** * Validates tol_param; tol_param must be greater than or equal to 0. * * @param[in] tol_param argument to validate * @throw std::invalid_argument unless tol_param is greater than or * equal to zero */ static void validate(double tol_param) { if (!(tol_param >= 0)) throw std::invalid_argument("tol_param"); } /** * Return the default tol_param. * * @return 1e-08 */ static double default_value() { return 1e-08; } }; /** * Amount of history to keep for L-BFGS. */ struct history_size { /** * Return the string description of history_size. 
* * @return description */ static std::string description() { return "Amount of history to keep for L-BFGS."; } /** * Validates history_size; history_size must be greater than 0. * * @param[in] history_size argument to validate * @throw std::invalid_argument unless history_size is greater than * zero */ static void validate(int history_size) { if (!(history_size > 0)) throw std::invalid_argument("history_size must be greater than 0."); } /** * Return the default history_size value. * * @return 5 */ static int default_value() { return 5; } }; /** * Total number of iterations. */ struct iter { /** * Return the string description of iter. * * @return description */ static std::string description() { return "Total number of iterations."; } /** * Validates iter; iter must be greater than 0. * * @param[in] iter argument to validate * @throw std::invalid_argument unless iter is greater than zero */ static void validate(int iter) { if (!(iter > 0)) throw std::invalid_argument("iter must be greater than 0."); } /** * Return the default iter value. * * @return 2000 */ static int default_value() { return 2000; } }; /** * Save optimization iterations to output. */ struct save_iterations { /** * Return the string description of save_iterations. * * @return description */ static std::string description() { return "Save optimization iterations to output."; } /** * Validates save_iterations. This is a no-op. * * @param[in] save_iterations argument to validate */ static void validate(bool save_iterations) {} /** * Return the default save_iterations value. 
* * @return false */ static bool default_value() { return false; } }; } // namespace optimize } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/optimize/laplace_sample.hpp0000644000176200001440000001516314645137105025761 0ustar liggesusers#ifndef STAN_SERVICES_OPTIMIZE_LAPLACE_SAMPLE_HPP #define STAN_SERVICES_OPTIMIZE_LAPLACE_SAMPLE_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace internal { template void laplace_sample(const Model& model, const Eigen::VectorXd& theta_hat, int draws, unsigned int random_seed, int refresh, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& sample_writer) { if (draws <= 0) { throw std::domain_error("Number of draws must be > 0; found draws = " + std::to_string(draws)); } std::vector unc_param_names; model.unconstrained_param_names(unc_param_names, false, false); int num_unc_params = unc_param_names.size(); if (theta_hat.size() != num_unc_params) { throw ::std::domain_error( "Specified mode is wrong size; expected " + std::to_string(num_unc_params) + " unconstrained parameters, but specified mode has size = " + std::to_string(theta_hat.size())); } std::vector param_tp_gq_names; model.constrained_param_names(param_tp_gq_names, true, true); size_t draw_size = param_tp_gq_names.size(); // write names of params, tps, and gqs to sample writer std::vector names; names.push_back("log_p__"); names.push_back("log_q__"); static const bool include_tp = true; static const bool include_gq = true; model.constrained_param_names(names, include_tp, include_gq); sample_writer(names); // create log density functor for vars and vals std::stringstream log_density_msgs; auto log_density_fun = [&](const Eigen::Matrix& theta) { return model.template log_prob( const_cast&>(theta), &log_density_msgs); }; // calculate inverse negative Hessian's Cholesky factor if (refresh > 0) { logger.info("Calculating 
Hessian"); } double log_p; // dummy Eigen::VectorXd grad; // dummy Eigen::MatrixXd hessian; interrupt(); math::internal::finite_diff_hessian_auto(log_density_fun, theta_hat, log_p, grad, hessian); if (refresh > 0 && log_density_msgs.peek() != std::char_traits::eof()) logger.info(log_density_msgs); // calculate Cholesky factor and inverse interrupt(); if (refresh > 0) { logger.info("Calculating inverse of Cholesky factor"); } Eigen::MatrixXd L_neg_hessian = (-hessian).llt().matrixL(); interrupt(); Eigen::MatrixXd inv_sqrt_neg_hessian = L_neg_hessian.inverse().transpose(); interrupt(); Eigen::MatrixXd half_hessian = 0.5 * hessian; if (refresh > 0) { logger.info("Generating draws"); } // generate draws std::stringstream refresh_msg; boost::ecuyer1988 rng = util::create_rng(random_seed, 0); Eigen::VectorXd draw_vec; // declare draw_vec, msgs here to avoid re-alloc for (int m = 0; m < draws; ++m) { interrupt(); // allow interpution each iteration if (refresh > 0 && m % refresh == 0) { refresh_msg << "iteration: " << std::to_string(m); logger.info(refresh_msg); refresh_msg.str(std::string()); } Eigen::VectorXd z(num_unc_params); for (int n = 0; n < num_unc_params; ++n) { z(n) = math::std_normal_rng(rng); } Eigen::VectorXd unc_draw = theta_hat + inv_sqrt_neg_hessian * z; std::stringstream write_array_msgs; model.write_array(rng, unc_draw, draw_vec, include_tp, include_gq, &write_array_msgs); if (refresh > 0 && write_array_msgs.peek() != std::char_traits::eof()) logger.info(write_array_msgs); // output draw, log_p, log_q std::vector draw(&draw_vec(0), &draw_vec(0) + draw_size); double log_p = log_density_fun(unc_draw).val(); draw.insert(draw.begin(), log_p); Eigen::VectorXd diff = unc_draw - theta_hat; double log_q = diff.transpose() * half_hessian * diff; draw.insert(draw.begin() + 1, log_q); sample_writer(draw); } } // namespace internal } // namespace internal /** * Take the specified number of draws from the Laplace approximation * for the model at the specified 
unconstrained mode, writing the * draws, unnormalized log density, and unnormalized density of the * approximation to the sample writer and writing messages to the * logger, returning a return code of zero if successful. * * Interrupts are called between compute-intensive operations. To * turn off all console messages sent to the logger, set refresh to 0. * If an exception is thrown by the model, the return value is * non-zero, and if refresh > 0, its message is given to the logger as * an error. * * @tparam jacobian `true` to include Jacobian adjustment for * constrained parameters * @tparam Model a Stan model * @param[in] m model from which to sample * @param[in] theta_hat unconstrained mode at which to center the * Laplace approximation * @param[in] draws number of draws to generate * @param[in] random_seed seed for generating random numbers in the * Stan program and in sampling * @param[in] refresh period between iterations at which updates are * given, with a value of 0 turning off all messages * @param[in] interrupt callback for interrupting sampling * @param[in,out] logger callback for writing console messages from * sampler and from Stan programs * @param[in,out] sample_writer callback for writing parameter names * and then draws * @return a return code, with 0 indicating success */ template int laplace_sample(const Model& model, const Eigen::VectorXd& theta_hat, int draws, unsigned int random_seed, int refresh, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& sample_writer) { try { internal::laplace_sample(model, theta_hat, draws, random_seed, refresh, interrupt, logger, sample_writer); return error_codes::OK; } catch (const std::exception& e) { if (refresh >= 0) { logger.error(e.what()); } } catch (...) 
{ if (refresh >= 0) { logger.error("unknown exception during execution"); } } return error_codes::DATAERR; } } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/optimize/newton.hpp0000644000176200001440000001042514645137105024325 0ustar liggesusers#ifndef STAN_SERVICES_OPTIMIZE_NEWTON_HPP #define STAN_SERVICES_OPTIMIZE_NEWTON_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace optimize { /** * Runs the Newton algorithm for a model. * * @tparam Model A model implementation * @tparam jacobian `true` to include Jacobian adjustment (default `false`) * @param[in] model the Stan model instantiated with data * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] num_iterations maximum number of iterations * @param[in] save_iterations indicates whether all the iterations should * be saved * @param[in,out] interrupt callback to be called every iteration * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] parameter_writer output for parameter values * @return error_codes::OK if successful */ template int newton(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, int num_iterations, bool save_iterations, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& parameter_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, false, logger, init_writer); double lp(0); try { std::stringstream message; lp = model.template 
log_prob(cont_vector, disc_vector, &message); logger.info(message); } catch (const std::exception& e) { logger.info(""); logger.info( "Informational Message: The current" " proposal is about to be rejected because of" " the following issue:"); logger.info(e.what()); logger.info( "If this warning occurs sporadically, such as" " for highly constrained variable types like" " covariance matrices, then the sampler is fine,"); logger.info( "but if this warning occurs often then your model" " may be either severely ill-conditioned or" " misspecified."); lp = -std::numeric_limits::infinity(); } std::stringstream msg; msg << "Initial log joint probability = " << lp; logger.info(msg); std::vector names; names.push_back("lp__"); model.constrained_param_names(names, true, true); parameter_writer(names); double lastlp = lp; for (int m = 0; m < num_iterations; m++) { if (save_iterations) { std::vector values; std::stringstream ss; model.write_array(rng, cont_vector, disc_vector, values, true, true, &ss); if (ss.str().length() > 0) logger.info(ss); values.insert(values.begin(), lp); parameter_writer(values); } interrupt(); lastlp = lp; lp = stan::optimization::newton_step(model, cont_vector, disc_vector); std::stringstream msg2; msg2 << "Iteration " << std::setw(2) << (m + 1) << "." << " Log joint probability = " << std::setw(10) << lp << ". 
Improved by " << (lp - lastlp) << "."; logger.info(msg2); if (std::fabs(lp - lastlp) <= 1e-8) break; } { std::vector values; std::stringstream ss; model.write_array(rng, cont_vector, disc_vector, values, true, true, &ss); if (ss.str().length() > 0) logger.info(ss); values.insert(values.begin(), lp); parameter_writer(values); } return error_codes::OK; } } // namespace optimize } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/services/optimize/bfgs.hpp0000644000176200001440000001434414645137105023740 0ustar liggesusers#ifndef STAN_SERVICES_OPTIMIZE_BFGS_HPP #define STAN_SERVICES_OPTIMIZE_BFGS_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace services { namespace optimize { /** * Runs the BFGS algorithm for a model. * * @tparam Model A model implementation * @tparam jacobian `true` to include Jacobian adjust (default `false`) * @param[in] model Input model to test (with data already instantiated) * @param[in] init var context for initialization * @param[in] random_seed random seed for the random number generator * @param[in] chain chain id to advance the pseudo random number generator * @param[in] init_radius radius to initialize * @param[in] init_alpha line search step size for first iteration * @param[in] tol_obj convergence tolerance on absolute changes in * objective function value * @param[in] tol_rel_obj convergence tolerance on relative changes * in objective function value * @param[in] tol_grad convergence tolerance on the norm of the gradient * @param[in] tol_rel_grad convergence tolerance on the relative norm of * the gradient * @param[in] tol_param convergence tolerance on changes in parameter * value * @param[in] num_iterations maximum number of iterations * @param[in] save_iterations indicates whether all the iterations should * be saved to the parameter_writer * @param[in] refresh how often to write output to logger * 
@param[in,out] interrupt callback to be called every iteration * @param[in,out] logger Logger for messages * @param[in,out] init_writer Writer callback for unconstrained inits * @param[in,out] parameter_writer output for parameter values * @return error_codes::OK if successful */ template int bfgs(Model& model, const stan::io::var_context& init, unsigned int random_seed, unsigned int chain, double init_radius, double init_alpha, double tol_obj, double tol_rel_obj, double tol_grad, double tol_rel_grad, double tol_param, int num_iterations, bool save_iterations, int refresh, callbacks::interrupt& interrupt, callbacks::logger& logger, callbacks::writer& init_writer, callbacks::writer& parameter_writer) { boost::ecuyer1988 rng = util::create_rng(random_seed, chain); std::vector disc_vector; std::vector cont_vector = util::initialize( model, init, rng, init_radius, false, logger, init_writer); std::stringstream bfgs_ss; typedef stan::optimization::BFGSLineSearch< Model, stan::optimization::BFGSUpdate_HInv<>, double, Eigen::Dynamic, jacobian> Optimizer; Optimizer bfgs(model, cont_vector, disc_vector, &bfgs_ss); bfgs._ls_opts.alpha0 = init_alpha; bfgs._conv_opts.tolAbsF = tol_obj; bfgs._conv_opts.tolRelF = tol_rel_obj; bfgs._conv_opts.tolAbsGrad = tol_grad; bfgs._conv_opts.tolRelGrad = tol_rel_grad; bfgs._conv_opts.tolAbsX = tol_param; bfgs._conv_opts.maxIts = num_iterations; double lp = bfgs.logp(); std::stringstream initial_msg; initial_msg << "Initial log joint probability = " << lp; logger.info(initial_msg); std::vector names; names.push_back("lp__"); model.constrained_param_names(names, true, true); parameter_writer(names); if (save_iterations) { std::vector values; std::stringstream msg; model.write_array(rng, cont_vector, disc_vector, values, true, true, &msg); if (msg.str().length() > 0) logger.info(msg); values.insert(values.begin(), lp); parameter_writer(values); } int ret = 0; while (ret == 0) { interrupt(); if (refresh > 0 && (bfgs.iter_num() == 0 || 
((bfgs.iter_num() + 1) % refresh == 0))) logger.info( " Iter" " log prob" " ||dx||" " ||grad||" " alpha" " alpha0" " # evals" " Notes "); ret = bfgs.step(); lp = bfgs.logp(); bfgs.params_r(cont_vector); if (refresh > 0 && (ret != 0 || !bfgs.note().empty() || bfgs.iter_num() == 0 || ((bfgs.iter_num() + 1) % refresh == 0))) { std::stringstream msg; msg << " " << std::setw(7) << bfgs.iter_num() << " "; msg << " " << std::setw(12) << std::setprecision(6) << lp << " "; msg << " " << std::setw(12) << std::setprecision(6) << bfgs.prev_step_size() << " "; msg << " " << std::setw(12) << std::setprecision(6) << bfgs.curr_g().norm() << " "; msg << " " << std::setw(10) << std::setprecision(4) << bfgs.alpha() << " "; msg << " " << std::setw(10) << std::setprecision(4) << bfgs.alpha0() << " "; msg << " " << std::setw(7) << bfgs.grad_evals() << " "; msg << " " << bfgs.note() << " "; logger.info(msg); } if (bfgs_ss.str().length() > 0) { logger.info(bfgs_ss); bfgs_ss.str(""); } if (save_iterations) { std::vector values; std::stringstream msg; model.write_array(rng, cont_vector, disc_vector, values, true, true, &msg); // This if is here to match the pre-refactor behavior if (msg.str().length() > 0) logger.info(msg); values.insert(values.begin(), lp); parameter_writer(values); } } if (!save_iterations) { std::vector values; std::stringstream msg; model.write_array(rng, cont_vector, disc_vector, values, true, true, &msg); if (msg.str().length() > 0) logger.info(msg); values.insert(values.begin(), lp); parameter_writer(values); } int return_code; if (ret >= 0) { logger.info("Optimization terminated normally: "); return_code = error_codes::OK; } else { logger.info("Optimization terminated with error: "); return_code = error_codes::SOFTWARE; } logger.info(" " + bfgs.get_code_string(ret)); return return_code; } } // namespace optimize } // namespace services } // namespace stan #endif 
StanHeaders/inst/include/src/stan/services/error_codes.hpp0000644000176200001440000000066414645137105023465 0ustar liggesusers#ifndef STAN_SERVICES_ERROR_CODES_HPP #define STAN_SERVICES_ERROR_CODES_HPP namespace stan { namespace services { struct error_codes { // defining error codes to follow FreeBSD sysexits conventions // http://www.gsp.com/cgi-bin/man.cgi?section=3&topic=sysexits enum { OK = 0, USAGE = 64, DATAERR = 65, NOINPUT = 66, SOFTWARE = 70, CONFIG = 78 }; }; } // namespace services } // namespace stan #endif StanHeaders/inst/include/src/stan/version.hpp0000644000176200001440000000117214645137105021014 0ustar liggesusers#ifndef STAN_VERSION_HPP #define STAN_VERSION_HPP #include #ifndef STAN_STRING_EXPAND #define STAN_STRING_EXPAND(s) #s #endif #ifndef STAN_STRING #define STAN_STRING(s) STAN_STRING_EXPAND(s) #endif #define STAN_MAJOR 2 #define STAN_MINOR 32 #define STAN_PATCH 2 namespace stan { /** Major version number for Stan package. */ const std::string MAJOR_VERSION = STAN_STRING(STAN_MAJOR); /** Minor version number for Stan package. */ const std::string MINOR_VERSION = STAN_STRING(STAN_MINOR); /** Patch version for Stan package. */ const std::string PATCH_VERSION = STAN_STRING(STAN_PATCH); } // namespace stan #endif StanHeaders/inst/include/sunlinsol/0000755000176200001440000000000014511135055017101 5ustar liggesusersStanHeaders/inst/include/sunlinsol/sunlinsol_spbcgs.h0000644000176200001440000001114014645137106022645 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on code sundials_spbcgs.h by: Peter Brown and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the SPBCGS implementation of the * SUNLINSOL module, SUNLINSOL_SPBCGS. The SPBCGS algorithm is based * on the Scaled Preconditioned Bi-CG-Stabilized method. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. * ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_SPBCGS_H #define _SUNLINSOL_SPBCGS_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Default SPBCGS solver parameters */ #define SUNSPBCGS_MAXL_DEFAULT 5 /* ----------------------------------------- * SPBCGS Implementation of SUNLinearSolver * ---------------------------------------- */ struct _SUNLinearSolverContent_SPBCGS { int maxl; int pretype; booleantype zeroguess; int numiters; realtype resnorm; int last_flag; SUNATimesFn ATimes; void* ATData; SUNPSetupFn Psetup; SUNPSolveFn Psolve; void* PData; N_Vector s1; N_Vector s2; N_Vector r; N_Vector r_star; N_Vector p; N_Vector q; N_Vector u; N_Vector Ap; N_Vector vtemp; int print_level; FILE* info_file; }; typedef struct _SUNLinearSolverContent_SPBCGS *SUNLinearSolverContent_SPBCGS; /* --------------------------------------- *Exported Functions for SUNLINSOL_SPBCGS * --------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_SPBCGS(N_Vector y, int pretype, int maxl, SUNContext sunctx); SUNDIALS_EXPORT int SUNLinSol_SPBCGSSetPrecType(SUNLinearSolver S, int pretype); SUNDIALS_EXPORT int SUNLinSol_SPBCGSSetMaxl(SUNLinearSolver S, int maxl); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT int 
SUNLinSolSetATimes_SPBCGS(SUNLinearSolver S, void* A_data, SUNATimesFn ATimes); SUNDIALS_EXPORT int SUNLinSolSetPreconditioner_SPBCGS(SUNLinearSolver S, void* P_data, SUNPSetupFn Pset, SUNPSolveFn Psol); SUNDIALS_EXPORT int SUNLinSolSetScalingVectors_SPBCGS(SUNLinearSolver S, N_Vector s1, N_Vector s2); SUNDIALS_EXPORT int SUNLinSolSetZeroGuess_SPBCGS(SUNLinearSolver S, booleantype onoff); SUNDIALS_EXPORT int SUNLinSolSetup_SPBCGS(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_SPBCGS(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT int SUNLinSolNumIters_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT realtype SUNLinSolResNorm_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT N_Vector SUNLinSolResid_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_SPBCGS(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_SPBCGS(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetInfoFile_SPBCGS(SUNLinearSolver S, FILE* info_file); SUNDIALS_EXPORT int SUNLinSolSetPrintLevel_SPBCGS(SUNLinearSolver S, int print_level); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_cusolversp_batchqr.h0000644000176200001440000001126014645137106025300 0ustar liggesusers/* ---------------------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ---------------------------------------------------------------------------- * Based on work by Donald Wilcox @ LBNL * ---------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------------------- * Header file for cuSolverSp batched QR SUNLinearSolver interface. * ----------------------------------------------------------------------------*/ #ifndef _SUNLINSOL_CUSOLVERSP_H #define _SUNLINSOL_CUSOLVERSP_H #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /* * ---------------------------------------------------------------------------- * PART I: cuSolverSp implementation of SUNLinearSolver * ---------------------------------------------------------------------------- */ struct _SUNLinearSolverContent_cuSolverSp_batchQR { int last_flag; /* last return flag */ booleantype first_factorize; /* is this the first factorization? */ size_t internal_size; /* size of cusolver internal buffer for Q and R */ size_t workspace_size; /* size of cusolver memory block for num. factorization */ cusolverSpHandle_t cusolver_handle; /* cuSolverSp context */ csrqrInfo_t info; /* opaque cusolver data structure */ void* workspace; /* memory block used by cusolver */ const char* desc; /* description of this linear solver */ }; typedef struct _SUNLinearSolverContent_cuSolverSp_batchQR *SUNLinearSolverContent_cuSolverSp_batchQR; /* * ---------------------------------------------------------------------------- * PART II: Functions exported by sunlinsol_sludist * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_cuSolverSp_batchQR(N_Vector y, SUNMatrix A, cusolverSpHandle_t cusol_handle, SUNContext sunctx); /* * ---------------------------------------------------------------------------- * cuSolverSp implementations of SUNLinearSolver operations * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_cuSolverSp_batchQR(SUNLinearSolver S); SUNDIALS_EXPORT 
SUNLinearSolver_ID SUNLinSolGetID_cuSolverSp_batchQR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_cuSolverSp_batchQR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_cuSolverSp_batchQR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_cuSolverSp_batchQR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolFree_cuSolverSp_batchQR(SUNLinearSolver S); /* * ---------------------------------------------------------------------------- * Additional get and set functions. * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNLinSol_cuSolverSp_batchQR_GetDescription(SUNLinearSolver S, char** desc); SUNDIALS_EXPORT void SUNLinSol_cuSolverSp_batchQR_SetDescription(SUNLinearSolver S, const char* desc); SUNDIALS_EXPORT void SUNLinSol_cuSolverSp_batchQR_GetDeviceSpace(SUNLinearSolver S, size_t* cuSolverInternal, size_t* cuSolverWorkspace); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_sptfqmr.h0000644000176200001440000001126514645137106023070 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on code sundials_sptfqmr.h by: Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the SPTFQMR implementation of the * SUNLINSOL module, SUNLINSOL_SPTFQMR. 
The SPTFQMR algorithm is * based on the Scaled Preconditioned Transpose-free Quasi-Minimum * Residual method. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. * ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_SPTFQMR_H #define _SUNLINSOL_SPTFQMR_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Default SPTFQMR solver parameters */ #define SUNSPTFQMR_MAXL_DEFAULT 5 /* ------------------------------------------ * SPTFQMR Implementation of SUNLinearSolver * ------------------------------------------ */ struct _SUNLinearSolverContent_SPTFQMR { int maxl; int pretype; booleantype zeroguess; int numiters; realtype resnorm; int last_flag; SUNATimesFn ATimes; void* ATData; SUNPSetupFn Psetup; SUNPSolveFn Psolve; void* PData; N_Vector s1; N_Vector s2; N_Vector r_star; N_Vector q; N_Vector d; N_Vector v; N_Vector p; N_Vector *r; N_Vector u; N_Vector vtemp1; N_Vector vtemp2; N_Vector vtemp3; int print_level; FILE* info_file; }; typedef struct _SUNLinearSolverContent_SPTFQMR *SUNLinearSolverContent_SPTFQMR; /* ------------------------------------- * Exported Functions SUNLINSOL_SPTFQMR * -------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_SPTFQMR(N_Vector y, int pretype, int maxl, SUNContext sunctx); SUNDIALS_EXPORT int SUNLinSol_SPTFQMRSetPrecType(SUNLinearSolver S, int pretype); SUNDIALS_EXPORT int SUNLinSol_SPTFQMRSetMaxl(SUNLinearSolver S, int maxl); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetATimes_SPTFQMR(SUNLinearSolver S, void* A_data, SUNATimesFn ATimes); SUNDIALS_EXPORT int SUNLinSolSetPreconditioner_SPTFQMR(SUNLinearSolver S, void* P_data, SUNPSetupFn 
Pset, SUNPSolveFn Psol); SUNDIALS_EXPORT int SUNLinSolSetScalingVectors_SPTFQMR(SUNLinearSolver S, N_Vector s1, N_Vector s2); SUNDIALS_EXPORT int SUNLinSolSetZeroGuess_SPTFQMR(SUNLinearSolver S, booleantype onoff); SUNDIALS_EXPORT int SUNLinSolSetup_SPTFQMR(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_SPTFQMR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT int SUNLinSolNumIters_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT realtype SUNLinSolResNorm_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT N_Vector SUNLinSolResid_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_SPTFQMR(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_SPTFQMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetInfoFile_SPTFQMR(SUNLinearSolver LS, FILE* info_file); SUNDIALS_EXPORT int SUNLinSolSetPrintLevel_SPTFQMR(SUNLinearSolver LS, int print_level); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_lapackdense.h0000644000176200001440000000527414645137106023651 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the LAPACK dense implementation of the * SUNLINSOL module, SUNLINSOL_LINPACKDENSE. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. 
* ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_LAPDENSE_H #define _SUNLINSOL_LAPDENSE_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------- * LAPACK dense implementation of SUNLinearSolver * ----------------------------------------------- */ struct _SUNLinearSolverContent_LapackDense { sunindextype N; sunindextype *pivots; sunindextype last_flag; }; typedef struct _SUNLinearSolverContent_LapackDense *SUNLinearSolverContent_LapackDense; /* --------------------------------------------- * Exported Functions for SUNLINSOL_LAPACKDENSE * --------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_LapackDense(N_Vector y, SUNMatrix A, SUNContext sunctx); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_LapackDense(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_LapackDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_LapackDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_LapackDense(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_LapackDense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_LapackDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_LapackDense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_LapackDense(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_dense.h0000644000176200001440000000544314645137106022473 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds, Ashley Crawford @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist 
University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the dense implementation of the * SUNLINSOL module, SUNLINSOL_DENSE. * * Notes: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. * - The definition of the type 'realtype' can be found in the * header file sundials_types.h, and it may be changed (at the * configuration stage) according to the user's needs. * The sundials_types.h file also contains the definition * for the type 'booleantype' and 'indextype'. * ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_DENSE_H #define _SUNLINSOL_DENSE_H #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ---------------------------------------- * Dense Implementation of SUNLinearSolver * ---------------------------------------- */ struct _SUNLinearSolverContent_Dense { sunindextype N; sunindextype *pivots; sunindextype last_flag; }; typedef struct _SUNLinearSolverContent_Dense *SUNLinearSolverContent_Dense; /* ---------------------------------------- * Exported Functions for SUNLINSOL_DENSE * ---------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_Dense(N_Vector y, SUNMatrix A, SUNContext sunctx); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_Dense(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_Dense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_Dense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_Dense(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_Dense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_Dense(SUNLinearSolver S); 
SUNDIALS_EXPORT int SUNLinSolSpace_Dense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_Dense(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_klu.h0000644000176200001440000001220614645137106022163 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on sundials_klu_impl.h and arkode_klu.h/cvode_klu.h/... * code, written by Carol S. Woodward @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the KLU implementation of the * SUNLINSOL module, SUNLINSOL_KLU. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. 
* ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_KLU_H #define _SUNLINSOL_KLU_H #include #include #include #include #ifndef _KLU_H #include #endif #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Default KLU solver parameters */ #define SUNKLU_ORDERING_DEFAULT 1 /* COLAMD */ #define SUNKLU_REINIT_FULL 1 #define SUNKLU_REINIT_PARTIAL 2 /* Interfaces to match 'sunindextype' with the correct KLU types/functions */ #if defined(SUNDIALS_INT64_T) #define sun_klu_symbolic klu_l_symbolic #define sun_klu_numeric klu_l_numeric #define sun_klu_common klu_l_common #define sun_klu_analyze klu_l_analyze #define sun_klu_factor klu_l_factor #define sun_klu_refactor klu_l_refactor #define sun_klu_rcond klu_l_rcond #define sun_klu_condest klu_l_condest #define sun_klu_defaults klu_l_defaults #define sun_klu_free_symbolic klu_l_free_symbolic #define sun_klu_free_numeric klu_l_free_numeric #elif defined(SUNDIALS_INT32_T) #define sun_klu_symbolic klu_symbolic #define sun_klu_numeric klu_numeric #define sun_klu_common klu_common #define sun_klu_analyze klu_analyze #define sun_klu_factor klu_factor #define sun_klu_refactor klu_refactor #define sun_klu_rcond klu_rcond #define sun_klu_condest klu_condest #define sun_klu_defaults klu_defaults #define sun_klu_free_symbolic klu_free_symbolic #define sun_klu_free_numeric klu_free_numeric #else /* incompatible sunindextype for KLU */ #error Incompatible sunindextype for KLU #endif #if defined(SUNDIALS_DOUBLE_PRECISION) #else #error Incompatible realtype for KLU #endif /* -------------------------------------- * KLU Implementation of SUNLinearSolver * -------------------------------------- */ /* Create a typedef for the KLU solver function pointer to suppress compiler * warning messages about sunindextype vs internal KLU index types. 
*/ typedef sunindextype (*KLUSolveFn)(sun_klu_symbolic*, sun_klu_numeric*, sunindextype, sunindextype, double*, sun_klu_common*); struct _SUNLinearSolverContent_KLU { int last_flag; int first_factorize; sun_klu_symbolic *symbolic; sun_klu_numeric *numeric; sun_klu_common common; KLUSolveFn klu_solver; }; typedef struct _SUNLinearSolverContent_KLU *SUNLinearSolverContent_KLU; /* ------------------------------------- * Exported Functions for SUNLINSOL_KLU * ------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_KLU(N_Vector y, SUNMatrix A, SUNContext sunctx); SUNDIALS_EXPORT int SUNLinSol_KLUReInit(SUNLinearSolver S, SUNMatrix A, sunindextype nnz, int reinit_type); SUNDIALS_EXPORT int SUNLinSol_KLUSetOrdering(SUNLinearSolver S, int ordering_choice); /* -------------------- * Accessor functions * -------------------- */ SUNDIALS_EXPORT sun_klu_symbolic* SUNLinSol_KLUGetSymbolic(SUNLinearSolver S); SUNDIALS_EXPORT sun_klu_numeric* SUNLinSol_KLUGetNumeric(SUNLinearSolver S); SUNDIALS_EXPORT sun_klu_common* SUNLinSol_KLUGetCommon(SUNLinearSolver S); /* ----------------------------------------------- * Implementations of SUNLinearSolver operations * ----------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_KLU(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_KLU(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_KLU(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_KLU(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_KLU(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_KLU(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_KLU(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_KLU(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif 
StanHeaders/inst/include/sunlinsol/sunlinsol_spfgmr.h0000644000176200001440000001172314645137106022671 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on code sundials_spfgmr.h by: Daniel R. Reynolds and * Hilari C. Tiedeman @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the SPFGMR implementation of the * SUNLINSOL module, SUNLINSOL_SPFGMR. The SPFGMR algorithm is based * on the Scaled Preconditioned FGMRES (Flexible Generalized Minimal * Residual) method [Y. Saad, SIAM J. Sci. Comput., 1993]. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. 
* ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_SPFGMR_H #define _SUNLINSOL_SPFGMR_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Default SPFGMR solver parameters */ #define SUNSPFGMR_MAXL_DEFAULT 5 #define SUNSPFGMR_MAXRS_DEFAULT 0 #define SUNSPFGMR_GSTYPE_DEFAULT SUN_MODIFIED_GS /* ----------------------------------------- * SPFGMR Implementation of SUNLinearSolver * ----------------------------------------- */ struct _SUNLinearSolverContent_SPFGMR { int maxl; int pretype; int gstype; int max_restarts; booleantype zeroguess; int numiters; realtype resnorm; int last_flag; SUNATimesFn ATimes; void* ATData; SUNPSetupFn Psetup; SUNPSolveFn Psolve; void* PData; N_Vector s1; N_Vector s2; N_Vector *V; N_Vector *Z; realtype **Hes; realtype *givens; N_Vector xcor; realtype *yg; N_Vector vtemp; realtype *cv; N_Vector *Xv; int print_level; FILE* info_file; }; typedef struct _SUNLinearSolverContent_SPFGMR *SUNLinearSolverContent_SPFGMR; /* ---------------------------------------- * Exported Functions for SUNLINSOL_SPFGMR * ---------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_SPFGMR(N_Vector y, int pretype, int maxl, SUNContext sunctx); SUNDIALS_EXPORT int SUNLinSol_SPFGMRSetPrecType(SUNLinearSolver S, int pretype); SUNDIALS_EXPORT int SUNLinSol_SPFGMRSetGSType(SUNLinearSolver S, int gstype); SUNDIALS_EXPORT int SUNLinSol_SPFGMRSetMaxRestarts(SUNLinearSolver S, int maxrs); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetATimes_SPFGMR(SUNLinearSolver S, void* A_data, SUNATimesFn ATimes); SUNDIALS_EXPORT int SUNLinSolSetPreconditioner_SPFGMR(SUNLinearSolver S, void* P_data, SUNPSetupFn Pset, SUNPSolveFn Psol); SUNDIALS_EXPORT int 
SUNLinSolSetScalingVectors_SPFGMR(SUNLinearSolver S, N_Vector s1, N_Vector s2); SUNDIALS_EXPORT int SUNLinSolSetZeroGuess_SPFGMR(SUNLinearSolver S, booleantype onoff); SUNDIALS_EXPORT int SUNLinSolSetup_SPFGMR(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_SPFGMR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT int SUNLinSolNumIters_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT realtype SUNLinSolResNorm_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT N_Vector SUNLinSolResid_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_SPFGMR(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_SPFGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetInfoFile_SPFGMR(SUNLinearSolver LS, FILE* info_file); SUNDIALS_EXPORT int SUNLinSolSetPrintLevel_SPFGMR(SUNLinearSolver LS, int print_level); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_magmadense.h0000644000176200001440000000532214645137106023472 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the MAGMA dense implementation of the * SUNLINSOL module, SUNLINSOL_MAGMADENSE. 
* ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_MAGMADENSE_H #define _SUNLINSOL_MAGMADENSE_H #include #include #include #include #if defined(SUNDIALS_MAGMA_BACKENDS_CUDA) #define HAVE_CUBLAS #elif defined(SUNDIALS_MAGMA_BACKENDS_HIP) #define HAVE_HIP #endif #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------- * MAGMA dense implementation of SUNLinearSolver * ----------------------------------------------- */ struct _SUNLinearSolverContent_MagmaDense { int last_flag; booleantype async; sunindextype N; SUNMemory pivots; SUNMemory pivotsarr; SUNMemory dpivotsarr; SUNMemory infoarr; SUNMemory rhsarr; SUNMemoryHelper memhelp; magma_queue_t q; }; typedef struct _SUNLinearSolverContent_MagmaDense *SUNLinearSolverContent_MagmaDense; SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_MagmaDense(N_Vector y, SUNMatrix A, SUNContext sunctx); SUNDIALS_EXPORT int SUNLinSol_MagmaDense_SetAsync(SUNLinearSolver S, booleantype onoff); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_MagmaDense(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_MagmaDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_MagmaDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_MagmaDense(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_MagmaDense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_MagmaDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_MagmaDense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_MagmaDense(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_onemkldense.h0000644000176200001440000000577714645137106023713 0ustar liggesusers/* --------------------------------------------------------------------------- * Programmer(s): David J. 
Gardner @ LLNL * --------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * --------------------------------------------------------------------------- * This is the header file for the SUNLINEARSOLVER class implementation using * the Intel oneAPI Math Kernel Library (oneMKL). * ---------------------------------------------------------------------------*/ #ifndef _SUNLINSOL_ONEMKLDENSE_H #define _SUNLINSOL_ONEMKLDENSE_H #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif struct _SUNLinearSolverContent_OneMklDense { int last_flag; /* last error code returned */ sunindextype rows; /* number of rows in A */ SUNMemory pivots; /* pivots array */ sunindextype f_scratch_size; /* num scratchpad elements */ SUNMemory f_scratchpad; /* scratchpad memory */ sunindextype s_scratch_size; /* num scratchpad elements */ SUNMemory s_scratchpad; /* scratchpad memory */ SUNMemoryType mem_type; /* memory type */ SUNMemoryHelper mem_helper; /* memory helper */ ::sycl::queue* queue; /* operation queue */ }; typedef struct _SUNLinearSolverContent_OneMklDense *SUNLinearSolverContent_OneMklDense; /* --------------------------------------------------------------------------- * Implementation specific functions * ---------------------------------------------------------------------------*/ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_OneMklDense(N_Vector y, SUNMatrix A, SUNContext sunctx); SUNDIALS_STATIC_INLINE SUNLinearSolver_Type SUNLinSolGetType_OneMklDense(SUNLinearSolver S) { return SUNLINEARSOLVER_DIRECT; }; SUNDIALS_STATIC_INLINE SUNLinearSolver_ID SUNLinSolGetID_OneMklDense(SUNLinearSolver S) { return SUNLINEARSOLVER_ONEMKLDENSE; 
}; SUNDIALS_EXPORT int SUNLinSolInitialize_OneMklDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_OneMklDense(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_OneMklDense(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_OneMklDense(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_OneMklDense(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_OneMklDense(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_superlumt.h0000644000176200001440000001032214645137106023425 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on codes sundials_superlumt_impl.h and _superlumt.h * written by Carol S. Woodward @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the SuperLUMT implementation of the * SUNLINSOL module, SUNLINSOL_SUPERLUMT. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. 
* ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_SLUMT_H #define _SUNLINSOL_SLUMT_H #include #include #include #include /* Assume SuperLU_MT library was built with compatible index type */ #if defined(SUNDIALS_INT64_T) #define _LONGINT #endif #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Default SuperLU_MT solver parameters */ #define SUNSLUMT_ORDERING_DEFAULT 3 /* COLAMD */ /* Interfaces to match 'realtype' with the correct SuperLUMT functions */ #if defined(SUNDIALS_DOUBLE_PRECISION) #ifndef _SLUMT_H #define _SLUMT_H #include "slu_mt_ddefs.h" #endif #define xgstrs dgstrs #define pxgstrf pdgstrf #define pxgstrf_init pdgstrf_init #define xCreate_Dense_Matrix dCreate_Dense_Matrix #define xCreate_CompCol_Matrix dCreate_CompCol_Matrix #elif defined(SUNDIALS_SINGLE_PRECISION) #ifndef _SLUMT_H #define _SLUMT_H #include "slu_mt_sdefs.h" #endif #define xgstrs sgstrs #define pxgstrf psgstrf #define pxgstrf_init psgstrf_init #define xCreate_Dense_Matrix sCreate_Dense_Matrix #define xCreate_CompCol_Matrix sCreate_CompCol_Matrix #else /* incompatible sunindextype for SuperLUMT */ #error Incompatible realtype for SuperLUMT #endif /* -------------------------------------------- * SuperLUMT Implementation of SUNLinearSolver * -------------------------------------------- */ struct _SUNLinearSolverContent_SuperLUMT { int last_flag; int first_factorize; SuperMatrix *A, *AC, *L, *U, *B; Gstat_t *Gstat; sunindextype *perm_r, *perm_c; sunindextype N; int num_threads; realtype diag_pivot_thresh; int ordering; superlumt_options_t *options; }; typedef struct _SUNLinearSolverContent_SuperLUMT *SUNLinearSolverContent_SuperLUMT; /* ------------------------------------------- * Exported Functions for SUNLINSOL_SUPERLUMT * ------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_SuperLUMT(N_Vector y, SUNMatrix A, int num_threads, SUNContext sunctx); SUNDIALS_EXPORT int 
SUNLinSol_SuperLUMTSetOrdering(SUNLinearSolver S, int ordering_choice); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_SuperLUMT(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_SuperLUMT(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_SuperLUMT(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_SuperLUMT(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_SuperLUMT(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_SuperLUMT(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_SuperLUMT(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_SuperLUMT(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_superludist.h0000644000176200001440000001307614645137106023761 0ustar liggesusers/* * ---------------------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ---------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------------------- * This is the header file for the SuperLU-DIST implementation of the SUNLINSOL * module. * * Part I contains declarations specific to the SuperLU-Dist implementation of * the supplied SUNLINSOL module. * * Part II contains the prototype for the constructor SUNSuperLUDIST as well as * implementation-specific prototypes for various useful solver operations. * * Notes: * * - The definition of the generic SUNLinearSolver structure can be found in * the header file sundials_linearsolver.h. 
* ---------------------------------------------------------------------------- */ #ifndef _SUNLINSOL_SLUDIST_H #define _SUNLINSOL_SLUDIST_H #include #include #include #include #include #include #if (SUPERLU_DIST_MAJOR_VERSION >= 7) || ((SUPERLU_DIST_MAJOR_VERSION == 6) && (SUPERLU_DIST_MINOR_VERSION >= 3)) #define xLUstructInit dLUstructInit #define xScalePermstructInit dScalePermstructInit #define xScalePermstructFree dScalePermstructFree #define xLUstructFree dLUstructFree #define xDestroy_LU dDestroy_LU #define xScalePermstruct_t dScalePermstruct_t #define xLUstruct_t dLUstruct_t #define xSOLVEstruct_t dSOLVEstruct_t #else #define xLUstructInit LUstructInit #define xScalePermstructInit ScalePermstructInit #define xScalePermstructFree ScalePermstructFree #define xLUstructFree LUstructFree #define xDestroy_LU Destroy_LU #define xScalePermstruct_t ScalePermstruct_t #define xLUstruct_t LUstruct_t #define xSOLVEstruct_t SOLVEstruct_t #endif #ifdef __cplusplus extern "C" { #endif /* * ---------------------------------------------------------------------------- * PART I: SuperLU-DIST implementation of SUNLinearSolver * ---------------------------------------------------------------------------- */ struct _SUNLinearSolverContent_SuperLUDIST { booleantype first_factorize; int last_flag; realtype berr; gridinfo_t *grid; xLUstruct_t *lu; superlu_dist_options_t *options; xScalePermstruct_t *scaleperm; xSOLVEstruct_t *solve; SuperLUStat_t *stat; sunindextype N; }; typedef struct _SUNLinearSolverContent_SuperLUDIST *SUNLinearSolverContent_SuperLUDIST; /* * ---------------------------------------------------------------------------- * PART II: Functions exported by sunlinsol_sludist * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_SuperLUDIST(N_Vector y, SUNMatrix A, gridinfo_t *grid, xLUstruct_t *lu, xScalePermstruct_t *scaleperm, xSOLVEstruct_t *solve, SuperLUStat_t *stat, superlu_dist_options_t 
*options, SUNContext sunctx); /* * ---------------------------------------------------------------------------- * Accessor functions. * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT realtype SUNLinSol_SuperLUDIST_GetBerr(SUNLinearSolver LS); SUNDIALS_EXPORT gridinfo_t* SUNLinSol_SuperLUDIST_GetGridinfo(SUNLinearSolver LS); SUNDIALS_EXPORT xLUstruct_t* SUNLinSol_SuperLUDIST_GetLUstruct(SUNLinearSolver LS); SUNDIALS_EXPORT superlu_dist_options_t* SUNLinSol_SuperLUDIST_GetSuperLUOptions(SUNLinearSolver LS); SUNDIALS_EXPORT xScalePermstruct_t* SUNLinSol_SuperLUDIST_GetScalePermstruct(SUNLinearSolver LS); SUNDIALS_EXPORT xSOLVEstruct_t* SUNLinSol_SuperLUDIST_GetSOLVEstruct(SUNLinearSolver LS); SUNDIALS_EXPORT SuperLUStat_t* SUNLinSol_SuperLUDIST_GetSuperLUStat(SUNLinearSolver LS); /* * ---------------------------------------------------------------------------- * SuperLU-DIST implementations of SUNLinearSolver operations * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_SuperLUDIST(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_SuperLUDIST(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_SuperLUDIST(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_SuperLUDIST(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_SuperLUDIST(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_SuperLUDIST(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_SuperLUDIST(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_SuperLUDIST(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_lapackband.h0000644000176200001440000000523714645137106023456 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): 
Daniel Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the LAPACK band implementation of the * SUNLINSOL module, SUNLINSOL_LAPACKBAND. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. * ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_LAPBAND_H #define _SUNLINSOL_LAPBAND_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ---------------------------------------------- * LAPACK band implementation of SUNLinearSolver * ---------------------------------------------- */ struct _SUNLinearSolverContent_LapackBand { sunindextype N; sunindextype *pivots; sunindextype last_flag; }; typedef struct _SUNLinearSolverContent_LapackBand *SUNLinearSolverContent_LapackBand; /* -------------------------------------------- * Exported Functions for SUNLINSOL_LAPACKBAND * -------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_LapackBand(N_Vector y, SUNMatrix A, SUNContext sunctx); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_LapackBand(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_LapackBand(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_LapackBand(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_LapackBand(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_LapackBand(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype 
SUNLinSolLastFlag_LapackBand(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_LapackBand(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_LapackBand(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_band.h0000644000176200001440000000472714645137106022305 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds, Ashley Crawford @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the band implementation of the * SUNLINSOL module, SUNLINSOL_BAND. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. 
* ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_BAND_H #define _SUNLINSOL_BAND_H #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* --------------------------------------- * Band Implementation of SUNLinearSolver * --------------------------------------- */ struct _SUNLinearSolverContent_Band { sunindextype N; sunindextype *pivots; sunindextype last_flag; }; typedef struct _SUNLinearSolverContent_Band *SUNLinearSolverContent_Band; /* -------------------------------------- * Exported Functions for SUNLINSOL_BAND * -------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_Band(N_Vector y, SUNMatrix A, SUNContext sunctx); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_Band(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_Band(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_Band(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup_Band(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_Band(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_Band(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_Band(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_Band(SUNLinearSolver S); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_spgmr.h0000644000176200001440000001157214645137106022525 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * Based on code sundials_spgmr.h by: Scott D. Cohen, * Alan C. Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. 
* * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the SPGMR implementation of the * SUNLINSOL module, SUNLINSOL_SPGMR. The SPGMR algorithm is based * on the Scaled Preconditioned GMRES (Generalized Minimal Residual) * method. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. * ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_SPGMR_H #define _SUNLINSOL_SPGMR_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Default SPGMR solver parameters */ #define SUNSPGMR_MAXL_DEFAULT 5 #define SUNSPGMR_MAXRS_DEFAULT 0 #define SUNSPGMR_GSTYPE_DEFAULT SUN_MODIFIED_GS /* ---------------------------------------- * SPGMR Implementation of SUNLinearSolver * ---------------------------------------- */ struct _SUNLinearSolverContent_SPGMR { int maxl; int pretype; int gstype; int max_restarts; booleantype zeroguess; int numiters; realtype resnorm; int last_flag; SUNATimesFn ATimes; void* ATData; SUNPSetupFn Psetup; SUNPSolveFn Psolve; void* PData; N_Vector s1; N_Vector s2; N_Vector *V; realtype **Hes; realtype *givens; N_Vector xcor; realtype *yg; N_Vector vtemp; realtype *cv; N_Vector *Xv; int print_level; FILE* info_file; }; typedef struct _SUNLinearSolverContent_SPGMR *SUNLinearSolverContent_SPGMR; /* --------------------------------------- * Exported Functions for SUNLINSOL_SPGMR * --------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_SPGMR(N_Vector y, int pretype, int maxl, SUNContext sunctx); SUNDIALS_EXPORT int SUNLinSol_SPGMRSetPrecType(SUNLinearSolver S, int pretype); SUNDIALS_EXPORT int SUNLinSol_SPGMRSetGSType(SUNLinearSolver S, int gstype); SUNDIALS_EXPORT int 
SUNLinSol_SPGMRSetMaxRestarts(SUNLinearSolver S, int maxrs); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetATimes_SPGMR(SUNLinearSolver S, void* A_data, SUNATimesFn ATimes); SUNDIALS_EXPORT int SUNLinSolSetPreconditioner_SPGMR(SUNLinearSolver S, void* P_data, SUNPSetupFn Pset, SUNPSolveFn Psol); SUNDIALS_EXPORT int SUNLinSolSetScalingVectors_SPGMR(SUNLinearSolver S, N_Vector s1, N_Vector s2); SUNDIALS_EXPORT int SUNLinSolSetZeroGuess_SPGMR(SUNLinearSolver S, booleantype onff); SUNDIALS_EXPORT int SUNLinSolSetup_SPGMR(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve_SPGMR(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT int SUNLinSolNumIters_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT realtype SUNLinSolResNorm_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT N_Vector SUNLinSolResid_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_SPGMR(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_SPGMR(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetInfoFile_SPGMR(SUNLinearSolver LS, FILE* info_file); SUNDIALS_EXPORT int SUNLinSolSetPrintLevel_SPGMR(SUNLinearSolver LS, int print_level); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sunlinsol/sunlinsol_pcg.h0000644000176200001440000001045014645137106022140 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds, Ashley Crawford @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. 
* * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the PCG implementation of the * SUNLINSOL module, SUNLINSOL_PCG. The PCG algorithm is based * on the Preconditioned Conjugate Gradient. * * Note: * - The definition of the generic SUNLinearSolver structure can * be found in the header file sundials_linearsolver.h. * ----------------------------------------------------------------- */ #ifndef _SUNLINSOL_PCG_H #define _SUNLINSOL_PCG_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Default PCG solver parameters */ #define SUNPCG_MAXL_DEFAULT 5 /* -------------------------------------- * PCG Implementation of SUNLinearSolver * -------------------------------------- */ struct _SUNLinearSolverContent_PCG { int maxl; int pretype; booleantype zeroguess; int numiters; realtype resnorm; int last_flag; SUNATimesFn ATimes; void* ATData; SUNPSetupFn Psetup; SUNPSolveFn Psolve; void* PData; N_Vector s; N_Vector r; N_Vector p; N_Vector z; N_Vector Ap; int print_level; FILE* info_file; }; typedef struct _SUNLinearSolverContent_PCG *SUNLinearSolverContent_PCG; /* ------------------------------------- * Exported Functions for SUNLINSOL_PCG * ------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSol_PCG(N_Vector y, int pretype, int maxl, SUNContext sunctx); SUNDIALS_EXPORT int SUNLinSol_PCGSetPrecType(SUNLinearSolver S, int pretype); SUNDIALS_EXPORT int SUNLinSol_PCGSetMaxl(SUNLinearSolver S, int maxl); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType_PCG(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID_PCG(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolInitialize_PCG(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetATimes_PCG(SUNLinearSolver S, void* A_data, SUNATimesFn ATimes); SUNDIALS_EXPORT int 
SUNLinSolSetPreconditioner_PCG(SUNLinearSolver S, void* P_data, SUNPSetupFn Pset, SUNPSolveFn Psol); SUNDIALS_EXPORT int SUNLinSolSetScalingVectors_PCG(SUNLinearSolver S, N_Vector s, N_Vector nul); SUNDIALS_EXPORT int SUNLinSolSetZeroGuess_PCG(SUNLinearSolver S, booleantype onoff); SUNDIALS_EXPORT int SUNLinSolSetup_PCG(SUNLinearSolver S, SUNMatrix nul); SUNDIALS_EXPORT int SUNLinSolSolve_PCG(SUNLinearSolver S, SUNMatrix nul, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT int SUNLinSolNumIters_PCG(SUNLinearSolver S); SUNDIALS_EXPORT realtype SUNLinSolResNorm_PCG(SUNLinearSolver S); SUNDIALS_EXPORT N_Vector SUNLinSolResid_PCG(SUNLinearSolver S); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag_PCG(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace_PCG(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree_PCG(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetInfoFile_PCG(SUNLinearSolver LS, FILE* info_file); SUNDIALS_EXPORT int SUNLinSolSetPrintLevel_PCG(SUNLinearSolver LS, int print_level); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/kinsol/0000755000176200001440000000000014511135055016352 5ustar liggesusersStanHeaders/inst/include/kinsol/kinsol_direct.h0000644000176200001440000000443314645137106021367 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Header file for the deprecated direct linear solver interface in * KINSOL; these routines now just wrap the updated KINSOL generic * linear solver interface in kinsol_ls.h. * -----------------------------------------------------------------*/ #ifndef _KINDLS_H #define _KINDLS_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================= Function Types (typedefs for equivalent types in kinsol_ls.h) =================================================================*/ typedef KINLsJacFn KINDlsJacFn; /*=================================================================== Exported Functions (wrappers for equivalent routines in kinsol_ls.h) ===================================================================*/ SUNDIALS_DEPRECATED_EXPORT_MSG("use KINSetLinearSolver instead") int KINDlsSetLinearSolver(void *kinmem, SUNLinearSolver LS, SUNMatrix A); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINSetJacFn instead") int KINDlsSetJacFn(void *kinmem, KINDlsJacFn jac); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetLinWorkSpace instead") int KINDlsGetWorkSpace(void *kinmem, long int *lenrw, long int *leniw); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumJacEvals instead") int KINDlsGetNumJacEvals(void *kinmem, long int *njevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumLinFuncEvals instead") int KINDlsGetNumFuncEvals(void *kinmem, long int *nfevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetLastLinFlag instead") int KINDlsGetLastFlag(void *kinmem, long int *flag); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetLinReturnFlagName instead") char *KINDlsGetReturnFlagName(long int flag); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/kinsol/kinsol_bbdpre.h0000644000176200001440000000421014645137106021344 0ustar liggesusers/* 
----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Alan Hindmarsh, Radu Serban, and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the KINBBDPRE module, for a * band-block-diagonal preconditioner, i.e. a block-diagonal * matrix with banded blocks. * -----------------------------------------------------------------*/ #ifndef _KINBBDPRE_H #define _KINBBDPRE_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* KINBBDPRE return values */ #define KINBBDPRE_SUCCESS 0 #define KINBBDPRE_PDATA_NULL -11 #define KINBBDPRE_FUNC_UNRECVR -12 /* User-supplied function Types */ typedef int (*KINBBDCommFn)(sunindextype Nlocal, N_Vector u, void *user_data); typedef int (*KINBBDLocalFn)(sunindextype Nlocal, N_Vector uu, N_Vector gval, void *user_data); /* Exported Functions */ SUNDIALS_EXPORT int KINBBDPrecInit(void *kinmem, sunindextype Nlocal, sunindextype mudq, sunindextype mldq, sunindextype mukeep, sunindextype mlkeep, realtype dq_rel_uu, KINBBDLocalFn gloc, KINBBDCommFn gcomm); /* Optional output functions */ SUNDIALS_EXPORT int KINBBDPrecGetWorkSpace(void *kinmem, long int *lenrwBBDP, long int *leniwBBDP); SUNDIALS_EXPORT int KINBBDPrecGetNumGfnEvals(void *kinmem, long int *ngevalsBBDP); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/kinsol/kinsol.h0000644000176200001440000001452414645137106020037 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Allan Taylor, Alan Hindmarsh, Radu Serban, and * Aaron 
Collier, Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for the main KINSOL solver. * -----------------------------------------------------------------*/ #ifndef _KINSOL_H #define _KINSOL_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------- * KINSOL Constants * ----------------- */ /* return values */ #define KIN_SUCCESS 0 #define KIN_INITIAL_GUESS_OK 1 #define KIN_STEP_LT_STPTOL 2 #define KIN_WARNING 99 #define KIN_MEM_NULL -1 #define KIN_ILL_INPUT -2 #define KIN_NO_MALLOC -3 #define KIN_MEM_FAIL -4 #define KIN_LINESEARCH_NONCONV -5 #define KIN_MAXITER_REACHED -6 #define KIN_MXNEWT_5X_EXCEEDED -7 #define KIN_LINESEARCH_BCFAIL -8 #define KIN_LINSOLV_NO_RECOVERY -9 #define KIN_LINIT_FAIL -10 #define KIN_LSETUP_FAIL -11 #define KIN_LSOLVE_FAIL -12 #define KIN_SYSFUNC_FAIL -13 #define KIN_FIRST_SYSFUNC_ERR -14 #define KIN_REPTD_SYSFUNC_ERR -15 #define KIN_VECTOROP_ERR -16 #define KIN_CONTEXT_ERR -17 /* Anderson Acceleration Orthogonalization Choice */ #define KIN_ORTH_MGS 0 #define KIN_ORTH_ICWY 1 #define KIN_ORTH_CGS2 2 #define KIN_ORTH_DCGS2 3 /* Enumeration for eta choice */ #define KIN_ETACHOICE1 1 #define KIN_ETACHOICE2 2 #define KIN_ETACONSTANT 3 /* Enumeration for global strategy */ #define KIN_NONE 0 #define KIN_LINESEARCH 1 #define KIN_PICARD 2 #define KIN_FP 3 /* ------------------------------ * User-Supplied Function Types * ------------------------------ */ typedef int (*KINSysFn)(N_Vector uu, N_Vector fval, void *user_data); typedef void (*KINErrHandlerFn)(int 
error_code, const char *module, const char *function, char *msg, void *user_data); typedef void (*KINInfoHandlerFn)(const char *module, const char *function, char *msg, void *user_data); /* ------------------- * Exported Functions * ------------------- */ /* Creation function */ SUNDIALS_EXPORT void *KINCreate(SUNContext sunctx); /* Initialization function */ SUNDIALS_EXPORT int KINInit(void *kinmem, KINSysFn func, N_Vector tmpl); /* Solver function */ SUNDIALS_EXPORT int KINSol(void *kinmem, N_Vector uu, int strategy, N_Vector u_scale, N_Vector f_scale); /* Optional input functions */ SUNDIALS_EXPORT int KINSetErrHandlerFn(void *kinmem, KINErrHandlerFn ehfun, void *eh_data); SUNDIALS_EXPORT int KINSetErrFile(void *kinmem, FILE *errfp); SUNDIALS_EXPORT int KINSetInfoHandlerFn(void *kinmem, KINInfoHandlerFn ihfun, void *ih_data); SUNDIALS_EXPORT int KINSetInfoFile(void *kinmem, FILE *infofp); SUNDIALS_EXPORT int KINSetUserData(void *kinmem, void *user_data); SUNDIALS_EXPORT int KINSetPrintLevel(void *kinmem, int printfl); SUNDIALS_EXPORT int KINSetDamping(void *kinmem, realtype beta); SUNDIALS_EXPORT int KINSetMAA(void *kinmem, long int maa); SUNDIALS_EXPORT int KINSetOrthAA(void *kinmem, int orthaa); SUNDIALS_EXPORT int KINSetDelayAA(void *kinmem, long int delay); SUNDIALS_EXPORT int KINSetDampingAA(void *kinmem, realtype beta); SUNDIALS_EXPORT int KINSetReturnNewest(void *kinmem, booleantype ret_newest); SUNDIALS_EXPORT int KINSetNumMaxIters(void *kinmem, long int mxiter); SUNDIALS_EXPORT int KINSetNoInitSetup(void *kinmem, booleantype noInitSetup); SUNDIALS_EXPORT int KINSetNoResMon(void *kinmem, booleantype noNNIResMon); SUNDIALS_EXPORT int KINSetMaxSetupCalls(void *kinmem, long int msbset); SUNDIALS_EXPORT int KINSetMaxSubSetupCalls(void *kinmem, long int msbsetsub); SUNDIALS_EXPORT int KINSetEtaForm(void *kinmem, int etachoice); SUNDIALS_EXPORT int KINSetEtaConstValue(void *kinmem, realtype eta); SUNDIALS_EXPORT int KINSetEtaParams(void *kinmem, realtype 
egamma, realtype ealpha); SUNDIALS_EXPORT int KINSetResMonParams(void *kinmem, realtype omegamin, realtype omegamax); SUNDIALS_EXPORT int KINSetResMonConstValue(void *kinmem, realtype omegaconst); SUNDIALS_EXPORT int KINSetNoMinEps(void *kinmem, booleantype noMinEps); SUNDIALS_EXPORT int KINSetMaxNewtonStep(void *kinmem, realtype mxnewtstep); SUNDIALS_EXPORT int KINSetMaxBetaFails(void *kinmem, long int mxnbcf); SUNDIALS_EXPORT int KINSetRelErrFunc(void *kinmem, realtype relfunc); SUNDIALS_EXPORT int KINSetFuncNormTol(void *kinmem, realtype fnormtol); SUNDIALS_EXPORT int KINSetScaledStepTol(void *kinmem, realtype scsteptol); SUNDIALS_EXPORT int KINSetConstraints(void *kinmem, N_Vector constraints); SUNDIALS_EXPORT int KINSetSysFunc(void *kinmem, KINSysFn func); /* Optional output functions */ SUNDIALS_EXPORT int KINGetWorkSpace(void *kinmem, long int *lenrw, long int *leniw); SUNDIALS_EXPORT int KINGetNumNonlinSolvIters(void *kinmem, long int *nniters); SUNDIALS_EXPORT int KINGetNumFuncEvals(void *kinmem, long int *nfevals); SUNDIALS_EXPORT int KINGetNumBetaCondFails(void *kinmem, long int *nbcfails); SUNDIALS_EXPORT int KINGetNumBacktrackOps(void *kinmem, long int *nbacktr); SUNDIALS_EXPORT int KINGetFuncNorm(void *kinmem, realtype *fnorm); SUNDIALS_EXPORT int KINGetStepLength(void *kinmem, realtype *steplength); SUNDIALS_EXPORT char *KINGetReturnFlagName(long int flag); /* Free function */ SUNDIALS_EXPORT void KINFree(void **kinmem); /* KINLS interface function that depends on KINSysFn */ SUNDIALS_EXPORT int KINSetJacTimesVecSysFn(void *kinmem, KINSysFn jtimesSysFn); /* Debugging output file */ SUNDIALS_EXPORT int KINSetDebugFile(void *kinmem, FILE *debugfp); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/kinsol/kinsol_spils.h0000644000176200001440000000633214645137106021247 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel R. 
Reynolds @ SMU * Scott Cohen, Alan Hindmarsh, Radu Serban, * and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Header file for the deprecated Scaled Preconditioned Iterative * Linear Solver interface in KINSOL; these routines now just wrap * the updated KINSOL generic linear solver interface in kinsol_ls.h. * -----------------------------------------------------------------*/ #ifndef _KINSPILS_H #define _KINSPILS_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*=============================================================== Function Types (typedefs for equivalent types in kinsol_ls.h) ===============================================================*/ typedef KINLsPrecSetupFn KINSpilsPrecSetupFn; typedef KINLsPrecSolveFn KINSpilsPrecSolveFn; typedef KINLsJacTimesVecFn KINSpilsJacTimesVecFn; /*==================================================================== Exported Functions (wrappers for equivalent routines in kinsol_ls.h) ====================================================================*/ SUNDIALS_DEPRECATED_EXPORT_MSG("use KINSetLinearSolver instead") int KINSpilsSetLinearSolver(void *kinmem, SUNLinearSolver LS); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINSetPreconditioner instead") int KINSpilsSetPreconditioner(void *kinmem, KINSpilsPrecSetupFn psetup, KINSpilsPrecSolveFn psolve); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINSetJacTimesVecFn instead") int KINSpilsSetJacTimesVecFn(void *kinmem, KINSpilsJacTimesVecFn jtv); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetLinWorkSpace instead") int KINSpilsGetWorkSpace(void *kinmem, long int *lenrwLS, long 
int *leniwLS); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumPrecEvals instead") int KINSpilsGetNumPrecEvals(void *kinmem, long int *npevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumPrecSolves instead") int KINSpilsGetNumPrecSolves(void *kinmem, long int *npsolves); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumLinIters instead") int KINSpilsGetNumLinIters(void *kinmem, long int *nliters); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumLinConvFails instead") int KINSpilsGetNumConvFails(void *kinmem, long int *nlcfails); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumJtimesEvals instead") int KINSpilsGetNumJtimesEvals(void *kinmem, long int *njvevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetNumLinFuncEvals instead") int KINSpilsGetNumFuncEvals(void *kinmem, long int *nfevals); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetLastLinFlag instead") int KINSpilsGetLastFlag(void *kinmem, long int *flag); SUNDIALS_DEPRECATED_EXPORT_MSG("use KINGetLinReturnFlagName instead") char *KINSpilsGetReturnFlagName(long int flag); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/kinsol/kinsol_ls.h0000644000176200001440000001073314645137106020533 0ustar liggesusers/* ---------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU * Scott Cohen, Alan Hindmarsh, Radu Serban, and * Aaron Collier @ LLNL * ---------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ---------------------------------------------------------------- * This is the header file for KINSOL's linear solver interface. 
* ----------------------------------------------------------------*/ #ifndef _KINLS_H #define _KINLS_H #include #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*================================================================== KINLS Constants ==================================================================*/ #define KINLS_SUCCESS 0 #define KINLS_MEM_NULL -1 #define KINLS_LMEM_NULL -2 #define KINLS_ILL_INPUT -3 #define KINLS_MEM_FAIL -4 #define KINLS_PMEM_NULL -5 #define KINLS_JACFUNC_ERR -6 #define KINLS_SUNMAT_FAIL -7 #define KINLS_SUNLS_FAIL -8 /*=============================================================== KINLS user-supplied function prototypes ===============================================================*/ typedef int (*KINLsJacFn)(N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2); typedef int (*KINLsPrecSetupFn)(N_Vector uu, N_Vector uscale, N_Vector fval, N_Vector fscale, void *user_data); typedef int (*KINLsPrecSolveFn)(N_Vector uu, N_Vector uscale, N_Vector fval, N_Vector fscale, N_Vector vv, void *user_data); typedef int (*KINLsJacTimesVecFn)(N_Vector v, N_Vector Jv, N_Vector uu, booleantype *new_uu, void *J_data); /*================================================================== KINLS Exported functions ==================================================================*/ SUNDIALS_EXPORT int KINSetLinearSolver(void *kinmem, SUNLinearSolver LS, SUNMatrix A); /*----------------------------------------------------------------- Optional inputs to the KINLS linear solver interface -----------------------------------------------------------------*/ SUNDIALS_EXPORT int KINSetJacFn(void *kinmem, KINLsJacFn jac); SUNDIALS_EXPORT int KINSetPreconditioner(void *kinmem, KINLsPrecSetupFn psetup, KINLsPrecSolveFn psolve); SUNDIALS_EXPORT int KINSetJacTimesVecFn(void *kinmem, KINLsJacTimesVecFn jtv); 
/*----------------------------------------------------------------- Optional outputs from the KINLS linear solver interface -----------------------------------------------------------------*/ SUNDIALS_EXPORT int KINGetLinWorkSpace(void *kinmem, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int KINGetNumJacEvals(void *kinmem, long int *njevals); SUNDIALS_EXPORT int KINGetNumLinFuncEvals(void *kinmem, long int *nfevals); SUNDIALS_EXPORT int KINGetNumPrecEvals(void *kinmem, long int *npevals); SUNDIALS_EXPORT int KINGetNumPrecSolves(void *kinmem, long int *npsolves); SUNDIALS_EXPORT int KINGetNumLinIters(void *kinmem, long int *nliters); SUNDIALS_EXPORT int KINGetNumLinConvFails(void *kinmem, long int *nlcfails); SUNDIALS_EXPORT int KINGetNumJtimesEvals(void *kinmem, long int *njvevals); SUNDIALS_EXPORT int KINGetLastLinFlag(void *kinmem, long int *flag); SUNDIALS_EXPORT char *KINGetLinReturnFlagName(long int flag); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/0000755000176200001440000000000014513025555016702 5ustar liggesusersStanHeaders/inst/include/sundials/sundials_mpi_types.h0000644000176200001440000000235514645137106022775 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott Cohen, Alan Hindmarsh, Radu Serban, * Aaron Collier, and Slaven Peles @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header file contains definitions of MPI data types, which * are used by MPI parallel vector implementations. 
* -----------------------------------------------------------------*/ #include /* define MPI data types */ #if defined(SUNDIALS_SINGLE_PRECISION) #define MPI_SUNREALTYPE MPI_FLOAT #elif defined(SUNDIALS_DOUBLE_PRECISION) #define MPI_SUNREALTYPE MPI_DOUBLE #elif defined(SUNDIALS_EXTENDED_PRECISION) #define MPI_SUNREALTYPE MPI_LONG_DOUBLE #endif #if defined(SUNDIALS_INT64_T) #define MPI_SUNINDEXTYPE MPI_INT64_T #elif defined(SUNDIALS_INT32_T) #define MPI_SUNINDEXTYPE MPI_INT32_T #endif StanHeaders/inst/include/sundials/sundials_nvector_senswrapper.h0000644000176200001440000001123314645137106025070 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the header file for the implementation of the NVECTOR SensWrapper. * * Part I contains declarations specific to the implementation of the * vector wrapper. * * Part II defines accessor macros that allow the user to efficiently access * the content of the vector wrapper data structure. * * Part III contains the prototype for the constructors N_VNewEmpty_SensWrapper * and N_VNew_SensWrapper, as well as wrappers to NVECTOR vector operations. 
* ---------------------------------------------------------------------------*/ #ifndef _NVECTOR_SENSWRAPPER_H #define _NVECTOR_SENSWRAPPER_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /*============================================================================== PART I: NVector wrapper content structure ============================================================================*/ struct _N_VectorContent_SensWrapper { N_Vector* vecs; /* array of wrapped vectors */ int nvecs; /* number of wrapped vectors */ booleantype own_vecs; /* flag indicating if wrapper owns vectors */ }; typedef struct _N_VectorContent_SensWrapper *N_VectorContent_SensWrapper; /*============================================================================== PART II: Macros to access wrapper content ============================================================================*/ #define NV_CONTENT_SW(v) ( (N_VectorContent_SensWrapper)(v->content) ) #define NV_VECS_SW(v) ( NV_CONTENT_SW(v)->vecs ) #define NV_NVECS_SW(v) ( NV_CONTENT_SW(v)->nvecs ) #define NV_OWN_VECS_SW(v) ( NV_CONTENT_SW(v)->own_vecs ) #define NV_VEC_SW(v,i) ( NV_VECS_SW(v)[i] ) /*============================================================================== PART III: Exported functions ============================================================================*/ /* constructor creates an empty vector wrapper */ SUNDIALS_EXPORT N_Vector N_VNewEmpty_SensWrapper(int nvecs, SUNContext sunctx); SUNDIALS_EXPORT N_Vector N_VNew_SensWrapper(int count, N_Vector w); /* clone operations */ SUNDIALS_EXPORT N_Vector N_VCloneEmpty_SensWrapper(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone_SensWrapper(N_Vector w); /* destructor */ SUNDIALS_EXPORT void N_VDestroy_SensWrapper(N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum_SensWrapper(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst_SensWrapper(realtype c, N_Vector z); 
SUNDIALS_EXPORT void N_VProd_SensWrapper(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv_SensWrapper(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale_SensWrapper(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs_SensWrapper(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv_SensWrapper(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst_SensWrapper(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd_SensWrapper(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm_SensWrapper(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm_SensWrapper(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask_SensWrapper(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin_SensWrapper(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm_SensWrapper(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm_SensWrapper(N_Vector x); SUNDIALS_EXPORT void N_VCompare_SensWrapper(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest_SensWrapper(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask_SensWrapper(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient_SensWrapper(N_Vector num, N_Vector denom); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_version.h0000644000176200001440000000236214645137106022447 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header file is for routines to get SUNDIALS version info * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_VERSION_H #define _SUNDIALS_VERSION_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Fill a string with SUNDIALS version information */ SUNDIALS_EXPORT int SUNDIALSGetVersion(char *version, int len); /* Fills integers with the major, minor, and patch release version numbers and a string with the release label.*/ SUNDIALS_EXPORT int SUNDIALSGetVersionNumber(int *major, int *minor, int *patch, char *label, int len); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_cuda_policies.hpp0000644000176200001440000001537214645137106024132 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header files defines the ExecPolicy classes which * are utilized to determine CUDA kernel launch paramaters. 
* ----------------------------------------------------------------- */ #ifndef _SUNDIALS_CUDAEXECPOLICIES_HPP #define _SUNDIALS_CUDAEXECPOLICIES_HPP #include #include #include namespace sundials { namespace cuda { constexpr const sunindextype WARP_SIZE = 32; constexpr const sunindextype MAX_BLOCK_SIZE = 1024; constexpr const sunindextype MAX_WARPS = MAX_BLOCK_SIZE / WARP_SIZE; class ExecPolicy { public: ExecPolicy(cudaStream_t stream = 0) : stream_(stream) { } virtual size_t gridSize(size_t numWorkUnits = 0, size_t blockDim = 0) const = 0; virtual size_t blockSize(size_t numWorkUnits = 0, size_t gridDim = 0) const = 0; virtual const cudaStream_t* stream() const { return (&stream_); } virtual ExecPolicy* clone() const = 0; ExecPolicy* clone_new_stream(cudaStream_t stream) const { ExecPolicy* ex = clone(); ex->stream_ = stream; return ex; } virtual bool atomic() const { return false; } virtual ~ExecPolicy() {} protected: cudaStream_t stream_; }; /* * A kernel execution policy that maps each thread to a work unit. * The number of threads per block (blockSize) can be set to anything. * The grid size will be chosen so that there are enough threads for one * thread per element. If a stream is provided, it will be used to * execute the kernel. 
*/ class ThreadDirectExecPolicy : public ExecPolicy { public: ThreadDirectExecPolicy(const size_t blockDim, cudaStream_t stream = 0) : blockDim_(blockDim), ExecPolicy(stream) {} ThreadDirectExecPolicy(const ThreadDirectExecPolicy& ex) : blockDim_(ex.blockDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t /*blockDim*/ = 0) const { /* ceil(n/m) = floor((n + m - 1) / m) */ return (numWorkUnits + blockSize() - 1) / blockSize(); } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new ThreadDirectExecPolicy(*this)); } private: const size_t blockDim_; }; /* * A kernel execution policy for kernels that use grid stride loops. * The number of threads per block (blockSize) can be set to anything. * The number of blocks (gridSize) can be set to anything. If a stream * is provided, it will be used to execute the kernel. */ class GridStrideExecPolicy : public ExecPolicy { public: GridStrideExecPolicy(const size_t blockDim, const size_t gridDim, cudaStream_t stream = 0) : blockDim_(blockDim), gridDim_(gridDim), ExecPolicy(stream) {} GridStrideExecPolicy(const GridStrideExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t /*numWorkUnits*/ = 0, size_t /*blockDim*/ = 0) const { return gridDim_; } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new GridStrideExecPolicy(*this)); } private: const size_t blockDim_; const size_t gridDim_; }; /* * A kernel execution policy for performing a reduction across indvidual thread * blocks. The number of threads per block (blockSize) can be set to any valid * multiple of the CUDA warp size. The number of blocks (gridSize) can be set to * any value greater or equal to 0. 
If it is set to 0, then the grid size will * be chosen so that there are at most two work units per thread. If a stream is * provided, it will be used to execute the kernel. */ class BlockReduceAtomicExecPolicy : public ExecPolicy { public: BlockReduceAtomicExecPolicy(const size_t blockDim, const size_t gridDim = 0, cudaStream_t stream = 0) : blockDim_(blockDim), gridDim_(gridDim), ExecPolicy(stream) { if (blockDim < 1 || blockDim % WARP_SIZE) { throw std::invalid_argument("the block size must be a multiple of the CUDA warp size"); } } BlockReduceAtomicExecPolicy(const BlockReduceAtomicExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t /*blockDim*/ = 0) const { if (gridDim_ == 0) { return (numWorkUnits + (blockSize() * 2 - 1)) / (blockSize() * 2); } return gridDim_; } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new BlockReduceAtomicExecPolicy(*this)); } virtual bool atomic() const { return true; } private: const size_t blockDim_; const size_t gridDim_; }; class BlockReduceExecPolicy : public ExecPolicy { public: BlockReduceExecPolicy(const size_t blockDim, const size_t gridDim = 0, cudaStream_t stream = 0) : blockDim_(blockDim), gridDim_(gridDim), ExecPolicy(stream) { if (blockDim < 1 || blockDim % WARP_SIZE) { throw std::invalid_argument("the block size must be a multiple of the CUDA warp size"); } } BlockReduceExecPolicy(const BlockReduceExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t /*blockDim*/ = 0) const { if (gridDim_ == 0) { return (numWorkUnits + (blockSize() * 2 - 1)) / (blockSize() * 2); } return gridDim_; } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const 
{ return static_cast(new BlockReduceExecPolicy(*this)); } bool atomic() const { return false; } private: const size_t blockDim_; const size_t gridDim_; }; } // namespace cuda } // namespace sundials typedef sundials::cuda::ExecPolicy SUNCudaExecPolicy; typedef sundials::cuda::ThreadDirectExecPolicy SUNCudaThreadDirectExecPolicy; typedef sundials::cuda::GridStrideExecPolicy SUNCudaGridStrideExecPolicy; typedef sundials::cuda::BlockReduceExecPolicy SUNCudaBlockReduceExecPolicy; typedef sundials::cuda::BlockReduceAtomicExecPolicy SUNCudaBlockReduceAtomicExecPolicy; #endif StanHeaders/inst/include/sundials/sundials_memory.h0000644000176200001440000001106314645137106022270 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS memory helpers and types. * ----------------------------------------------------------------*/ #ifndef _SUNDIALS_MEMORY_H #define _SUNDIALS_MEMORY_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif typedef enum { SUNMEMTYPE_HOST, /* pageable memory accessible on the host */ SUNMEMTYPE_PINNED, /* page-locked memory accesible on the host */ SUNMEMTYPE_DEVICE, /* memory accessible from the device */ SUNMEMTYPE_UVM /* memory accessible from the host or device */ } SUNMemoryType; /* * SUNMemory is a simple abstraction of a pointer to some * contiguos memory, so that we can keep track of its type * and its ownership. 
*/ typedef struct _SUNMemory *SUNMemory; struct _SUNMemory { void* ptr; SUNMemoryType type; booleantype own; }; /* Creates a new SUNMemory object with a NULL ptr */ SUNDIALS_EXPORT SUNMemory SUNMemoryNewEmpty(void); /* * SUNMemoryHelper holds ops which can allocate, deallocate, * and copy SUNMemory. */ typedef struct _SUNMemoryHelper_Ops *SUNMemoryHelper_Ops; typedef struct _SUNMemoryHelper *SUNMemoryHelper; struct _SUNMemoryHelper { void* content; SUNMemoryHelper_Ops ops; SUNContext sunctx; }; struct _SUNMemoryHelper_Ops { /* operations that implementations are required to provide */ int (*alloc)(SUNMemoryHelper, SUNMemory* memptr, size_t mem_size, SUNMemoryType mem_type, void* queue); int (*dealloc)(SUNMemoryHelper, SUNMemory mem, void* queue); int (*copy)(SUNMemoryHelper, SUNMemory dst, SUNMemory src, size_t mem_size, void* queue); /* operations that provide default implementations */ int (*copyasync)(SUNMemoryHelper, SUNMemory dst, SUNMemory src, size_t mem_size, void* queue); SUNMemoryHelper (*clone)(SUNMemoryHelper); int (*destroy)(SUNMemoryHelper); }; /* * Generic SUNMemoryHelper functions that work without a SUNMemoryHelper object. */ /* Creates a new SUNMemory object which points to the same data as another * SUNMemory object. * The SUNMemory returned will not own the ptr, therefore, it will not free * the ptr in Dealloc. */ SUNDIALS_EXPORT SUNMemory SUNMemoryHelper_Alias(SUNMemory mem); /* Creates a new SUNMemory object with ptr set to the user provided pointer * The SUNMemory returned will not own the ptr, therefore, it will not free * the ptr in Dealloc. */ SUNDIALS_EXPORT SUNMemory SUNMemoryHelper_Wrap(void* ptr, SUNMemoryType mem_type); /* * Required SUNMemoryHelper operations. 
*/ SUNDIALS_EXPORT int SUNMemoryHelper_Alloc(SUNMemoryHelper, SUNMemory* memptr, size_t mem_size, SUNMemoryType mem_type, void* queue); SUNDIALS_EXPORT int SUNMemoryHelper_Dealloc(SUNMemoryHelper, SUNMemory mem, void* queue); SUNDIALS_EXPORT int SUNMemoryHelper_Copy(SUNMemoryHelper, SUNMemory dst, SUNMemory src, size_t mem_size, void* queue); /* * Optional SUNMemoryHelper operations. */ SUNDIALS_EXPORT int SUNMemoryHelper_CopyAsync(SUNMemoryHelper, SUNMemory dst, SUNMemory src, size_t mem_size, void* queue); /* Clones the SUNMemoryHelper */ SUNDIALS_EXPORT SUNMemoryHelper SUNMemoryHelper_Clone(SUNMemoryHelper); /* Frees the SUNMemoryHelper */ SUNDIALS_EXPORT int SUNMemoryHelper_Destroy(SUNMemoryHelper); /* * Utility SUNMemoryHelper functions. */ /* Creates an empty SUNMemoryHelper object */ SUNDIALS_EXPORT SUNMemoryHelper SUNMemoryHelper_NewEmpty(SUNContext sunctx); /* Copyies the SUNMemoryHelper ops structure from src->ops to dst->ops. */ SUNDIALS_EXPORT int SUNMemoryHelper_CopyOps(SUNMemoryHelper src, SUNMemoryHelper dst); /* Checks that all required SUNMemoryHelper ops are provided */ SUNDIALS_EXPORT booleantype SUNMemoryHelper_ImplementsRequiredOps(SUNMemoryHelper); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_dense.h0000644000176200001440000003051414645137106022060 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for a generic package of DENSE matrix * operations, based on the DlsMat type defined in sundials_direct.h. 
* * There are two sets of dense solver routines listed in * this file: one set uses type DlsMat defined below and the * other set uses the type realtype ** for dense matrix arguments. * Routines that work with the type DlsMat begin with "Dense". * Routines that work with realtype** begin with "dense". * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_DENSE_H #define _SUNDIALS_DENSE_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ---------------------------------------------------------------------------- * Functions: SUNDlsMat_DenseGETRF and SUNDlsMat_DenseGETRS * ---------------------------------------------------------------------------- * SUNDlsMat_DenseGETRF performs the LU factorization of the M by N dense matrix A. * This is done using standard Gaussian elimination with partial (row) pivoting. * Note that this applies only to matrices with M >= N and full column rank. * * A successful LU factorization leaves the matrix A and the pivot array p with * the following information: * * (1) p[k] contains the row number of the pivot element chosen at the beginning * of elimination step k, k=0, 1, ..., N-1. * * (2) If the unique LU factorization of A is given by PA = LU, where P is a * permutation matrix, L is a lower trapezoidal matrix with all 1's on the * diagonal, and U is an upper triangular matrix, then the upper triangular * part of A (including its diagonal) contains U and the strictly lower * trapezoidal part of A contains the multipliers, I-L. * * For square matrices (M = N), L is unit lower triangular. * * SUNDlsMat_DenseGETRF returns 0 if successful. Otherwise it encountered a zero * diagonal element during the factorization. In this case it returns the column * index (numbered from one) at which it encountered the zero. 
* * SUNDlsMat_DenseGETRS solves the N-dimensional system A x = b using the LU * factorization in A and the pivot information in p computed in * SUNDlsMat_DenseGETRF. The solution x is returned in b. This routine cannot fail * if the corresponding call to SUNDlsMat_DenseGETRF did not fail. * SUNDlsMat_DenseGETRS does NOT check for a square matrix! * * ---------------------------------------------------------------------------- * SUNDlsMat_DenseGETRF and SUNDlsMat_DenseGETRS are simply wrappers around * SUNDlsMat_denseGETRF and SUNDlsMat_denseGETRS, respectively, which perform all the * work by directly accessing the data in the SUNDlsMat A (i.e. in A->cols). * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT sunindextype SUNDlsMat_DenseGETRF(SUNDlsMat A, sunindextype *p); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DenseGETRF instead") sunindextype DenseGETRF(DlsMat A, sunindextype *p); SUNDIALS_EXPORT void SUNDlsMat_DenseGETRS(SUNDlsMat A, sunindextype *p, realtype *b); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DenseGETRS instead") void DenseGETRS(DlsMat A, sunindextype *p, realtype *b); SUNDIALS_EXPORT sunindextype SUNDlsMat_denseGETRF(realtype **a, sunindextype m, sunindextype n, sunindextype *p); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseGETRF instead") sunindextype denseGETRF(realtype **a, sunindextype m, sunindextype n, sunindextype *p); SUNDIALS_EXPORT void SUNDlsMat_denseGETRS(realtype **a, sunindextype n, sunindextype *p, realtype *b); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseGETRS instead") void denseGETRS(realtype **a, sunindextype n, sunindextype *p, realtype *b); /* * ---------------------------------------------------------------------------- * Functions : SUNDlsMat_DensePOTRF and SUNDlsMat_DensePOTRS * ---------------------------------------------------------------------------- * SUNDlsMat_DensePOTRF computes the Cholesky factorization of a real symmetric * positive definite 
matrix A. * ---------------------------------------------------------------------------- * SUNDlsMat_DensePOTRS solves a system of linear equations A*X = B with a * symmetric positive definite matrix A using the Cholesky factorization A = * L*L**T computed by SUNDlsMat_DensePOTRF. * * ---------------------------------------------------------------------------- * SUNDlsMat_DensePOTRF and SUNDlsMat_DensePOTRS are simply wrappers around * SUNDlsMat_densePOTRF and SUNDlsMat_densePOTRS, respectively, which perform all the * work by directly accessing the data in the DlsMat A (i.e. the field cols) * ---------------------------------------------------------------------------- */ SUNDIALS_EXPORT sunindextype SUNDlsMat_DensePOTRF(SUNDlsMat A); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DensePOTRF instead") sunindextype DensePOTRF(DlsMat A); SUNDIALS_EXPORT void SUNDlsMat_DensePOTRS(SUNDlsMat A, realtype *b); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DensePOTRS instead") void DensePOTRS(DlsMat A, realtype *b); SUNDIALS_EXPORT sunindextype SUNDlsMat_densePOTRF(realtype **a, sunindextype m); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_densePOTRF instead") sunindextype densePOTRF(realtype **a, sunindextype m); SUNDIALS_EXPORT void SUNDlsMat_densePOTRS(realtype **a, sunindextype m, realtype *b); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_densePOTRS instead") void densePOTRS(realtype **a, sunindextype m, realtype *b); /* * ----------------------------------------------------------------------------- * Functions : SUNDlsMat_DenseGEQRF and SUNDlsMat_DenseORMQR * ----------------------------------------------------------------------------- * SUNDlsMat_DenseGEQRF computes a QR factorization of a real M-by-N matrix A: A = * Q * R (with M>= N). * * SUNDlsMat_DenseGEQRF requires a temporary work vector wrk of length M. 
* ----------------------------------------------------------------------------- * SUNDlsMat_DenseORMQR computes the product w = Q * v where Q is a real orthogonal * matrix defined as the product of k elementary reflectors * * Q = H(1) H(2) . . . H(k) * * as returned by SUNDlsMat_DenseGEQRF. Q is an M-by-N matrix, v is a vector of * length N and w is a vector of length M (with M >= N). * * SUNDlsMat_DenseORMQR requires a temporary work vector wrk of length M. * * ----------------------------------------------------------------------------- * SUNDlsMat_DenseGEQRF and SUNDlsMat_DenseORMQR are simply wrappers around * SUNDlsMat_denseGEQRF and SUNDlsMat_denseORMQR, respectively, which perform all the * work by directly accessing the data in the DlsMat A (i.e. the field cols) * ----------------------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNDlsMat_DenseGEQRF(SUNDlsMat A, realtype *beta, realtype *wrk); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DenseGEQRF instead") int DenseGEQRF(DlsMat A, realtype *beta, realtype *wrk); SUNDIALS_EXPORT int SUNDlsMat_DenseORMQR(SUNDlsMat A, realtype *beta, realtype *vn, realtype *vm, realtype *wrk); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DenseORMQR instead") int DenseORMQR(DlsMat A, realtype *beta, realtype *vn, realtype *vm, realtype *wrk); SUNDIALS_EXPORT int SUNDlsMat_denseGEQRF(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *wrk); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseGEQRF instead") int denseGEQRF(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *wrk); SUNDIALS_EXPORT int SUNDlsMat_denseORMQR(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *v, realtype *w, realtype *wrk); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseORMQR instead") int denseORMQR(realtype **a, sunindextype m, sunindextype n, realtype *beta, realtype *v, realtype *w, realtype *wrk); /* * 
---------------------------------------------------------------------------- * Function : SUNDlsMat_DenseCopy * ---------------------------------------------------------------------------- * SUNDlsMat_DenseCopy copies the contents of the M-by-N matrix A into the * M-by-N matrix B. * * SUNDlsMat_DenseCopy is a wrapper around SUNDlsMat_denseCopy which accesses * the data in the SUNDlsMat A and SUNDlsMat B (i.e. the fields cols) * ----------------------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_DenseCopy(SUNDlsMat A, SUNDlsMat B); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DenseCopy instead") void DenseCopy(DlsMat A, DlsMat B); SUNDIALS_EXPORT void SUNDlsMat_denseCopy(realtype **a, realtype **b, sunindextype m, sunindextype n); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseCopy instead") void denseCopy(realtype **a, realtype **b, sunindextype m, sunindextype n); /* * ----------------------------------------------------------------------------- * Function: SUNDlsMat_DenseScale * ----------------------------------------------------------------------------- * SUNDlsMat_DenseScale scales the elements of the M-by-N matrix A by the * constant c and stores the result back in A. * * SUNDlsMat_DenseScale is a wrapper around SUNDlsMat_denseScale which performs * the actual scaling by accessing the data in the SUNDlsMat A (i.e. in * A->cols). 
* ----------------------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_DenseScale(realtype c, SUNDlsMat A); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsSUNDlsMat_DenseScale_denseCopy instead") void DenseScale(realtype c, DlsMat A); SUNDIALS_EXPORT void SUNDlsMat_denseScale(realtype c, realtype **a, sunindextype m, sunindextype n); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseScale instead") void denseScale(realtype c, realtype **a, sunindextype m, sunindextype n); /* * ----------------------------------------------------------------------------- * Function: SUNDlsMat_denseAddIdentity * ----------------------------------------------------------------------------- * SUNDlsMat_denseAddIdentity adds the identity matrix to the n-by-n matrix * stored in a realtype** array. * ----------------------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_denseAddIdentity(realtype **a, sunindextype n); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseAddIdentity instead") void denseAddIdentity(realtype **a, sunindextype n); /* * ----------------------------------------------------------------------------- * Function: SUNDlsMat_DenseMatvec * ----------------------------------------------------------------------------- * SUNDlsMat_DenseMatvec computes the matrix-vector product y = A*x, where A is * an M-by-N matrix, x is a vector of length N, and y is a vector of length M. * No error checking is performed on the length of the arrays x and y. Only y * is modified in this routine. * * SUNDlsMat_DenseMatvec is a wrapper around SUNDlsMat_denseMatvec which * performs the actual product by accessing the data in the SUNDlsMat A. 
* ----------------------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_DenseMatvec(SUNDlsMat A, realtype *x, realtype *y); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DenseMatvec instead") void DenseMatvec(DlsMat A, realtype *x, realtype *y); SUNDIALS_EXPORT void SUNDlsMat_denseMatvec(realtype **a, realtype *x, realtype *y, sunindextype m, sunindextype n); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_denseMatvec instead") void denseMatvec(realtype **a, realtype *x, realtype *y, sunindextype m, sunindextype n); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_nonlinearsolver.h0000644000176200001440000002207214645137106024202 0ustar liggesusers/* ----------------------------------------------------------------------------- * Programmer(s): David J. Gardner, and Cody J. Balos @ LLNL * ----------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------------------- * This is the header file for a generic nonlinear solver package. It defines * the SUNNonlinearSolver structure (_generic_SUNNonlinearSolver) which contains * the following fields: * - an implementation-dependent 'content' field which contains any internal * data required by the solver * - an 'ops' filed which contains a structure listing operations acting on/by * such solvers * * We consider iterative nonlinear solvers for systems in both root finding * (F(y) = 0) or fixed-point (G(y) = y) form. As a result, some of the routines * are applicable only to one type of nonlinear solver. 
* ----------------------------------------------------------------------------- * This header file contains: * - function types supplied to a SUNNonlinearSolver, * - enumeration constants for SUNDIALS-defined nonlinear solver types, * - type declarations for the _generic_SUNNonlinearSolver and * _generic_SUNNonlinearSolver_Ops structures, as well as references to * pointers to such structures (SUNNonlinearSolver), * - prototypes for the nonlinear solver functions which operate * on/by SUNNonlinearSolver objects, and * - return codes for SUNLinearSolver objects. * ----------------------------------------------------------------------------- * At a minimum, a particular implementation of a SUNNonlinearSolver must do the * following: * - specify the 'content' field of a SUNNonlinearSolver, * - implement the operations on/by the SUNNonlinearSovler objects, * - provide a constructor routine for new SUNNonlinearSolver objects * * Additionally, a SUNNonlinearSolver implementation may provide the following: * - "Set" routines to control solver-specific parameters/options * - "Get" routines to access solver-specific performance metrics * ---------------------------------------------------------------------------*/ #ifndef _SUNNONLINEARSOLVER_H #define _SUNNONLINEARSOLVER_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------------------- * Forward references for SUNNonlinearSolver types defined below * ---------------------------------------------------------------------------*/ /* Forward reference for pointer to SUNNonlinearSolver_Ops object */ typedef _SUNDIALS_STRUCT_ _generic_SUNNonlinearSolver_Ops *SUNNonlinearSolver_Ops; /* Forward reference for pointer to SUNNonlinearSolver object */ typedef _SUNDIALS_STRUCT_ _generic_SUNNonlinearSolver *SUNNonlinearSolver; /* ----------------------------------------------------------------------------- * Integrator 
supplied function types * ---------------------------------------------------------------------------*/ typedef int (*SUNNonlinSolSysFn)(N_Vector y, N_Vector F, void* mem); typedef int (*SUNNonlinSolLSetupFn)(booleantype jbad, booleantype* jcur, void* mem); typedef int (*SUNNonlinSolLSolveFn)(N_Vector b, void* mem); typedef int (*SUNNonlinSolConvTestFn)(SUNNonlinearSolver NLS, N_Vector y, N_Vector del, realtype tol, N_Vector ewt, void* mem); /* ----------------------------------------------------------------------------- * SUNNonlinearSolver types * ---------------------------------------------------------------------------*/ typedef enum { SUNNONLINEARSOLVER_ROOTFIND, SUNNONLINEARSOLVER_FIXEDPOINT } SUNNonlinearSolver_Type; /* ----------------------------------------------------------------------------- * Generic definition of SUNNonlinearSolver * ---------------------------------------------------------------------------*/ /* Structure containing function pointers to nonlinear solver operations */ struct _generic_SUNNonlinearSolver_Ops { SUNNonlinearSolver_Type (*gettype)(SUNNonlinearSolver); int (*initialize)(SUNNonlinearSolver); int (*setup)(SUNNonlinearSolver, N_Vector, void*); int (*solve)(SUNNonlinearSolver, N_Vector, N_Vector, N_Vector, realtype, booleantype, void*); int (*free)(SUNNonlinearSolver); int (*setsysfn)(SUNNonlinearSolver, SUNNonlinSolSysFn); int (*setlsetupfn)(SUNNonlinearSolver, SUNNonlinSolLSetupFn); int (*setlsolvefn)(SUNNonlinearSolver, SUNNonlinSolLSolveFn); int (*setctestfn)(SUNNonlinearSolver, SUNNonlinSolConvTestFn, void*); int (*setmaxiters)(SUNNonlinearSolver, int); int (*getnumiters)(SUNNonlinearSolver, long int*); int (*getcuriter)(SUNNonlinearSolver, int*); int (*getnumconvfails)(SUNNonlinearSolver, long int*); }; /* A nonlinear solver is a structure with an implementation-dependent 'content' field, and a pointer to a structure of solver nonlinear solver operations corresponding to that implementation. 
*/ struct _generic_SUNNonlinearSolver { void *content; SUNNonlinearSolver_Ops ops; SUNContext sunctx; }; /* ----------------------------------------------------------------------------- * Functions exported by SUNNonlinearSolver module * ---------------------------------------------------------------------------*/ /* empty constructor/destructor */ SUNDIALS_EXPORT SUNNonlinearSolver SUNNonlinSolNewEmpty(SUNContext sunctx); SUNDIALS_EXPORT void SUNNonlinSolFreeEmpty(SUNNonlinearSolver NLS); /* core functions */ SUNDIALS_EXPORT SUNNonlinearSolver_Type SUNNonlinSolGetType(SUNNonlinearSolver NLS); SUNDIALS_EXPORT int SUNNonlinSolInitialize(SUNNonlinearSolver NLS); SUNDIALS_EXPORT int SUNNonlinSolSetup(SUNNonlinearSolver NLS, N_Vector y, void* mem); SUNDIALS_EXPORT int SUNNonlinSolSolve(SUNNonlinearSolver NLS, N_Vector y0, N_Vector y, N_Vector w, realtype tol, booleantype callLSetup, void *mem); SUNDIALS_EXPORT int SUNNonlinSolFree(SUNNonlinearSolver NLS); /* set functions */ SUNDIALS_EXPORT int SUNNonlinSolSetSysFn(SUNNonlinearSolver NLS, SUNNonlinSolSysFn SysFn); SUNDIALS_EXPORT int SUNNonlinSolSetLSetupFn(SUNNonlinearSolver NLS, SUNNonlinSolLSetupFn SetupFn); SUNDIALS_EXPORT int SUNNonlinSolSetLSolveFn(SUNNonlinearSolver NLS, SUNNonlinSolLSolveFn SolveFn); SUNDIALS_EXPORT int SUNNonlinSolSetConvTestFn(SUNNonlinearSolver NLS, SUNNonlinSolConvTestFn CTestFn, void* ctest_data); SUNDIALS_EXPORT int SUNNonlinSolSetMaxIters(SUNNonlinearSolver NLS, int maxiters); /* get functions */ SUNDIALS_EXPORT int SUNNonlinSolGetNumIters(SUNNonlinearSolver NLS, long int *niters); SUNDIALS_EXPORT int SUNNonlinSolGetCurIter(SUNNonlinearSolver NLS, int *iter); SUNDIALS_EXPORT int SUNNonlinSolGetNumConvFails(SUNNonlinearSolver NLS, long int *nconvfails); /* ----------------------------------------------------------------------------- * SUNNonlinearSolver return values * ---------------------------------------------------------------------------*/ #define SUN_NLS_SUCCESS 0 /* successful / 
converged */ /* Recoverable */ #define SUN_NLS_CONTINUE +901 /* not converged, keep iterating */ #define SUN_NLS_CONV_RECVR +902 /* convergece failure, try to recover */ /* Unrecoverable */ #define SUN_NLS_MEM_NULL -901 /* memory argument is NULL */ #define SUN_NLS_MEM_FAIL -902 /* failed memory access / allocation */ #define SUN_NLS_ILL_INPUT -903 /* illegal function input */ #define SUN_NLS_VECTOROP_ERR -904 /* failed NVector operation */ #define SUN_NLS_EXT_FAIL -905 /* failed in external library call */ /* ----------------------------------------------------------------------------- * SUNNonlinearSolver messages * ---------------------------------------------------------------------------*/ #if defined(SUNDIALS_EXTENDED_PRECISION) #define SUN_NLS_MSG_RESIDUAL "\tnonlin. iteration %ld, nonlin. residual: %Lg\n" #elif defined(SUNDIALS_DOUBLE_PRECISION) #define SUN_NLS_MSG_RESIDUAL "\tnonlin. iteration %ld, nonlin. residual: %g\n" #else #define SUN_NLS_MSG_RESIDUAL "\tnonlin. iteration %ld, nonlin. residual: %g\n" #endif #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_nvector.h0000644000176200001440000003270414645137106022445 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Radu Serban and Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for a generic NVECTOR package. 
* It defines the N_Vector structure (_generic_N_Vector) which * contains the following fields: * - an implementation-dependent 'content' field which contains * the description and actual data of the vector * - an 'ops' filed which contains a structure listing operations * acting on such vectors * ----------------------------------------------------------------- * This header file contains: * - enumeration constants for all SUNDIALS-defined vector types, * as well as a generic type for user-supplied vector types, * - type declarations for the _generic_N_Vector and * _generic_N_Vector_Ops structures, as well as references to * pointers to such structures (N_Vector), and * - prototypes for the vector functions which operate on * N_Vector objects. * ----------------------------------------------------------------- * At a minimum, a particular implementation of an NVECTOR must * do the following: * - specify the 'content' field of N_Vector, * - implement the operations on those N_Vector objects, * - provide a constructor routine for new N_Vector objects * * Additionally, an NVECTOR implementation may provide the following: * - macros to access the underlying N_Vector data * - a constructor for an array of N_Vectors * - a constructor for an empty N_Vector (i.e., a new N_Vector with * a NULL data pointer). 
* - a routine to print the content of an N_Vector * -----------------------------------------------------------------*/ #ifndef _NVECTOR_H #define _NVECTOR_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------- * Implemented N_Vector types * ----------------------------------------------------------------- */ typedef enum { SUNDIALS_NVEC_SERIAL, SUNDIALS_NVEC_PARALLEL, SUNDIALS_NVEC_OPENMP, SUNDIALS_NVEC_PTHREADS, SUNDIALS_NVEC_PARHYP, SUNDIALS_NVEC_PETSC, SUNDIALS_NVEC_CUDA, SUNDIALS_NVEC_HIP, SUNDIALS_NVEC_SYCL, SUNDIALS_NVEC_RAJA, SUNDIALS_NVEC_OPENMPDEV, SUNDIALS_NVEC_TRILINOS, SUNDIALS_NVEC_MANYVECTOR, SUNDIALS_NVEC_MPIMANYVECTOR, SUNDIALS_NVEC_MPIPLUSX, SUNDIALS_NVEC_CUSTOM } N_Vector_ID; /* ----------------------------------------------------------------- * Generic definition of N_Vector * ----------------------------------------------------------------- */ /* Forward reference for pointer to N_Vector_Ops object */ typedef _SUNDIALS_STRUCT_ _generic_N_Vector_Ops *N_Vector_Ops; /* Forward reference for pointer to N_Vector object */ typedef _SUNDIALS_STRUCT_ _generic_N_Vector *N_Vector; /* Define array of N_Vectors */ typedef N_Vector *N_Vector_S; /* Structure containing function pointers to vector operations */ struct _generic_N_Vector_Ops { /* * REQUIRED operations. * * These must be implemented by derivations of the generic N_Vector. 
*/ /* constructors, destructors, and utility operations */ N_Vector_ID (*nvgetvectorid)(N_Vector); N_Vector (*nvclone)(N_Vector); N_Vector (*nvcloneempty)(N_Vector); void (*nvdestroy)(N_Vector); void (*nvspace)(N_Vector, sunindextype *, sunindextype *); realtype* (*nvgetarraypointer)(N_Vector); realtype* (*nvgetdevicearraypointer)(N_Vector); void (*nvsetarraypointer)(realtype *, N_Vector); void* (*nvgetcommunicator)(N_Vector); sunindextype (*nvgetlength)(N_Vector); /* standard vector operations */ void (*nvlinearsum)(realtype, N_Vector, realtype, N_Vector, N_Vector); void (*nvconst)(realtype, N_Vector); void (*nvprod)(N_Vector, N_Vector, N_Vector); void (*nvdiv)(N_Vector, N_Vector, N_Vector); void (*nvscale)(realtype, N_Vector, N_Vector); void (*nvabs)(N_Vector, N_Vector); void (*nvinv)(N_Vector, N_Vector); void (*nvaddconst)(N_Vector, realtype, N_Vector); realtype (*nvdotprod)(N_Vector, N_Vector); realtype (*nvmaxnorm)(N_Vector); realtype (*nvwrmsnorm)(N_Vector, N_Vector); realtype (*nvwrmsnormmask)(N_Vector, N_Vector, N_Vector); realtype (*nvmin)(N_Vector); realtype (*nvwl2norm)(N_Vector, N_Vector); realtype (*nvl1norm)(N_Vector); void (*nvcompare)(realtype, N_Vector, N_Vector); booleantype (*nvinvtest)(N_Vector, N_Vector); booleantype (*nvconstrmask)(N_Vector, N_Vector, N_Vector); realtype (*nvminquotient)(N_Vector, N_Vector); /* * OPTIONAL operations. * * These operations provide default implementations that may be overriden. 
*/ /* OPTIONAL fused vector operations */ int (*nvlinearcombination)(int, realtype*, N_Vector*, N_Vector); int (*nvscaleaddmulti)(int, realtype*, N_Vector, N_Vector*, N_Vector*); int (*nvdotprodmulti)(int, N_Vector, N_Vector*, realtype*); /* OPTIONAL vector array operations */ int (*nvlinearsumvectorarray)(int, realtype, N_Vector*, realtype, N_Vector*, N_Vector*); int (*nvscalevectorarray)(int, realtype*, N_Vector*, N_Vector*); int (*nvconstvectorarray)(int, realtype, N_Vector*); int (*nvwrmsnormvectorarray)(int, N_Vector*, N_Vector*, realtype*); int (*nvwrmsnormmaskvectorarray)(int, N_Vector*, N_Vector*, N_Vector, realtype*); int (*nvscaleaddmultivectorarray)(int, int, realtype*, N_Vector*, N_Vector**, N_Vector**); int (*nvlinearcombinationvectorarray)(int, int, realtype*, N_Vector**, N_Vector*); /* * OPTIONAL operations with no default implementation. */ /* Local reduction kernels (no parallel communication) */ realtype (*nvdotprodlocal)(N_Vector, N_Vector); realtype (*nvmaxnormlocal)(N_Vector); realtype (*nvminlocal)(N_Vector); realtype (*nvl1normlocal)(N_Vector); booleantype (*nvinvtestlocal)(N_Vector, N_Vector); booleantype (*nvconstrmasklocal)(N_Vector, N_Vector, N_Vector); realtype (*nvminquotientlocal)(N_Vector, N_Vector); realtype (*nvwsqrsumlocal)(N_Vector, N_Vector); realtype (*nvwsqrsummasklocal)(N_Vector, N_Vector, N_Vector); /* Single buffer reduction operations */ int (*nvdotprodmultilocal)(int, N_Vector, N_Vector*, realtype*); int (*nvdotprodmultiallreduce)(int, N_Vector, realtype*); /* XBraid interface operations */ int (*nvbufsize)(N_Vector, sunindextype*); int (*nvbufpack)(N_Vector, void*); int (*nvbufunpack)(N_Vector, void*); /* Debugging functions (called when SUNDIALS_DEBUG_PRINTVEC is defined). */ void (*nvprint)(N_Vector); void (*nvprintfile)(N_Vector, FILE*); }; /* A vector is a structure with an implementation-dependent 'content' field, and a pointer to a structure of vector operations corresponding to that implementation. 
*/ struct _generic_N_Vector { void *content; N_Vector_Ops ops; SUNContext sunctx; }; /* ----------------------------------------------------------------- * Functions exported by NVECTOR module * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector N_VNewEmpty(SUNContext sunctx); SUNDIALS_EXPORT void N_VFreeEmpty(N_Vector v); SUNDIALS_EXPORT int N_VCopyOps(N_Vector w, N_Vector v); /* * Required operations. */ SUNDIALS_EXPORT N_Vector_ID N_VGetVectorID(N_Vector w); SUNDIALS_EXPORT N_Vector N_VClone(N_Vector w); SUNDIALS_EXPORT N_Vector N_VCloneEmpty(N_Vector w); SUNDIALS_EXPORT void N_VDestroy(N_Vector v); SUNDIALS_EXPORT void N_VSpace(N_Vector v, sunindextype *lrw, sunindextype *liw); SUNDIALS_EXPORT realtype *N_VGetArrayPointer(N_Vector v); SUNDIALS_EXPORT realtype *N_VGetDeviceArrayPointer(N_Vector v); SUNDIALS_EXPORT void N_VSetArrayPointer(realtype *v_data, N_Vector v); SUNDIALS_EXPORT void *N_VGetCommunicator(N_Vector v); SUNDIALS_EXPORT sunindextype N_VGetLength(N_Vector v); /* standard vector operations */ SUNDIALS_EXPORT void N_VLinearSum(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VConst(realtype c, N_Vector z); SUNDIALS_EXPORT void N_VProd(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VDiv(N_Vector x, N_Vector y, N_Vector z); SUNDIALS_EXPORT void N_VScale(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAbs(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VInv(N_Vector x, N_Vector z); SUNDIALS_EXPORT void N_VAddConst(N_Vector x, realtype b, N_Vector z); SUNDIALS_EXPORT realtype N_VDotProd(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNorm(N_Vector x); SUNDIALS_EXPORT realtype N_VWrmsNorm(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWrmsNormMask(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT realtype N_VMin(N_Vector x); SUNDIALS_EXPORT realtype N_VWL2Norm(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VL1Norm(N_Vector x); 
SUNDIALS_EXPORT void N_VCompare(realtype c, N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VInvTest(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMask(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotient(N_Vector num, N_Vector denom); /* * OPTIONAL operations with default implementations. */ /* fused vector operations */ SUNDIALS_EXPORT int N_VLinearCombination(int nvec, realtype* c, N_Vector* X, N_Vector z); SUNDIALS_EXPORT int N_VScaleAddMulti(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VDotProdMulti(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); /* vector array operations */ SUNDIALS_EXPORT int N_VLinearSumVectorArray(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z); SUNDIALS_EXPORT int N_VScaleVectorArray(int nvec, realtype* c, N_Vector* X, N_Vector* Z); SUNDIALS_EXPORT int N_VConstVectorArray(int nvec, realtype c, N_Vector* Z); SUNDIALS_EXPORT int N_VWrmsNormVectorArray(int nvec, N_Vector* X, N_Vector* W, realtype* nrm); SUNDIALS_EXPORT int N_VWrmsNormMaskVectorArray(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm); SUNDIALS_EXPORT int N_VScaleAddMultiVectorArray(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z); SUNDIALS_EXPORT int N_VLinearCombinationVectorArray(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z); /* * OPTIONAL operations with no default implementation. 
*/ /* local reduction kernels (no parallel communication) */ SUNDIALS_EXPORT realtype N_VDotProdLocal(N_Vector x, N_Vector y); SUNDIALS_EXPORT realtype N_VMaxNormLocal(N_Vector x); SUNDIALS_EXPORT realtype N_VMinLocal(N_Vector x); SUNDIALS_EXPORT realtype N_VL1NormLocal(N_Vector x); SUNDIALS_EXPORT realtype N_VWSqrSumLocal(N_Vector x, N_Vector w); SUNDIALS_EXPORT realtype N_VWSqrSumMaskLocal(N_Vector x, N_Vector w, N_Vector id); SUNDIALS_EXPORT booleantype N_VInvTestLocal(N_Vector x, N_Vector z); SUNDIALS_EXPORT booleantype N_VConstrMaskLocal(N_Vector c, N_Vector x, N_Vector m); SUNDIALS_EXPORT realtype N_VMinQuotientLocal(N_Vector num, N_Vector denom); /* single buffer reduction operations */ SUNDIALS_EXPORT int N_VDotProdMultiLocal(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods); SUNDIALS_EXPORT int N_VDotProdMultiAllReduce(int nvec_total, N_Vector x, realtype* sum); /* XBraid interface operations */ SUNDIALS_EXPORT int N_VBufSize(N_Vector x, sunindextype *size); SUNDIALS_EXPORT int N_VBufPack(N_Vector x, void *buf); SUNDIALS_EXPORT int N_VBufUnpack(N_Vector x, void *buf); /* ----------------------------------------------------------------- * Additional functions exported by NVECTOR module * ----------------------------------------------------------------- */ SUNDIALS_EXPORT N_Vector* N_VNewVectorArray(int count); SUNDIALS_EXPORT N_Vector* N_VCloneEmptyVectorArray(int count, N_Vector w); SUNDIALS_EXPORT N_Vector* N_VCloneVectorArray(int count, N_Vector w); SUNDIALS_EXPORT void N_VDestroyVectorArray(N_Vector* vs, int count); /* These function are really only for users of the Fortran interface */ SUNDIALS_EXPORT N_Vector N_VGetVecAtIndexVectorArray(N_Vector* vs, int index); SUNDIALS_EXPORT void N_VSetVecAtIndexVectorArray(N_Vector* vs, int index, N_Vector w); /* ----------------------------------------------------------------- * Debugging functions * ----------------------------------------------------------------- */ SUNDIALS_EXPORT void N_VPrint(N_Vector 
v); SUNDIALS_EXPORT void N_VPrintFile(N_Vector v, FILE* outfile); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_config.in0000644000176200001440000001350114645137106022403 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos, Aaron Collier and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS configuration header file. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_CONFIG_H #define _SUNDIALS_CONFIG_H #include "sundials/sundials_export.h" #ifndef SUNDIALS_DEPRECATED_MSG # define SUNDIALS_DEPRECATED_MSG(msg) @SUNDIALS_DEPRECATED_MSG_MACRO@ #endif #ifndef SUNDIALS_DEPRECATED_EXPORT_MSG # define SUNDIALS_DEPRECATED_EXPORT_MSG(msg) SUNDIALS_EXPORT SUNDIALS_DEPRECATED_MSG(msg) #endif #ifndef SUNDIALS_DEPRECATED_NO_EXPORT_MSG # define SUNDIALS_DEPRECATED_NO_EXPORT_MSG(msg) SUNDIALS_NO_EXPORT SUNDIALS_DEPRECATED_MSG(msg) #endif /* ------------------------------------------------------------------ * Define SUNDIALS version numbers * -----------------------------------------------------------------*/ #define SUNDIALS_VERSION "@PACKAGE_VERSION@" #define SUNDIALS_VERSION_MAJOR @PACKAGE_VERSION_MAJOR@ #define SUNDIALS_VERSION_MINOR @PACKAGE_VERSION_MINOR@ #define SUNDIALS_VERSION_PATCH @PACKAGE_VERSION_PATCH@ #define SUNDIALS_VERSION_LABEL "@PACKAGE_VERSION_LABEL@" #define SUNDIALS_GIT_VERSION "@SUNDIALS_GIT_VERSION@" /* ------------------------------------------------------------------ * SUNDIALS build information * 
-----------------------------------------------------------------*/ /* Define precision of SUNDIALS data type 'realtype' * Depending on the precision level, one of the following * three macros will be defined: * #define SUNDIALS_SINGLE_PRECISION 1 * #define SUNDIALS_DOUBLE_PRECISION 1 * #define SUNDIALS_EXTENDED_PRECISION 1 */ @PRECISION_LEVEL@ /* Define type of vector indices in SUNDIALS 'sunindextype'. * Depending on user choice of index type, one of the following * two macros will be defined: * #define SUNDIALS_INT64_T 1 * #define SUNDIALS_INT32_T 1 */ @INDEX_TYPE@ /* Define the type of vector indices in SUNDIALS 'sunindextype'. * The macro will be defined with a type of the appropriate size. */ #define SUNDIALS_INDEX_TYPE @SUNDIALS_CINDEX_TYPE@ /* Use generic math functions * If it was decided that generic math functions can be used, then * #define SUNDIALS_USE_GENERIC_MATH */ #cmakedefine SUNDIALS_USE_GENERIC_MATH /* Use POSIX timers if available. * #define SUNDIALS_HAVE_POSIX_TIMERS */ #cmakedefine SUNDIALS_HAVE_POSIX_TIMERS /* BUILD CVODE with fused kernel functionality */ #cmakedefine SUNDIALS_BUILD_PACKAGE_FUSED_KERNELS /* BUILD SUNDIALS with monitoring functionalities */ #cmakedefine SUNDIALS_BUILD_WITH_MONITORING /* BUILD SUNDIALS with profiling functionalities */ #cmakedefine SUNDIALS_BUILD_WITH_PROFILING /* ------------------------------------------------------------------ * SUNDIALS TPL macros * -----------------------------------------------------------------*/ /* Caliper */ #cmakedefine SUNDIALS_CALIPER_ENABLED /* MAGMA backends */ #cmakedefine SUNDIALS_MAGMA_BACKENDS_CUDA #cmakedefine SUNDIALS_MAGMA_BACKENDS_HIP /* Set if SUNDIALS is built with MPI support, then * #define SUNDIALS_MPI_ENABLED 1 * otherwise * #define SUNDIALS_MPI_ENABLED 0 */ #cmakedefine01 SUNDIALS_MPI_ENABLED /* SUPERLUMT threading type */ #cmakedefine SUNDIALS_SUPERLUMT_THREAD_TYPE "@SUPERLUMT_THREAD_TYPE@" /* Trilinos with MPI is available, then * #define 
SUNDIALS_TRILINOS_HAVE_MPI */ #cmakedefine SUNDIALS_TRILINOS_HAVE_MPI /* RAJA backends */ #cmakedefine SUNDIALS_RAJA_BACKENDS_CUDA #cmakedefine SUNDIALS_RAJA_BACKENDS_HIP #cmakedefine SUNDIALS_RAJA_BACKENDS_SYCL /* ------------------------------------------------------------------ * SUNDIALS modules enabled * -----------------------------------------------------------------*/ @SUNDIALS_CONFIGH_BUILDS@ /* ------------------------------------------------------------------ * SUNDIALS fortran configuration * -----------------------------------------------------------------*/ /* Define Fortran name-mangling macro for C identifiers. * Depending on the inferred scheme, one of the following six * macros will be defined: * #define SUNDIALS_F77_FUNC(name,NAME) name * #define SUNDIALS_F77_FUNC(name,NAME) name ## _ * #define SUNDIALS_F77_FUNC(name,NAME) name ## __ * #define SUNDIALS_F77_FUNC(name,NAME) NAME * #define SUNDIALS_F77_FUNC(name,NAME) NAME ## _ * #define SUNDIALS_F77_FUNC(name,NAME) NAME ## __ */ @F77_MANGLE_MACRO1@ /* Define Fortran name-mangling macro for C identifiers * which contain underscores. */ @F77_MANGLE_MACRO2@ /* Allow user to specify different MPI communicator * If it was found that the MPI implementation supports MPI_Comm_f2c, then * #define SUNDIALS_MPI_COMM_F2C 1 * otherwise * #define SUNDIALS_MPI_COMM_F2C 0 */ @F77_MPI_COMM_F2C@ /* ------------------------------------------------------------------ * SUNDIALS inline macros. * -----------------------------------------------------------------*/ /* Mark SUNDIALS function as inline. */ #ifndef SUNDIALS_CXX_INLINE #define SUNDIALS_CXX_INLINE inline #endif #ifndef SUNDIALS_C_INLINE #ifndef __STDC_VERSION__ /* must be c89 or c90 */ #define SUNDIALS_C_INLINE #else #define SUNDIALS_C_INLINE inline #endif #endif #ifdef __cplusplus #define SUNDIALS_INLINE SUNDIALS_CXX_INLINE #else #define SUNDIALS_INLINE SUNDIALS_C_INLINE #endif /* Mark SUNDIALS function as static inline. 
*/ #define SUNDIALS_STATIC_INLINE static SUNDIALS_INLINE #endif /* _SUNDIALS_CONFIG_H */ StanHeaders/inst/include/sundials/sundials_hip_policies.hpp0000644000176200001440000001553314645137106023775 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header files defines the ExecPolicy classes which * are utilized to determine HIP kernel launch parameters. * ----------------------------------------------------------------- */ #ifndef _SUNDIALS_HIPEXECPOLICIES_HPP #define _SUNDIALS_HIPEXECPOLICIES_HPP #include #include #include namespace sundials { namespace hip { #if defined(__HIP_PLATFORM_HCC__) constexpr const sunindextype WARP_SIZE = 64; #elif defined(__HIP_PLATFORM_NVCC__) constexpr const sunindextype WARP_SIZE = 32; #endif constexpr const sunindextype MAX_BLOCK_SIZE = 1024; constexpr const sunindextype MAX_WARPS = MAX_BLOCK_SIZE / WARP_SIZE; class ExecPolicy { public: ExecPolicy(hipStream_t stream = 0) : stream_(stream) { } virtual size_t gridSize(size_t numWorkUnits = 0, size_t blockDim = 0) const = 0; virtual size_t blockSize(size_t numWorkUnits = 0, size_t gridDim = 0) const = 0; virtual const hipStream_t* stream() const { return (&stream_); } virtual ExecPolicy* clone() const = 0; ExecPolicy* clone_new_stream(hipStream_t stream) const { ExecPolicy* ex = clone(); ex->stream_ = stream; return ex; } virtual bool atomic() const { return false; } virtual ~ExecPolicy() {} protected: hipStream_t stream_; }; /* * A kernel execution policy that maps each thread to a work unit. 
* The number of threads per block (blockSize) can be set to anything. * The grid size will be chosen so that there are enough threads for one * thread per element. If a stream is provided, it will be used to * execute the kernel. */ class ThreadDirectExecPolicy : public ExecPolicy { public: ThreadDirectExecPolicy(const size_t blockDim, hipStream_t stream = 0) : blockDim_(blockDim), ExecPolicy(stream) {} ThreadDirectExecPolicy(const ThreadDirectExecPolicy& ex) : blockDim_(ex.blockDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t /*blockDim*/ = 0) const { /* ceil(n/m) = floor((n + m - 1) / m) */ return (numWorkUnits + blockSize() - 1) / blockSize(); } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new ThreadDirectExecPolicy(*this)); } private: const size_t blockDim_; }; /* * A kernel execution policy for kernels that use grid stride loops. * The number of threads per block (blockSize) can be set to anything. * The number of blocks (gridSize) can be set to anything. If a stream * is provided, it will be used to execute the kernel. */ class GridStrideExecPolicy : public ExecPolicy { public: GridStrideExecPolicy(const size_t blockDim, const size_t gridDim, hipStream_t stream = 0) : blockDim_(blockDim), gridDim_(gridDim), ExecPolicy(stream) {} GridStrideExecPolicy(const GridStrideExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t /*numWorkUnits*/ = 0, size_t /*blockDim*/ = 0) const { return gridDim_; } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new GridStrideExecPolicy(*this)); } private: const size_t blockDim_; const size_t gridDim_; }; /* * A kernel execution policy for performing a reduction across indvidual thread * blocks. 
The number of threads per block (blockSize) can be set to any valid * multiple of the HIP warp size. The number of blocks (gridSize) can be set to * any value greater or equal to 0. If it is set to 0, then the grid size will * be chosen so that there are at most two work units per thread. If a stream is * provided, it will be used to execute the kernel. */ class BlockReduceAtomicExecPolicy : public ExecPolicy { public: BlockReduceAtomicExecPolicy(const size_t blockDim, const size_t gridDim = 0, hipStream_t stream = 0) : blockDim_(blockDim), gridDim_(gridDim), ExecPolicy(stream) { if (blockDim < 1 || blockDim % WARP_SIZE) { throw std::invalid_argument("the block size must be a multiple of the HIP warp size"); } } BlockReduceAtomicExecPolicy(const BlockReduceAtomicExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t /*blockDim*/ = 0) const { if (gridDim_ == 0) { return (numWorkUnits + (blockSize() * 2 - 1)) / (blockSize() * 2); } return gridDim_; } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new BlockReduceAtomicExecPolicy(*this)); } virtual bool atomic() const { return true; } private: const size_t blockDim_; const size_t gridDim_; }; class BlockReduceExecPolicy : public ExecPolicy { public: BlockReduceExecPolicy(const size_t blockDim, const size_t gridDim = 0, hipStream_t stream = 0) : blockDim_(blockDim), gridDim_(gridDim), ExecPolicy(stream) { if (blockDim < 1 || blockDim % WARP_SIZE) { throw std::invalid_argument("the block size must be a multiple of the HIP warp size"); } } BlockReduceExecPolicy(const BlockReduceExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_), ExecPolicy(ex.stream_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t /*blockDim*/ = 0) const { if (gridDim_ == 0) { return (numWorkUnits + (blockSize() * 2 - 1)) / 
(blockSize() * 2); } return gridDim_; } virtual size_t blockSize(size_t /*numWorkUnits*/ = 0, size_t /*gridDim*/ = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new BlockReduceExecPolicy(*this)); } bool atomic() const { return false; } private: const size_t blockDim_; const size_t gridDim_; }; } // namespace hip } // namespace sundials typedef sundials::hip::ExecPolicy SUNHipExecPolicy; typedef sundials::hip::ThreadDirectExecPolicy SUNHipThreadDirectExecPolicy; typedef sundials::hip::GridStrideExecPolicy SUNHipGridStrideExecPolicy; typedef sundials::hip::BlockReduceExecPolicy SUNHipBlockReduceExecPolicy; typedef sundials::hip::BlockReduceAtomicExecPolicy SUNHipBlockReduceAtomicExecPolicy; #endif StanHeaders/inst/include/sundials/sundials_linearsolver.h0000644000176200001440000002273114645137106023471 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * David Gardner, Carol Woodward, * Slaven Peles, Cody Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for a generic linear solver package. * It defines the SUNLinearSolver structure (_generic_SUNLinearSolver) * which contains the following fields: * - an implementation-dependent 'content' field which contains * any internal data required by the solver * - an 'ops' filed which contains a structure listing operations * acting on/by such solvers * * We consider both direct linear solvers and iterative linear solvers * as available implementations of this package. 
Furthermore, iterative * linear solvers can either use a matrix or be matrix-free. As a * result of these different solver characteristics, some of the * routines are applicable only to some types of linear solver. * ----------------------------------------------------------------- * This header file contains: * - enumeration constants for all SUNDIALS-defined linear solver * types, as well as a generic type for user-supplied linear * solver types, * - type declarations for the _generic_SUNLinearSolver and * _generic_SUNLinearSolver_Ops structures, as well as references * to pointers to such structures (SUNLinearSolver), * - prototypes for the linear solver functions which operate * on/by SUNLinearSolver objects, and * - return codes for SUNLinearSolver objects. * ----------------------------------------------------------------- * At a minimum, a particular implementation of a SUNLinearSolver must * do the following: * - specify the 'content' field of SUNLinearSolver, * - implement the operations on/by those SUNLinearSolver objects, * - provide a constructor routine for new SUNLinearSolver objects * * Additionally, a SUNLinearSolver implementation may provide the * following: * - "Set" routines to control solver-specific parameters/options * - "Get" routines to access solver-specific performance metrics * -----------------------------------------------------------------*/ #ifndef _SUNLINEARSOLVER_H #define _SUNLINEARSOLVER_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------- * Implemented SUNLinearSolver types and IDs: * ----------------------------------------------------------------- */ typedef enum { SUNLINEARSOLVER_DIRECT, SUNLINEARSOLVER_ITERATIVE, SUNLINEARSOLVER_MATRIX_ITERATIVE, SUNLINEARSOLVER_MATRIX_EMBEDDED } SUNLinearSolver_Type; typedef enum { SUNLINEARSOLVER_BAND, SUNLINEARSOLVER_DENSE, SUNLINEARSOLVER_KLU, 
SUNLINEARSOLVER_LAPACKBAND, SUNLINEARSOLVER_LAPACKDENSE, SUNLINEARSOLVER_PCG, SUNLINEARSOLVER_SPBCGS, SUNLINEARSOLVER_SPFGMR, SUNLINEARSOLVER_SPGMR, SUNLINEARSOLVER_SPTFQMR, SUNLINEARSOLVER_SUPERLUDIST, SUNLINEARSOLVER_SUPERLUMT, SUNLINEARSOLVER_CUSOLVERSP_BATCHQR, SUNLINEARSOLVER_MAGMADENSE, SUNLINEARSOLVER_ONEMKLDENSE, SUNLINEARSOLVER_CUSTOM } SUNLinearSolver_ID; /* ----------------------------------------------------------------- * Generic definition of SUNLinearSolver * ----------------------------------------------------------------- */ /* Forward reference for pointer to SUNLinearSolver_Ops object */ typedef _SUNDIALS_STRUCT_ _generic_SUNLinearSolver_Ops *SUNLinearSolver_Ops; /* Forward reference for pointer to SUNLinearSolver object */ typedef _SUNDIALS_STRUCT_ _generic_SUNLinearSolver *SUNLinearSolver; /* Structure containing function pointers to linear solver operations */ struct _generic_SUNLinearSolver_Ops { SUNLinearSolver_Type (*gettype)(SUNLinearSolver); SUNLinearSolver_ID (*getid)(SUNLinearSolver); int (*setatimes)(SUNLinearSolver, void*, SUNATimesFn); int (*setpreconditioner)(SUNLinearSolver, void*, SUNPSetupFn, SUNPSolveFn); int (*setscalingvectors)(SUNLinearSolver, N_Vector, N_Vector); int (*setzeroguess)(SUNLinearSolver, booleantype); int (*initialize)(SUNLinearSolver); int (*setup)(SUNLinearSolver, SUNMatrix); int (*solve)(SUNLinearSolver, SUNMatrix, N_Vector, N_Vector, realtype); int (*numiters)(SUNLinearSolver); realtype (*resnorm)(SUNLinearSolver); sunindextype (*lastflag)(SUNLinearSolver); int (*space)(SUNLinearSolver, long int*, long int*); N_Vector (*resid)(SUNLinearSolver); int (*free)(SUNLinearSolver); }; /* A linear solver is a structure with an implementation-dependent 'content' field, and a pointer to a structure of linear solver operations corresponding to that implementation. 
*/ struct _generic_SUNLinearSolver { void *content; SUNLinearSolver_Ops ops; SUNContext sunctx; }; /* ----------------------------------------------------------------- * Functions exported by SUNLinearSolver module * ----------------------------------------------------------------- */ SUNDIALS_EXPORT SUNLinearSolver SUNLinSolNewEmpty(SUNContext sunctx); SUNDIALS_EXPORT void SUNLinSolFreeEmpty(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_Type SUNLinSolGetType(SUNLinearSolver S); SUNDIALS_EXPORT SUNLinearSolver_ID SUNLinSolGetID(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetATimes(SUNLinearSolver S, void* A_data, SUNATimesFn ATimes); SUNDIALS_EXPORT int SUNLinSolSetPreconditioner(SUNLinearSolver S, void* P_data, SUNPSetupFn Pset, SUNPSolveFn Psol); SUNDIALS_EXPORT int SUNLinSolSetScalingVectors(SUNLinearSolver S, N_Vector s1, N_Vector s2); SUNDIALS_EXPORT int SUNLinSolSetZeroGuess(SUNLinearSolver S, booleantype onoff); SUNDIALS_EXPORT int SUNLinSolInitialize(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSetup(SUNLinearSolver S, SUNMatrix A); SUNDIALS_EXPORT int SUNLinSolSolve(SUNLinearSolver S, SUNMatrix A, N_Vector x, N_Vector b, realtype tol); SUNDIALS_EXPORT int SUNLinSolNumIters(SUNLinearSolver S); SUNDIALS_EXPORT realtype SUNLinSolResNorm(SUNLinearSolver S); SUNDIALS_EXPORT N_Vector SUNLinSolResid(SUNLinearSolver S); SUNDIALS_EXPORT sunindextype SUNLinSolLastFlag(SUNLinearSolver S); SUNDIALS_EXPORT int SUNLinSolSpace(SUNLinearSolver S, long int *lenrwLS, long int *leniwLS); SUNDIALS_EXPORT int SUNLinSolFree(SUNLinearSolver S); /* ----------------------------------------------------------------- * SUNLinearSolver return values * ----------------------------------------------------------------- */ #define SUNLS_SUCCESS 0 /* successful/converged */ #define SUNLS_MEM_NULL -801 /* mem argument is NULL */ #define SUNLS_ILL_INPUT -802 /* illegal function input */ #define SUNLS_MEM_FAIL -803 /* failed memory access */ #define SUNLS_ATIMES_NULL -804 /* 
atimes function is NULL */ #define SUNLS_ATIMES_FAIL_UNREC -805 /* atimes unrecoverable failure */ #define SUNLS_PSET_FAIL_UNREC -806 /* pset unrecoverable failure */ #define SUNLS_PSOLVE_NULL -807 /* psolve function is NULL */ #define SUNLS_PSOLVE_FAIL_UNREC -808 /* psolve unrecoverable failure */ #define SUNLS_PACKAGE_FAIL_UNREC -809 /* external package unrec. fail */ #define SUNLS_GS_FAIL -810 /* Gram-Schmidt failure */ #define SUNLS_QRSOL_FAIL -811 /* QRsol found singular R */ #define SUNLS_VECTOROP_ERR -812 /* vector operation error */ #define SUNLS_RES_REDUCED 801 /* nonconv. solve, resid reduced */ #define SUNLS_CONV_FAIL 802 /* nonconvergent solve */ #define SUNLS_ATIMES_FAIL_REC 803 /* atimes failed recoverably */ #define SUNLS_PSET_FAIL_REC 804 /* pset failed recoverably */ #define SUNLS_PSOLVE_FAIL_REC 805 /* psolve failed recoverably */ #define SUNLS_PACKAGE_FAIL_REC 806 /* external package recov. fail */ #define SUNLS_QRFACT_FAIL 807 /* QRfact found singular matrix */ #define SUNLS_LUFACT_FAIL 808 /* LUfact found singular matrix */ /* ----------------------------------------------------------------------------- * SUNLinearSolver messages * ---------------------------------------------------------------------------*/ #if defined(SUNDIALS_EXTENDED_PRECISION) #define SUNLS_MSG_RESIDUAL "\t\tlin. iteration %ld, lin. residual: %Lg\n" #elif defined(SUNDIALS_DOUBLE_PRECISION) #define SUNLS_MSG_RESIDUAL "\t\tlin. iteration %ld, lin. residual: %g\n" #else #define SUNLS_MSG_RESIDUAL "\t\tlin. iteration %ld, lin. 
residual: %g\n" #endif #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_direct.h0000644000176200001440000004030414645137106022232 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header file contains definitions and declarations for use by * generic direct linear solvers for Ax = b. It defines types for * dense and banded matrices and corresponding accessor macros. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_DIRECT_H #define _SUNDIALS_DIRECT_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ================================================================= * C O N S T A N T S * ================================================================= */ /* * SUNDIALS_DENSE: dense matrix * SUNDIALS_BAND: banded matrix */ #define SUNDIALS_DENSE 1 #define SUNDIALS_BAND 2 /* * ================================================================== * Type definitions * ================================================================== */ /* * ----------------------------------------------------------------- * Type : SUNDlsMat * ----------------------------------------------------------------- * The type SUNDlsMat is defined to be a pointer to a structure * with various sizes, a data field, and an array of pointers to * the columns which defines a dense or band matrix for use in * direct linear solvers. The M and N fields indicates the number * of rows and columns, respectively. 
The data field is a one * dimensional array used for component storage. The cols field * stores the pointers in data for the beginning of each column. * ----------------------------------------------------------------- * For DENSE matrices, the relevant fields in SUNDlsMat are: * type = SUNDIALS_DENSE * M - number of rows * N - number of columns * ldim - leading dimension (ldim >= M) * data - pointer to a contiguous block of realtype variables * ldata - length of the data array =ldim*N * cols - array of pointers. cols[j] points to the first element * of the j-th column of the matrix in the array data. * * The elements of a dense matrix are stored columnwise (i.e. columns * are stored one on top of the other in memory). * If A is of type SUNDlsMat, then the (i,j)th element of A (with * 0 <= i < M and 0 <= j < N) is given by (A->data)[j*n+i]. * * The SUNDLS_DENSE_COL and SUNDLS_DENSE_ELEM macros below allow a * user to access efficiently individual matrix elements without * writing out explicit data structure references and without knowing * too much about the underlying element storage. The only storage * assumption needed is that elements are stored columnwise and that a * pointer to the jth column of elements can be obtained via the * SUNDLS_DENSE_COL macro. * ----------------------------------------------------------------- * For BAND matrices, the relevant fields in SUNDlsMat are: * type = SUNDIALS_BAND * M - number of rows * N - number of columns * mu - upper bandwidth, 0 <= mu <= min(M,N) * ml - lower bandwidth, 0 <= ml <= min(M,N) * s_mu - storage upper bandwidth, mu <= s_mu <= N-1. * The dgbtrf routine writes the LU factors into the storage * for A. The upper triangular factor U, however, may have * an upper bandwidth as big as MIN(N-1,mu+ml) because of * partial pivoting. The s_mu field holds the upper * bandwidth allocated for A. 
* ldim - leading dimension (ldim >= s_mu) * data - pointer to a contiguous block of realtype variables * ldata - length of the data array =ldim*(s_mu+ml+1) * cols - array of pointers. cols[j] points to the first element * of the j-th column of the matrix in the array data. * * The SUNDLS_BAND_COL, SUNDLS_BAND_COL_ELEM, and SUNDLS_BAND_ELEM * macros below allow a user to access individual matrix elements * without writing out explicit data structure references and without * knowing too much about the underlying element storage. The only * storage assumption needed is that elements are stored columnwise * and that a pointer into the jth column of elements can be obtained * via the SUNDLS_BAND_COL macro. The SUNDLS_BAND_COL_ELEM macro * selects an element from a column which has already been isolated * via SUNDLS_BAND_COL. The macro SUNDLS_BAND_COL_ELEM allows the user * to avoid the translation from the matrix location (i,j) to the * index in the array returned by SUNDLS_BAND_COL at which the (i,j)th * element is stored. * ----------------------------------------------------------------- */ typedef struct _DlsMat { int type; sunindextype M; sunindextype N; sunindextype ldim; sunindextype mu; sunindextype ml; sunindextype s_mu; realtype *data; sunindextype ldata; realtype **cols; } *SUNDlsMat; /* DEPRECATED DlsMat: use SUNDlsMat instead */ typedef SUNDlsMat DlsMat; /* * ================================================================== * Data accessor macros * ================================================================== */ /* * ----------------------------------------------------------------- * SUNDLS_DENSE_COL and SUNDLS_DENSE_ELEM * ----------------------------------------------------------------- * * SUNDLS_DENSE_COL(A,j) references the jth column of the M-by-N dense * matrix A, 0 <= j < N. The type of the expression SUNDLS_DENSE_COL(A,j) * is (realtype *). 
After the assignment col_j = SUNDLS_DENSE_COL(A,j), * col_j may be treated as an array indexed from 0 to M-1. The (i,j)-th * element of A is thus referenced by * col_j[i]. * * SUNDLS_DENSE_ELEM(A,i,j) references the (i,j)th element of the dense * M-by-N matrix A, 0 <= i < M ; 0 <= j < N. * * ----------------------------------------------------------------- */ #define SUNDLS_DENSE_COL(A,j) ((A->cols)[j]) #define SUNDLS_DENSE_ELEM(A,i,j) ((A->cols)[j][i]) /* DEPRECATED DENSE_COL: use SUNDLS_DENSE_COL instead */ #define DENSE_COL(A,j) SUNDLS_DENSE_COL(A,j) /* DEPRECATED DENSE_ELEM: use SUNDLS_DENSE_ELEM instead */ #define DENSE_ELEM(A,i,j) SUNDLS_DENSE_ELEM(A,i,j) /* * ----------------------------------------------------------------- * SUNDLS_BAND_COL, SUNDLS_BAND_COL_ELEM, and SUNDLS_BAND_ELEM * ----------------------------------------------------------------- * * SUNDLS_BAND_COL(A,j) references the diagonal element of the jth * column of the N by N band matrix A, 0 <= j <= N-1. The type of the * expression SUNDLS_BAND_COL(A,j) is realtype *. The pointer returned * by the call SUNDLS_BAND_COL(A,j) can be treated as an array which * is indexed from -(A->mu) to (A->ml). * * SUNDLS_BAND_COL_ELEM references the (i,j)th entry of the band * matrix A when used in conjunction with SUNDLS_BAND_COL. The index * (i,j) should satisfy j-(A->mu) <= i <= j+(A->ml). * * SUNDLS_BAND_ELEM(A,i,j) references the (i,j)th element of the * M-by-N band matrix A, where 0 <= i,j <= N-1. The location (i,j) * should further satisfy j-(A->mu) <= i <= j+(A->ml). 
* * ----------------------------------------------------------------- */ #define SUNDLS_BAND_COL(A,j) (((A->cols)[j])+(A->s_mu)) #define SUNDLS_BAND_COL_ELEM(col_j,i,j) (col_j[(i)-(j)]) #define SUNDLS_BAND_ELEM(A,i,j) ((A->cols)[j][(i)-(j)+(A->s_mu)]) /* DEPRECATED BAND_COL: use SUNDLS_BAND_COL */ #define BAND_COL(A,j) SUNDLS_BAND_COL(A,j) /* DEPRECATED BAND_COL_ELEM: use SUNDLS_BAND_COL_ELEM */ #define BAND_COL_ELEM(col_j,i,j) SUNDLS_BAND_COL_ELEM(col_j,i,j) /* DEPRECATED BAND_ELEM: use SUNDLS_BAND_ELEM */ #define BAND_ELEM(A,i,j) SUNDLS_BAND_ELEM(A,i,j) /* * ================================================================== * Exported function prototypes (functions working on SUNDlsMat) * ================================================================== */ /* * ----------------------------------------------------------------- * Function: SUNDlsMat_NewDenseMat * ----------------------------------------------------------------- * SUNDlsMat_NewDenseMat allocates memory for an M-by-N dense matrix * and returns the storage allocated (type SUNDlsMat). * SUNDlsMat_NewDenseMat returns NULL if the request for matrix * storage cannot be satisfied. See the above documentation for the * type SUNDlsMat for matrix storage details. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT SUNDlsMat SUNDlsMat_NewDenseMat(sunindextype M, sunindextype N); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_NewDenseMat instead") DlsMat NewDenseMat(sunindextype M, sunindextype N); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_NewBandMat * ----------------------------------------------------------------- * SUNDlsMat_NewBandMat allocates memory for an M-by-N band matrix * with upper bandwidth mu, lower bandwidth ml, and storage upper * bandwidth smu. Pass smu as follows depending on whether A will be * LU factored: * * (1) Pass smu = mu if A will not be factored. 
* * (2) Pass smu = MIN(N-1,mu+ml) if A will be factored. * * SUNDlsMat_NewBandMat returns the storage allocated (type SUNDlsMat) * or NULL if the request for matrix storage cannot be satisfied. See * the documentation for the type SUNDlsMat for matrix storage * details. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT SUNDlsMat SUNDlsMat_NewBandMat(sunindextype N, sunindextype mu, sunindextype ml, sunindextype smu); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_NewBandMat instead") DlsMat NewBandMat(sunindextype N, sunindextype mu, sunindextype ml, sunindextype smu); /* * ----------------------------------------------------------------- * Functions: SUNDlsMat_DestroyMat * ----------------------------------------------------------------- * SUNDlsMat_DestroyMat frees the memory allocated by * SUNDlsMat_NewDenseMat or SUNDlsMat_NewBandMat * ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_DestroyMat(DlsMat A); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DestroyMat instead") void DestroyMat(DlsMat A); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_NewIntArray * ----------------------------------------------------------------- * SUNDlsMat_NewIntArray allocates memory an array of N int's and * returns the pointer to the memory it allocates. If the request for * memory storage cannot be satisfied, it returns NULL. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int* SUNDlsMat_NewIntArray(int N); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_NewIntArray instead") int* NewIntArray(int N); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_NewIndexArray * ----------------------------------------------------------------- * SUNDlsMat_NewIndexArray allocates memory an array of N * sunindextype's and returns the pointer to the memory it * allocates. 
If the request for memory storage cannot be satisfied, * it returns NULL. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT sunindextype* SUNDlsMat_NewIndexArray(sunindextype N); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_NewIndexArray instead") sunindextype* NewIndexArray(sunindextype N); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_NewRealArray * ----------------------------------------------------------------- * SUNDlsMat_NewRealArray allocates memory an array of N realtype and * returns the pointer to the memory it allocates. If the request for * memory storage cannot be satisfied, it returns NULL. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT realtype* SUNDlsMat_NewRealArray(sunindextype N); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_NewRealArray instead") realtype* NewRealArray(sunindextype N); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_DestroyArray * ----------------------------------------------------------------- * SUNDlsMat_DestroyArray frees memory allocated by * SUNDlsMat_NewIntArray, SUNDlsMat_NewIndexArray, or * SUNDlsMat_NewRealArray. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_DestroyArray(void *p); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_DestroyArray instead") void DestroyArray(void *p); /* * ----------------------------------------------------------------- * Function : SUNDlsMat_AddIdentity * ----------------------------------------------------------------- * SUNDlsMat_AddIdentity adds 1.0 to the main diagonal (A_ii, * i=0,1,...,N-1) of the M-by-N matrix A (M>= N) and stores the result * back in A. SUNDlsMat_AddIdentity is typically used with square * matrices. SUNDlsMat_AddIdentity does not check for M >= N and * therefore a segmentation fault will occur if M < N! 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_AddIdentity(SUNDlsMat A); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_AddIdentity instead") void AddIdentity(DlsMat A); /* * ----------------------------------------------------------------- * Function : SUNDlsMat_SetToZero * ----------------------------------------------------------------- * SUNDlsMat_SetToZero sets all the elements of the M-by-N matrix A * to 0.0. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_SetToZero(SUNDlsMat A); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_SetToZero instead") void SetToZero(DlsMat A); /* * ----------------------------------------------------------------- * Functions: SUNDlsMat_PrintMat * ----------------------------------------------------------------- * This function prints the M-by-N (dense or band) matrix A to * outfile as it would normally appear on paper. * It is intended as debugging tools with small values of M and N. * The elements are printed using the %g/%lg/%Lg option. * A blank line is printed before and after the matrix. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_PrintMat(SUNDlsMat A, FILE *outfile); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_PrintMat") void PrintMat(DlsMat A, FILE *outfile); /* * ================================================================== * Exported function prototypes (functions working on realtype**) * ================================================================== */ SUNDIALS_EXPORT realtype** SUNDlsMat_newDenseMat(sunindextype m, sunindextype n); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_newDenseMat instead") realtype** newDenseMat(sunindextype m, sunindextype n); SUNDIALS_EXPORT realtype** SUNDlsMat_newBandMat(sunindextype n, sunindextype smu, sunindextype ml); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_newBandMat instead") realtype** newBandMat(sunindextype n, sunindextype smu, sunindextype ml); SUNDIALS_EXPORT void SUNDlsMat_destroyMat(realtype** a); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_destroyMat instead") void destroyMat(realtype** a); SUNDIALS_EXPORT int* SUNDlsMat_newIntArray(int n); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_newIntArray instead") int* newIntArray(int n); SUNDIALS_EXPORT sunindextype* SUNDlsMat_newIndexArray(sunindextype n); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_newIndexArray instead") sunindextype* newIndexArray(sunindextype n); SUNDIALS_EXPORT realtype* SUNDlsMat_newRealArray(sunindextype m); SUNDIALS_EXPORT SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_newRealArray instead") realtype* newRealArray(sunindextype m); SUNDIALS_EXPORT void SUNDlsMat_destroyArray(void* v); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_destroyArray instead") void destroyArray(void* v); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_xbraid.h0000644000176200001440000000767014645137106022242 0ustar liggesusers/* -------------------------------------------------------------------------- * Programmer(s): David J. 
Gardner @ LLNL * -------------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -------------------------------------------------------------------------- * This is the header file for SUNDIALS + XBraid interface base class and * NVector interface. * -------------------------------------------------------------------------- */ #ifndef _SUNDIALS_XBRAID_H #define _SUNDIALS_XBRAID_H #include "sundials/sundials_types.h" #include "sundials/sundials_nvector.h" #include "braid.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------- * XBraid vector structure * ----------------------- */ struct _braid_Vector_struct { N_Vector y; }; /* Poiner to vector wrapper (same as braid_Vector) */ typedef struct _braid_Vector_struct *SUNBraidVector; /* ----------------------------- * XBraid ops and app structures * ----------------------------- */ /* Structure containing function pointers to operations */ struct _SUNBraidOps { int (*getvectmpl)(braid_App app, N_Vector *tmpl); }; /* Pointer to operations structure */ typedef struct _SUNBraidOps *SUNBraidOps; /* Define XBraid App structure */ struct _braid_App_struct { void *content; SUNBraidOps ops; }; /* Pointer to the interface object (same as braid_App) */ typedef struct _braid_App_struct *SUNBraidApp; /* ----------------------- * SUNBraid app operations * ----------------------- */ SUNDIALS_EXPORT int SUNBraidApp_NewEmpty(braid_App *app); SUNDIALS_EXPORT int SUNBraidApp_FreeEmpty(braid_App *app); SUNDIALS_EXPORT int SUNBraidApp_GetVecTmpl(braid_App app, N_Vector *tmpl); /* ------------------------- * SUNBraid vector functions * ------------------------- */ SUNDIALS_EXPORT int 
SUNBraidVector_New(N_Vector y, SUNBraidVector *u); SUNDIALS_EXPORT int SUNBraidVector_GetNVector(SUNBraidVector u, N_Vector *y); SUNDIALS_EXPORT int SUNBraidVector_Clone(braid_App app, braid_Vector u, braid_Vector *v_ptr); SUNDIALS_EXPORT int SUNBraidVector_Free(braid_App app, braid_Vector u); SUNDIALS_EXPORT int SUNBraidVector_Sum(braid_App app, braid_Real alpha, braid_Vector x, braid_Real beta, braid_Vector y); SUNDIALS_EXPORT int SUNBraidVector_SpatialNorm(braid_App app, braid_Vector u, braid_Real *norm_ptr); SUNDIALS_EXPORT int SUNBraidVector_BufSize(braid_App app, braid_Int *size_ptr, braid_BufferStatus bstatus); SUNDIALS_EXPORT int SUNBraidVector_BufPack(braid_App app, braid_Vector u, void *buffer, braid_BufferStatus bstatus); SUNDIALS_EXPORT int SUNBraidVector_BufUnpack(braid_App app, void *buffer, braid_Vector *u_ptr, braid_BufferStatus bstatus); /* ---------------------- * SUNBraid return values * ---------------------- */ #define SUNBRAID_SUCCESS 0 /* call/operation was successful */ #define SUNBRAID_ALLOCFAIL -1 /* a memory allocation failed */ #define SUNBRAID_MEMFAIL -2 /* a memory access fail */ #define SUNBRAID_OPNULL -3 /* the SUNBraid operation is NULL */ #define SUNBRAID_ILLINPUT -4 /* an invalid input was provided */ #define SUNBRAID_BRAIDFAIL -5 /* an XBraid function failed */ #define SUNBRAID_SUNFAIL -6 /* a SUNDIALS function failed */ #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_profiler.h0000644000176200001440000000621114645137106022601 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. 
* * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_PROFILER_H #define _SUNDIALS_PROFILER_H #include #include "sundials/sundials_config.h" #if defined(SUNDIALS_BUILD_WITH_PROFILING) && defined(SUNDIALS_CALIPER_ENABLED) #include "caliper/cali.h" #endif #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif typedef struct _SUNProfiler *SUNProfiler; SUNDIALS_EXPORT int SUNProfiler_Create(void* comm, const char* title, SUNProfiler* p); SUNDIALS_EXPORT int SUNProfiler_Free(SUNProfiler* p); SUNDIALS_EXPORT int SUNProfiler_Begin(SUNProfiler p, const char* name); SUNDIALS_EXPORT int SUNProfiler_End(SUNProfiler p, const char* name); SUNDIALS_EXPORT int SUNProfiler_Print(SUNProfiler p, FILE* fp); #if defined(SUNDIALS_BUILD_WITH_PROFILING) && defined(SUNDIALS_CALIPER_ENABLED) #define SUNDIALS_MARK_FUNCTION_BEGIN(profobj) CALI_MARK_FUNCTION_BEGIN #define SUNDIALS_MARK_FUNCTION_END(profobj) CALI_MARK_FUNCTION_END #define SUNDIALS_WRAP_STATEMENT(profobj, name, stmt) CALI_WRAP_STATEMENT(name, stmt) #define SUNDIALS_MARK_BEGIN(profobj, name) CALI_MARK_BEGIN(name) #define SUNDIALS_MARK_END(profobj, name) CALI_MARK_END(name) #ifdef __cplusplus #define SUNDIALS_CXX_MARK_FUNCTION(projobj) CALI_CXX_MARK_FUNCTION #endif #elif defined(SUNDIALS_BUILD_WITH_PROFILING) #define SUNDIALS_MARK_FUNCTION_BEGIN(profobj) SUNProfiler_Begin(profobj, __func__) #define SUNDIALS_MARK_FUNCTION_END(profobj) SUNProfiler_End(profobj, __func__) #define SUNDIALS_WRAP_STATEMENT(profobj, name, stmt) \ SUNProfiler_Begin(profobj, (name)); \ stmt; \ SUNProfiler_End(profobj, (name)); #define SUNDIALS_MARK_BEGIN(profobj, name) SUNProfiler_Begin(profobj, (name)) #define SUNDIALS_MARK_END(profobj, name) SUNProfiler_End(profobj, (name)) #ifdef __cplusplus #define SUNDIALS_CXX_MARK_FUNCTION(profobj) sundials::ProfilerMarkScope __ProfilerMarkScope(profobj, __func__) #endif #else #define 
SUNDIALS_MARK_FUNCTION_BEGIN(profobj) #define SUNDIALS_MARK_FUNCTION_END(profobj) #define SUNDIALS_WRAP_STATEMENT(profobj, name, stmt) #define SUNDIALS_MARK_BEGIN(profobj, name) #define SUNDIALS_MARK_END(profobj, name) #ifdef __cplusplus #define SUNDIALS_CXX_MARK_FUNCTION(profobj) #endif #endif #ifdef __cplusplus } namespace sundials { /* Convenience class for C++ codes. Allows for simpler profiler statements using C++ scoping rules. */ class ProfilerMarkScope { public: ProfilerMarkScope(SUNProfiler prof, const char* name) { prof_ = prof; name_ = name; SUNProfiler_Begin(prof_, name_); } ~ProfilerMarkScope() { SUNProfiler_End(prof_, name_); } private: SUNProfiler prof_; const char* name_; }; } #endif #endif /* SUNDIALS_PROFILER_H_ */ StanHeaders/inst/include/sundials/sundials_matrix.h0000644000176200001440000001264714645137106022275 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds @ SMU * David Gardner, Carol Woodward, Slaven Peles, * Cody Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for a generic matrix package. 
* It defines the SUNMatrix structure (_generic_SUNMatrix) which * contains the following fields: * - an implementation-dependent 'content' field which contains * the description and actual data of the matrix * - an 'ops' filed which contains a structure listing operations * acting on such matrices * ----------------------------------------------------------------- * This header file contains: * - enumeration constants for all SUNDIALS-defined matrix types, * as well as a generic type for user-supplied matrix types, * - type declarations for the _generic_SUNMatrix and * _generic_SUNMatrix_Ops structures, as well as references to * pointers to such structures (SUNMatrix), and * - prototypes for the matrix functions which operate on * SUNMatrix objects. * ----------------------------------------------------------------- * At a minimum, a particular implementation of a SUNMatrix must * do the following: * - specify the 'content' field of SUNMatrix, * - implement the operations on those SUNMatrix objects, * - provide a constructor routine for new SUNMatrix objects * * Additionally, a SUNMatrix implementation may provide the following: * - macros to access the underlying SUNMatrix data * - a routine to print the content of a SUNMatrix * -----------------------------------------------------------------*/ #ifndef _SUNMATRIX_H #define _SUNMATRIX_H #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* ----------------------------------------------------------------- * Implemented SUNMatrix types * ----------------------------------------------------------------- */ typedef enum { SUNMATRIX_DENSE, SUNMATRIX_MAGMADENSE, SUNMATRIX_ONEMKLDENSE, SUNMATRIX_BAND, SUNMATRIX_SPARSE, SUNMATRIX_SLUNRLOC, SUNMATRIX_CUSPARSE, SUNMATRIX_CUSTOM } SUNMatrix_ID; /* ----------------------------------------------------------------- * Generic definition of SUNMatrix * ----------------------------------------------------------------- */ /* Forward 
reference for pointer to SUNMatrix_Ops object */ typedef _SUNDIALS_STRUCT_ _generic_SUNMatrix_Ops *SUNMatrix_Ops; /* Forward reference for pointer to SUNMatrix object */ typedef _SUNDIALS_STRUCT_ _generic_SUNMatrix *SUNMatrix; /* Structure containing function pointers to matrix operations */ struct _generic_SUNMatrix_Ops { SUNMatrix_ID (*getid)(SUNMatrix); SUNMatrix (*clone)(SUNMatrix); void (*destroy)(SUNMatrix); int (*zero)(SUNMatrix); int (*copy)(SUNMatrix, SUNMatrix); int (*scaleadd)(realtype, SUNMatrix, SUNMatrix); int (*scaleaddi)(realtype, SUNMatrix); int (*matvecsetup)(SUNMatrix); int (*matvec)(SUNMatrix, N_Vector, N_Vector); int (*space)(SUNMatrix, long int*, long int*); }; /* A matrix is a structure with an implementation-dependent 'content' field, and a pointer to a structure of matrix operations corresponding to that implementation. */ struct _generic_SUNMatrix { void *content; SUNMatrix_Ops ops; SUNContext sunctx; }; /* ----------------------------------------------------------------- * Functions exported by SUNMatrix module * ----------------------------------------------------------------- */ SUNDIALS_EXPORT SUNMatrix SUNMatNewEmpty(SUNContext sunctx); SUNDIALS_EXPORT void SUNMatFreeEmpty(SUNMatrix A); SUNDIALS_EXPORT int SUNMatCopyOps(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT SUNMatrix_ID SUNMatGetID(SUNMatrix A); SUNDIALS_EXPORT SUNMatrix SUNMatClone(SUNMatrix A); SUNDIALS_EXPORT void SUNMatDestroy(SUNMatrix A); SUNDIALS_EXPORT int SUNMatZero(SUNMatrix A); SUNDIALS_EXPORT int SUNMatCopy(SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAdd(realtype c, SUNMatrix A, SUNMatrix B); SUNDIALS_EXPORT int SUNMatScaleAddI(realtype c, SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvecSetup(SUNMatrix A); SUNDIALS_EXPORT int SUNMatMatvec(SUNMatrix A, N_Vector x, N_Vector y); SUNDIALS_EXPORT int SUNMatSpace(SUNMatrix A, long int *lenrw, long int *leniw); /* * ----------------------------------------------------------------- * IV. 
SUNMatrix error codes * --------------------------------------------------------------- */ #define SUNMAT_SUCCESS 0 /* function successfull */ #define SUNMAT_ILL_INPUT -701 /* illegal function input */ #define SUNMAT_MEM_FAIL -702 /* failed memory access/alloc */ #define SUNMAT_OPERATION_FAIL -703 /* a SUNMatrix operation returned nonzero */ #define SUNMAT_MATVEC_SETUP_REQUIRED -704 /* the SUNMatMatvecSetup routine needs to be called */ #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_futils.h0000644000176200001440000000215014645137106022263 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS Fortran 2003 interface utility definitions. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_FUTILS_H #define _SUNDIALS_FUTILS_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* Create a file pointer with the given file name and mode. */ SUNDIALS_EXPORT FILE* SUNDIALSFileOpen(const char* filename, const char* modes); /* Close a file pointer with the given file name. 
*/ SUNDIALS_EXPORT void SUNDIALSFileClose(FILE* fp); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_lapack.h0000644000176200001440000002504514645137106022220 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer: Radu Serban @ LLNL * Daniel Reynolds @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for a generic package of direct matrix * operations for use with BLAS/LAPACK. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_LAPACK_H #define _SUNDIALS_LAPACK_H #include #warning "This header file is deprecated and will be removed in SUNDIALS v7.0.0" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ================================================================== * Blas and Lapack functions * ================================================================== */ #if defined(SUNDIALS_F77_FUNC) #define dcopy_f77 SUNDIALS_F77_FUNC(dcopy, DCOPY) #define dscal_f77 SUNDIALS_F77_FUNC(dscal, DSCAL) #define dgemv_f77 SUNDIALS_F77_FUNC(dgemv, DGEMV) #define dtrsv_f77 SUNDIALS_F77_FUNC(dtrsv, DTRSV) #define dsyrk_f77 SUNDIALS_F77_FUNC(dsyrk, DSKYR) #define dgbtrf_f77 SUNDIALS_F77_FUNC(dgbtrf, DGBTRF) #define dgbtrs_f77 SUNDIALS_F77_FUNC(dgbtrs, DGBTRS) #define dgetrf_f77 SUNDIALS_F77_FUNC(dgetrf, DGETRF) #define dgetrs_f77 SUNDIALS_F77_FUNC(dgetrs, DGETRS) #define dgeqp3_f77 SUNDIALS_F77_FUNC(dgeqp3, DGEQP3) #define dgeqrf_f77 SUNDIALS_F77_FUNC(dgeqrf, DGEQRF) #define dormqr_f77 SUNDIALS_F77_FUNC(dormqr, DORMQR) #define dpotrf_f77 
SUNDIALS_F77_FUNC(dpotrf, DPOTRF) #define dpotrs_f77 SUNDIALS_F77_FUNC(dpotrs, DPOTRS) #define scopy_f77 SUNDIALS_F77_FUNC(scopy, SCOPY) #define sscal_f77 SUNDIALS_F77_FUNC(sscal, SSCAL) #define sgemv_f77 SUNDIALS_F77_FUNC(sgemv, SGEMV) #define strsv_f77 SUNDIALS_F77_FUNC(strsv, STRSV) #define ssyrk_f77 SUNDIALS_F77_FUNC(ssyrk, SSKYR) #define sgbtrf_f77 SUNDIALS_F77_FUNC(sgbtrf, SGBTRF) #define sgbtrs_f77 SUNDIALS_F77_FUNC(sgbtrs, SGBTRS) #define sgetrf_f77 SUNDIALS_F77_FUNC(sgetrf, SGETRF) #define sgetrs_f77 SUNDIALS_F77_FUNC(sgetrs, SGETRS) #define sgeqp3_f77 SUNDIALS_F77_FUNC(sgeqp3, SGEQP3) #define sgeqrf_f77 SUNDIALS_F77_FUNC(sgeqrf, SGEQRF) #define sormqr_f77 SUNDIALS_F77_FUNC(sormqr, SORMQR) #define spotrf_f77 SUNDIALS_F77_FUNC(spotrf, SPOTRF) #define spotrs_f77 SUNDIALS_F77_FUNC(spotrs, SPOTRS) #else #define dcopy_f77 dcopy_ #define dscal_f77 dscal_ #define dgemv_f77 dgemv_ #define dtrsv_f77 dtrsv_ #define dsyrk_f77 dsyrk_ #define dgbtrf_f77 dgbtrf_ #define dgbtrs_f77 dgbtrs_ #define dgeqp3_f77 dgeqp3_ #define dgeqrf_f77 dgeqrf_ #define dgetrf_f77 dgetrf_ #define dgetrs_f77 dgetrs_ #define dormqr_f77 dormqr_ #define dpotrf_f77 dpotrf_ #define dpotrs_f77 dpotrs_ #define scopy_f77 scopy_ #define sscal_f77 sscal_ #define sgemv_f77 sgemv_ #define strsv_f77 strsv_ #define ssyrk_f77 ssyrk_ #define sgbtrf_f77 sgbtrf_ #define sgbtrs_f77 sgbtrs_ #define sgeqp3_f77 sgeqp3_ #define sgeqrf_f77 sgeqrf_ #define sgetrf_f77 sgetrf_ #define sgetrs_f77 sgetrs_ #define sormqr_f77 sormqr_ #define spotrf_f77 spotrf_ #define spotrs_f77 spotrs_ #endif /* Level-1 BLAS */ extern void dcopy_f77(sunindextype *n, const double *x, const sunindextype *inc_x, double *y, const sunindextype *inc_y); extern void dscal_f77(sunindextype *n, const double *alpha, double *x, const sunindextype *inc_x); extern void scopy_f77(sunindextype *n, const float *x, const sunindextype *inc_x, float *y, const sunindextype *inc_y); extern void sscal_f77(sunindextype *n, const float *alpha, float *x, const 
sunindextype *inc_x); /* Level-2 BLAS */ extern void dgemv_f77(const char *trans, sunindextype *m, sunindextype *n, const double *alpha, const double *a, sunindextype *lda, const double *x, sunindextype *inc_x, const double *beta, double *y, sunindextype *inc_y); extern void dtrsv_f77(const char *uplo, const char *trans, const char *diag, const sunindextype *n, const double *a, const sunindextype *lda, double *x, const sunindextype *inc_x); extern void sgemv_f77(const char *trans, sunindextype *m, sunindextype *n, const float *alpha, const float *a, sunindextype *lda, const float *x, sunindextype *inc_x, const float *beta, float *y, sunindextype *inc_y); extern void strsv_f77(const char *uplo, const char *trans, const char *diag, const sunindextype *n, const float *a, const sunindextype *lda, float *x, const sunindextype *inc_x); /* Level-3 BLAS */ extern void dsyrk_f77(const char *uplo, const char *trans, const sunindextype *n, const sunindextype *k, const double *alpha, const double *a, const sunindextype *lda, const double *beta, const double *c, const sunindextype *ldc); extern void ssyrk_f77(const char *uplo, const char *trans, const sunindextype *n, const sunindextype *k, const float *alpha, const float *a, const sunindextype *lda, const float *beta, const float *c, const sunindextype *ldc); /* LAPACK */ extern void dgbtrf_f77(const sunindextype *m, const sunindextype *n, const sunindextype *kl, const sunindextype *ku, double *ab, sunindextype *ldab, sunindextype *ipiv, sunindextype *info); extern void dgbtrs_f77(const char *trans, const sunindextype *n, const sunindextype *kl, const sunindextype *ku, const sunindextype *nrhs, double *ab, const sunindextype *ldab, sunindextype *ipiv, double *b, const sunindextype *ldb, sunindextype *info); extern void dgeqp3_f77(const sunindextype *m, const sunindextype *n, double *a, const sunindextype *lda, sunindextype *jpvt, double *tau, double *work, const sunindextype *lwork, sunindextype *info); extern void 
dgeqrf_f77(const sunindextype *m, const sunindextype *n, double *a, const sunindextype *lda, double *tau, double *work, const sunindextype *lwork, sunindextype *info); extern void dgetrf_f77(const sunindextype *m, const sunindextype *n, double *a, sunindextype *lda, sunindextype *ipiv, sunindextype *info); extern void dgetrs_f77(const char *trans, const sunindextype *n, const sunindextype *nrhs, double *a, const sunindextype *lda, sunindextype *ipiv, double *b, const sunindextype *ldb, sunindextype *info); extern void dormqr_f77(const char *side, const char *trans, const sunindextype *m, const sunindextype *n, const sunindextype *k, double *a, const sunindextype *lda, double *tau, double *c, const sunindextype *ldc, double *work, const sunindextype *lwork, sunindextype *info); extern void dpotrf_f77(const char *uplo, const sunindextype *n, double *a, sunindextype *lda, sunindextype *info); extern void dpotrs_f77(const char *uplo, const sunindextype *n, const sunindextype *nrhs, double *a, const sunindextype *lda, double *b, const sunindextype *ldb, sunindextype *info); extern void sgbtrf_f77(const sunindextype *m, const sunindextype *n, const sunindextype *kl, const sunindextype *ku, float *ab, sunindextype *ldab, sunindextype *ipiv, sunindextype *info); extern void sgbtrs_f77(const char *trans, const sunindextype *n, const sunindextype *kl, const sunindextype *ku, const sunindextype *nrhs, float *ab, const sunindextype *ldab, sunindextype *ipiv, float *b, const sunindextype *ldb, sunindextype *info); extern void sgeqp3_f77(const sunindextype *m, const sunindextype *n, float *a, const sunindextype *lda, sunindextype *jpvt, float *tau, float *work, const sunindextype *lwork, sunindextype *info); extern void sgeqrf_f77(const sunindextype *m, const sunindextype *n, float *a, const sunindextype *lda, float *tau, float *work, const sunindextype *lwork, sunindextype *info); extern void sgetrf_f77(const sunindextype *m, const sunindextype *n, float *a, sunindextype *lda, 
sunindextype *ipiv, sunindextype *info); extern void sgetrs_f77(const char *trans, const sunindextype *n, const sunindextype *nrhs, float *a, const sunindextype *lda, sunindextype *ipiv, float *b, const sunindextype *ldb, sunindextype *info); extern void sormqr_f77(const char *side, const char *trans, const sunindextype *m, const sunindextype *n, const sunindextype *k, float *a, const sunindextype *lda, float *tau, float *c, const sunindextype *ldc, float *work, const sunindextype *lwork, sunindextype *info); extern void spotrf_f77(const char *uplo, const sunindextype *n, float *a, sunindextype *lda, sunindextype *info); extern void spotrs_f77(const char *uplo, const sunindextype *n, const sunindextype *nrhs, float *a, const sunindextype *lda, float *b, const sunindextype *ldb, sunindextype *info); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_config.h0000644000176200001440000001435014645137106022227 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos, Aaron Collier and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS configuration header file. 
* -----------------------------------------------------------------*/ #ifndef _SUNDIALS_CONFIG_H #define _SUNDIALS_CONFIG_H #include "sundials/sundials_export.h" #ifndef SUNDIALS_DEPRECATED_MSG # define SUNDIALS_DEPRECATED_MSG(msg) __attribute__ ((__deprecated__(msg))) #endif #ifndef SUNDIALS_DEPRECATED_EXPORT_MSG # define SUNDIALS_DEPRECATED_EXPORT_MSG(msg) SUNDIALS_EXPORT SUNDIALS_DEPRECATED_MSG(msg) #endif #ifndef SUNDIALS_DEPRECATED_NO_EXPORT_MSG # define SUNDIALS_DEPRECATED_NO_EXPORT_MSG(msg) SUNDIALS_NO_EXPORT SUNDIALS_DEPRECATED_MSG(msg) #endif /* ------------------------------------------------------------------ * Define SUNDIALS version numbers * -----------------------------------------------------------------*/ #define SUNDIALS_VERSION "6.1.1" #define SUNDIALS_VERSION_MAJOR 6 #define SUNDIALS_VERSION_MINOR 1 #define SUNDIALS_VERSION_PATCH 1 #define SUNDIALS_VERSION_LABEL "" #define SUNDIALS_GIT_VERSION "" /* ------------------------------------------------------------------ * SUNDIALS build information * -----------------------------------------------------------------*/ /* Define precision of SUNDIALS data type 'realtype' * Depending on the precision level, one of the following * three macros will be defined: * #define SUNDIALS_SINGLE_PRECISION 1 * #define SUNDIALS_DOUBLE_PRECISION 1 * #define SUNDIALS_EXTENDED_PRECISION 1 */ #define SUNDIALS_DOUBLE_PRECISION 1 /* Define type of vector indices in SUNDIALS 'sunindextype'. * Depending on user choice of index type, one of the following * two macros will be defined: * #define SUNDIALS_INT64_T 1 * #define SUNDIALS_INT32_T 1 */ #define SUNDIALS_INT64_T 1 /* Define the type of vector indices in SUNDIALS 'sunindextype'. * The macro will be defined with a type of the appropriate size. 
*/ #define SUNDIALS_INDEX_TYPE int64_t /* Use generic math functions * If it was decided that generic math functions can be used, then * #define SUNDIALS_USE_GENERIC_MATH */ #define SUNDIALS_USE_GENERIC_MATH /* Use POSIX timers if available. * #define SUNDIALS_HAVE_POSIX_TIMERS */ #define SUNDIALS_HAVE_POSIX_TIMERS /* BUILD CVODE with fused kernel functionality */ /* #undef SUNDIALS_BUILD_PACKAGE_FUSED_KERNELS */ /* BUILD SUNDIALS with monitoring functionalities */ /* #undef SUNDIALS_BUILD_WITH_MONITORING */ /* BUILD SUNDIALS with profiling functionalities */ /* #undef SUNDIALS_BUILD_WITH_PROFILING */ /* ------------------------------------------------------------------ * SUNDIALS TPL macros * -----------------------------------------------------------------*/ /* Caliper */ /* #undef SUNDIALS_CALIPER_ENABLED */ /* MAGMA backends */ #define SUNDIALS_MAGMA_BACKENDS_CUDA /* #undef SUNDIALS_MAGMA_BACKENDS_HIP */ /* Set if SUNDIALS is built with MPI support, then * #define SUNDIALS_MPI_ENABLED 1 * otherwise * #define SUNDIALS_MPI_ENABLED 0 */ #define SUNDIALS_MPI_ENABLED 0 /* SUPERLUMT threading type */ /* #undef SUNDIALS_SUPERLUMT_THREAD_TYPE */ /* Trilinos with MPI is available, then * #define SUNDIALS_TRILINOS_HAVE_MPI */ /* #undef SUNDIALS_TRILINOS_HAVE_MPI */ /* RAJA backends */ #define SUNDIALS_RAJA_BACKENDS_CUDA /* #undef SUNDIALS_RAJA_BACKENDS_HIP */ /* #undef SUNDIALS_RAJA_BACKENDS_SYCL */ /* ------------------------------------------------------------------ * SUNDIALS modules enabled * -----------------------------------------------------------------*/ #define SUNDIALS_ARKODE 1 #define SUNDIALS_CVODE 1 #define SUNDIALS_CVODES 1 #define SUNDIALS_IDA 1 #define SUNDIALS_IDAS 1 #define SUNDIALS_KINSOL 1 #define SUNDIALS_NVECTOR_SERIAL 1 #define SUNDIALS_NVECTOR_MANYVECTOR 1 #define SUNDIALS_SUNMATRIX_BAND 1 #define SUNDIALS_SUNMATRIX_DENSE 1 #define SUNDIALS_SUNMATRIX_SPARSE 1 #define SUNDIALS_SUNLINSOL_BAND 1 #define SUNDIALS_SUNLINSOL_DENSE 1 #define 
SUNDIALS_SUNLINSOL_PCG 1 #define SUNDIALS_SUNLINSOL_SPBCGS 1 #define SUNDIALS_SUNLINSOL_SPFGMR 1 #define SUNDIALS_SUNLINSOL_SPGMR 1 #define SUNDIALS_SUNLINSOL_SPTFQMR 1 #define SUNDIALS_SUNNONLINSOL_NEWTON 1 #define SUNDIALS_SUNNONLINSOL_FIXEDPOINT 1 /* ------------------------------------------------------------------ * SUNDIALS fortran configuration * -----------------------------------------------------------------*/ /* Define Fortran name-mangling macro for C identifiers. * Depending on the inferred scheme, one of the following six * macros will be defined: * #define SUNDIALS_F77_FUNC(name,NAME) name * #define SUNDIALS_F77_FUNC(name,NAME) name ## _ * #define SUNDIALS_F77_FUNC(name,NAME) name ## __ * #define SUNDIALS_F77_FUNC(name,NAME) NAME * #define SUNDIALS_F77_FUNC(name,NAME) NAME ## _ * #define SUNDIALS_F77_FUNC(name,NAME) NAME ## __ */ /* Define Fortran name-mangling macro for C identifiers * which contain underscores. */ /* Allow user to specify different MPI communicator * If it was found that the MPI implementation supports MPI_Comm_f2c, then * #define SUNDIALS_MPI_COMM_F2C 1 * otherwise * #define SUNDIALS_MPI_COMM_F2C 0 */ /* ------------------------------------------------------------------ * SUNDIALS inline macros. * -----------------------------------------------------------------*/ /* Mark SUNDIALS function as inline. */ #ifndef SUNDIALS_CXX_INLINE #define SUNDIALS_CXX_INLINE inline #endif #ifndef SUNDIALS_C_INLINE #ifndef __STDC_VERSION__ /* must be c89 or c90 */ #define SUNDIALS_C_INLINE #else #define SUNDIALS_C_INLINE inline #endif #endif #ifdef __cplusplus #define SUNDIALS_INLINE SUNDIALS_CXX_INLINE #else #define SUNDIALS_INLINE SUNDIALS_C_INLINE #endif /* Mark SUNDIALS function as static inline. 
*/ #define SUNDIALS_STATIC_INLINE static SUNDIALS_INLINE #endif /* _SUNDIALS_CONFIG_H */ StanHeaders/inst/include/sundials/sundials_context.h0000644000176200001440000000320014645137106022436 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * SUNDIALS context class. A context object holds data that all * SUNDIALS objects in a simulation share. It is thread-safe provided * that each thread has its own context object. * ----------------------------------------------------------------*/ #ifndef _SUNDIALS_CONTEXT_H #define _SUNDIALS_CONTEXT_H #include "sundials/sundials_types.h" #include "sundials/sundials_profiler.h" #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif typedef struct _SUNContext *SUNContext; SUNDIALS_EXPORT int SUNContext_Create(void* comm, SUNContext* ctx); SUNDIALS_EXPORT int SUNContext_GetProfiler(SUNContext sunctx, SUNProfiler* profiler); SUNDIALS_EXPORT int SUNContext_SetProfiler(SUNContext sunctx, SUNProfiler profiler); SUNDIALS_EXPORT int SUNContext_Free(SUNContext* ctx); #ifdef __cplusplus } namespace sundials { class Context { public: Context(void* comm = NULL) { SUNContext_Create(comm, &sunctx_); } operator SUNContext() { return sunctx_; } ~Context() { SUNContext_Free(&sunctx_); } private: SUNContext sunctx_; }; } /* namespace sundials */ #endif #endif StanHeaders/inst/include/sundials/sundials_export.h0000644000176200001440000000204714645137106022303 0ustar liggesusers #ifndef SUNDIALS_EXPORT_H #define SUNDIALS_EXPORT_H #ifdef 
SUNDIALS_STATIC_DEFINE # define SUNDIALS_EXPORT # define SUNDIALS_NO_EXPORT #else # ifndef SUNDIALS_EXPORT # ifdef sundials_generic_EXPORTS /* We are building this library */ # define SUNDIALS_EXPORT __attribute__((visibility("default"))) # else /* We are using this library */ # define SUNDIALS_EXPORT __attribute__((visibility("default"))) # endif # endif # ifndef SUNDIALS_NO_EXPORT # define SUNDIALS_NO_EXPORT __attribute__((visibility("hidden"))) # endif #endif #ifndef SUNDIALS_DEPRECATED # define SUNDIALS_DEPRECATED __attribute__ ((__deprecated__)) #endif #ifndef SUNDIALS_DEPRECATED_EXPORT # define SUNDIALS_DEPRECATED_EXPORT SUNDIALS_EXPORT SUNDIALS_DEPRECATED #endif #ifndef SUNDIALS_DEPRECATED_NO_EXPORT # define SUNDIALS_DEPRECATED_NO_EXPORT SUNDIALS_NO_EXPORT SUNDIALS_DEPRECATED #endif #if 0 /* DEFINE_NO_DEPRECATED */ # ifndef SUNDIALS_NO_DEPRECATED # define SUNDIALS_NO_DEPRECATED # endif #endif #endif /* SUNDIALS_EXPORT_H */ StanHeaders/inst/include/sundials/sundials_band.h0000644000176200001440000002332114645137106021664 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Alan C. Hindmarsh and Radu Serban @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for a generic BAND linear solver * package, based on the SUNDlsMat type defined in sundials_direct.h. * * There are two sets of band solver routines listed in * this file: one set uses type SUNDlsMat defined below and the * other set uses the type realtype ** for band matrix arguments. * Routines that work with the type SUNDlsMat begin with "Band". 
* Routines that work with realtype ** begin with "band". * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_BAND_H #define _SUNDIALS_BAND_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * Function: SUNDlsMat_BandGBTRF * ----------------------------------------------------------------- * Usage : ier = SUNDlsMat_BandGBTRF(A, p); * if (ier != 0) ... A is singular * ----------------------------------------------------------------- * SUNDlsMat_BandGBTRF performs the LU factorization of the N by N band * matrix A. This is done using standard Gaussian elimination * with partial pivoting. * * A successful LU factorization leaves the "matrix" A and the * pivot array p with the following information: * * (1) p[k] contains the row number of the pivot element chosen * at the beginning of elimination step k, k = 0, 1, ..., N-1. * * (2) If the unique LU factorization of A is given by PA = LU, * where P is a permutation matrix, L is a lower triangular * matrix with all 1's on the diagonal, and U is an upper * triangular matrix, then the upper triangular part of A * (including its diagonal) contains U and the strictly lower * triangular part of A contains the multipliers, I-L. * * SUNDlsMat_BandGBTRF returns 0 if successful. Otherwise it encountered * a zero diagonal element during the factorization. In this case * it returns the column index (numbered from one) at which * it encountered the zero. * * Important Note: A must be allocated to accommodate the increase * in upper bandwidth that occurs during factorization. If * mathematically, A is a band matrix with upper bandwidth mu and * lower bandwidth ml, then the upper triangular factor U can * have upper bandwidth as big as smu = MIN(n-1,mu+ml). The lower * triangular factor L has lower bandwidth ml. 
Allocate A with * call A = BandAllocMat(N,mu,ml,smu), where mu, ml, and smu are * as defined above. The user does not have to zero the "extra" * storage allocated for the purpose of factorization. This will * handled by the SUNDlsMat_BandGBTRF routine. * * SUNDlsMat_BandGBTRF is only a wrapper around SUNDlsMat_bandGBTRF. * All work is done in SUNDlsMat_bandGBTRF, which works directly on the * data in the SUNDlsMat A (i.e. in the field A->cols). * ----------------------------------------------------------------- */ SUNDIALS_EXPORT sunindextype SUNDlsMat_BandGBTRF(SUNDlsMat A, sunindextype* p); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_BandGBTRF instead") sunindextype BandGBTRF(DlsMat A, sunindextype *p); SUNDIALS_EXPORT sunindextype SUNDlsMat_bandGBTRF(realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu, sunindextype *p); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_bandGBTRF instead") sunindextype bandGBTRF(realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu, sunindextype *p); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_BandGBTRS * ----------------------------------------------------------------- * Usage: SUNDlsMat_BandGBTRS(A, p, b); * ----------------------------------------------------------------- * SUNDlsMat_BandGBTRS solves the N-dimensional system A x = b using * the LU factorization in A and the pivot information in p computed * in SUNDlsMat_BandGBTRF. The solution x is returned in b. This * routine cannot fail if the corresponding call to * SUNDlsMat_BandGBTRF did not fail. * * SUNDlsMat_BandGBTRS is only a wrapper around SUNDlsMat_bandGBTRS * which does all the work directly on the data in the DlsMat A (i.e. * in A->cols). 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_BandGBTRS(SUNDlsMat A, sunindextype *p, realtype *b); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_BandGBTRS instead") void BandGBTRS(DlsMat A, sunindextype *p, realtype *b); SUNDIALS_EXPORT void SUNDlsMat_bandGBTRS(realtype **a, sunindextype n, sunindextype smu, sunindextype ml, sunindextype *p, realtype *b); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_bandGBTRS instead") void bandGBTRS(realtype **a, sunindextype n, sunindextype smu, sunindextype ml, sunindextype *p, realtype *b); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_BandCopy * ----------------------------------------------------------------- * Usage: SUNDlsMat_BandCopy(A, B, copymu, copyml); * ----------------------------------------------------------------- * SUNDlsMat_BandCopy copies the submatrix with upper and lower * bandwidths copymu, copyml of the N by N band matrix A into the N by * N band matrix B. * * SUNDlsMat_BandCopy is a wrapper around SUNDlsMat_bandCopy which * accesses the data in the DlsMat A and DlsMat B (i.e. the fields * cols). 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_BandCopy(SUNDlsMat A, SUNDlsMat B, sunindextype copymu, sunindextype copyml); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_BandCopy instead") void BandCopy(DlsMat A, DlsMat B, sunindextype copymu, sunindextype copyml); SUNDIALS_EXPORT void SUNDlsMat_bandCopy(realtype **a, realtype **b, sunindextype n, sunindextype a_smu, sunindextype b_smu, sunindextype copymu, sunindextype copyml); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_bandCopy instead") void bandCopy(realtype **a, realtype **b, sunindextype n, sunindextype a_smu, sunindextype b_smu, sunindextype copymu, sunindextype copyml); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_BandScale * ----------------------------------------------------------------- * Usage: SUNDlsMat_BandScale(c, A); * ----------------------------------------------------------------- * A(i,j) <- c*A(i,j), j-(A->mu) < = i < = j+(A->ml). * * SUNDlsMat_BandScale is a wrapper around SUNDlsMat_bandScale which * performs the actual scaling by accessing the data in the * SUNDlsMat A (i.e. the field A->cols). 
* ----------------------------------------------------------------- */ void SUNDlsMat_BandScale(realtype c, SUNDlsMat A); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_BandScale instead") void BandScale(realtype c, DlsMat A); SUNDIALS_EXPORT void SUNDlsMat_bandScale(realtype c, realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_bandScale instead") void bandScale(realtype c, realtype **a, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_bandAddIdentity * ----------------------------------------------------------------- * SUNDlsMat_bandAddIdentity adds the identity matrix to the n-by-n * matrix stored in the realtype** arrays. * ----------------------------------------------------------------- */ void SUNDlsMat_bandAddIdentity(realtype **a, sunindextype n, sunindextype smu); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_bandAddIdentity instead") void bandAddIdentity(realtype **a, sunindextype n, sunindextype smu); /* * ----------------------------------------------------------------- * Function: SUNDlsMat_BandMatvec * ----------------------------------------------------------------- * SUNDlsMat_BandMatvec computes the matrix-vector product y = A*x, * where A is an M-by-N band matrix, x is a vector of length N, and y * is a vector of length M. No error checking is performed on the * length of the arrays x and y. Only y is modified in this routine. * * SUNDlsMat_BandMatvec is a wrapper around SUNDlsMat_bandMatvec which * performs the actual product by accessing the data in the SUNDlsMat * A. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT void SUNDlsMat_BandMatvec(SUNDlsMat A, realtype *x, realtype *y); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_BandMatvec instead") void BandMatvec(DlsMat A, realtype *x, realtype *y); SUNDIALS_EXPORT void SUNDlsMat_bandMatvec(realtype **a, realtype *x, realtype *y, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNDlsMat_bandMatvec instead") void bandMatvec(realtype **a, realtype *x, realtype *y, sunindextype n, sunindextype mu, sunindextype ml, sunindextype smu); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_types.h0000644000176200001440000001323514645137106022127 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott Cohen, Alan Hindmarsh, Radu Serban, * Aaron Collier, and Slaven Peles @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header file exports three types: sunrealtype, sunindextype, * and sunbooleantype, as well as the constants SUNTRUE and SUNFALSE. * * Users should include the header file sundials_types.h in every * program file and use the exported name sunrealtype instead of * float, double or long double. * * The constants SUNDIALS_SINGLE_PRECISION, SUNDIALS_DOUBLE_PRECISION * and SUNDIALS_LONG_DOUBLE_PRECISION indicate the underlying data * type of sunrealtype. * * The legal types for sunrealtype are float, double and long double. 
* * The constants SUNDIALS_INT64_T and SUNDIALS_INT32_T indicate * the underlying data type of sunindextype -- the integer data type * used for vector and matrix indices. * * Data types are set at the configuration stage. * * The macro SUN_RCONST gives the user a convenient way to define * real-valued literal constants. To use the constant 1.0, for example, * the user should write the following: * * #define ONE SUN_RCONST(1.0) * * If sunrealtype is defined as a double, then SUN_RCONST(1.0) expands * to 1.0. If sunrealtype is defined as a float, then SUN_RCONST(1.0) * expands to 1.0F. If sunrealtype is defined as a long double, * then SUN_RCONST(1.0) expands to 1.0L. There is never a need to * explicitly cast 1.0 to (sunrealtype). The macro can be used for * literal constants only. It cannot be used for expressions. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_TYPES_H #define _SUNDIALS_TYPES_H #include #include #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* *------------------------------------------------------------------ * Macro _SUNDIALS_STRUCT_ * The _SUNDIALS_STRUCT_ macro is defined as a `struct` unless * generating the SWIG interfaces - in that case it is defined as * nothing. This is needed to work around a bug in SWIG which prevents * it from properly parsing our generic module structures. 
*------------------------------------------------------------------ */ #ifdef SWIG #define _SUNDIALS_STRUCT_ #else #define _SUNDIALS_STRUCT_ struct #endif /* *------------------------------------------------------------------ * Type sunrealtype * Macro SUN_RCONST * Constants SUN_SMALL_REAL, SUN_BIG_REAL, and SUN_UNIT_ROUNDOFF *------------------------------------------------------------------ */ #if defined(SUNDIALS_SINGLE_PRECISION) /* deprecated */ typedef float realtype; # define RCONST(x) x##F # define BIG_REAL FLT_MAX # define SMALL_REAL FLT_MIN # define UNIT_ROUNDOFF FLT_EPSILON typedef float sunrealtype; # define SUN_RCONST(x) x##F # define SUN_BIG_REAL FLT_MAX # define SUN_SMALL_REAL FLT_MIN # define SUN_UNIT_ROUNDOFF FLT_EPSILON #elif defined(SUNDIALS_DOUBLE_PRECISION) /* deprecated */ typedef double realtype; # define RCONST(x) x # define BIG_REAL DBL_MAX # define SMALL_REAL DBL_MIN # define UNIT_ROUNDOFF DBL_EPSILON typedef double sunrealtype; # define SUN_RCONST(x) x # define SUN_BIG_REAL DBL_MAX # define SUN_SMALL_REAL DBL_MIN # define SUN_UNIT_ROUNDOFF DBL_EPSILON #elif defined(SUNDIALS_EXTENDED_PRECISION) /* deprecated */ typedef long double realtype; # define RCONST(x) x##L # define BIG_REAL LDBL_MAX # define SMALL_REAL LDBL_MIN # define UNIT_ROUNDOFF LDBL_EPSILON typedef long double sunrealtype; # define SUN_RCONST(x) x##L # define SUN_BIG_REAL LDBL_MAX # define SUN_SMALL_REAL LDBL_MIN # define SUN_UNIT_ROUNDOFF LDBL_EPSILON #endif /* *------------------------------------------------------------------ * Type : sunindextype *------------------------------------------------------------------ * Defines integer type to be used for vector and matrix indices. * User can build sundials to use 32- or 64-bit signed integers. * If compiler does not support portable data types, the SUNDIALS * CMake build system tries to find a type of the desired size. 
*------------------------------------------------------------------ */ typedef SUNDIALS_INDEX_TYPE sunindextype; /* *------------------------------------------------------------------ * Type : sunbooleantype *------------------------------------------------------------------ * Constants : SUNFALSE and SUNTRUE *------------------------------------------------------------------ * ANSI C does not have a built-in boolean data type. Below is the * definition for a new type called sunbooleantype. The advantage of * using the name sunbooleantype (instead of int) is an increase in * code readability. It also allows the programmer to make a * distinction between int and boolean data. Variables of type * sunbooleantype are intended to have only the two values SUNFALSE and * SUNTRUE which are defined below to be equal to 0 and 1, * respectively. *------------------------------------------------------------------ */ /* deprecated */ #ifndef booleantype #define booleantype int #endif #ifndef sunbooleantype #define sunbooleantype int #endif #ifndef SUNFALSE #define SUNFALSE 0 #endif #ifndef SUNTRUE #define SUNTRUE 1 #endif #ifdef __cplusplus } #endif #endif /* _SUNDIALS_TYPES_H */ StanHeaders/inst/include/sundials/sundials_math.h0000644000176200001440000001723714645137106021722 0ustar liggesusers/* * ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. Hindmarsh and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the header file for a simple C-language math library. 
The * routines listed here work with the type realtype as defined in * the header file sundials_types.h. * ----------------------------------------------------------------- */ #ifndef _SUNDIALSMATH_H #define _SUNDIALSMATH_H #include #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * Macros * ----------------------------------------------------------------- * SUNMIN(A,B) returns the minimum of A and B * * SUNMAX(A,B) returns the maximum of A and B * * SUNSQR(A) returns A^2 * * SUNRsqrt calls the appropriate version of sqrt * * SUNRabs calls the appropriate version of abs * * SUNRexp calls the appropriate version of exp * * SUNRceil calls the appropriate version of ceil * ----------------------------------------------------------------- */ #ifndef SUNMIN #define SUNMIN(A, B) ((A) < (B) ? (A) : (B)) #endif #ifndef SUNMAX #define SUNMAX(A, B) ((A) > (B) ? (A) : (B)) #endif #ifndef SUNSQR #define SUNSQR(A) ((A)*(A)) #endif /* * ----------------------------------------------------------------- * Function : SUNRsqrt * ----------------------------------------------------------------- * Usage : realtype sqrt_x; * sqrt_x = SUNRsqrt(x); * ----------------------------------------------------------------- * SUNRsqrt(x) returns the square root of x. If x < ZERO, then * SUNRsqrt returns ZERO. * ----------------------------------------------------------------- */ #ifndef SUNRsqrt #if defined(SUNDIALS_USE_GENERIC_MATH) #define SUNRsqrt(x) ((x) <= RCONST(0.0) ? (RCONST(0.0)) : ((realtype) sqrt((double) (x)))) #elif defined(SUNDIALS_DOUBLE_PRECISION) #define SUNRsqrt(x) ((x) <= RCONST(0.0) ? (RCONST(0.0)) : (sqrt((x)))) #elif defined(SUNDIALS_SINGLE_PRECISION) #define SUNRsqrt(x) ((x) <= RCONST(0.0) ? (RCONST(0.0)) : (sqrtf((x)))) #elif defined(SUNDIALS_EXTENDED_PRECISION) #define SUNRsqrt(x) ((x) <= RCONST(0.0) ? 
(RCONST(0.0)) : (sqrtl((x)))) #endif #endif /* * ----------------------------------------------------------------- * Function : SUNRabs * ----------------------------------------------------------------- * Usage : realtype abs_x; * abs_x = SUNRabs(x); * ----------------------------------------------------------------- * SUNRabs(x) returns the absolute value of x. * ----------------------------------------------------------------- */ #ifndef SUNRabs #if defined(SUNDIALS_USE_GENERIC_MATH) #define SUNRabs(x) ((realtype) fabs((double) (x))) #elif defined(SUNDIALS_DOUBLE_PRECISION) #define SUNRabs(x) (fabs((x))) #elif defined(SUNDIALS_SINGLE_PRECISION) #define SUNRabs(x) (fabsf((x))) #elif defined(SUNDIALS_EXTENDED_PRECISION) #define SUNRabs(x) (fabsl((x))) #endif #endif /* * ----------------------------------------------------------------- * Function : SUNRexp * ----------------------------------------------------------------- * Usage : realtype exp_x; * exp_x = SUNRexp(x); * ----------------------------------------------------------------- * SUNRexp(x) returns e^x (base-e exponential function). * ----------------------------------------------------------------- */ #ifndef SUNRexp #if defined(SUNDIALS_USE_GENERIC_MATH) #define SUNRexp(x) ((realtype) exp((double) (x))) #elif defined(SUNDIALS_DOUBLE_PRECISION) #define SUNRexp(x) (exp((x))) #elif defined(SUNDIALS_SINGLE_PRECISION) #define SUNRexp(x) (expf((x))) #elif defined(SUNDIALS_EXTENDED_PRECISION) #define SUNRexp(x) (expl((x))) #endif #endif /* * ----------------------------------------------------------------- * Function : SUNRceil * ----------------------------------------------------------------- * Usage : realtype ceil_x; * ceil_x = SUNRceil(x); * ----------------------------------------------------------------- * SUNRceil(x) returns the smallest integer value not less than x. 
* ----------------------------------------------------------------- */ #ifndef SUNRceil #if defined(SUNDIALS_USE_GENERIC_MATH) #define SUNRceil(x) ((realtype) ceil((double) (x))) #elif defined(SUNDIALS_DOUBLE_PRECISION) #define SUNRceil(x) (ceil((x))) #elif defined(SUNDIALS_SINGLE_PRECISION) #define SUNRceil(x) (ceilf((x))) #elif defined(SUNDIALS_EXTENDED_PRECISION) #define SUNRceil(x) (ceill((x))) #endif #endif /* * ----------------------------------------------------------------- * Function : SUNRpowerI * ----------------------------------------------------------------- * Usage : int exponent; * realtype base, ans; * ans = SUNRpowerI(base,exponent); * ----------------------------------------------------------------- * SUNRpowerI returns the value of base^exponent, where base is of type * realtype and exponent is of type int. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT realtype SUNRpowerI(realtype base, int exponent); /* * ----------------------------------------------------------------- * Function : SUNRpowerR * ----------------------------------------------------------------- * Usage : realtype base, exponent, ans; * ans = SUNRpowerR(base,exponent); * ----------------------------------------------------------------- * SUNRpowerR returns the value of base^exponent, where both base and * exponent are of type realtype. If base < ZERO, then SUNRpowerR * returns ZERO. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT realtype SUNRpowerR(realtype base, realtype exponent); /* * ----------------------------------------------------------------- * Function : SUNRCompare * ----------------------------------------------------------------- * Usage : int isNotEqual; * realtype a, b; * isNotEqual = SUNRCompare(a, b); * ----------------------------------------------------------------- * SUNRCompareTol returns 0 if the relative difference of a and b is * less than or equal to 10*machine epsilon. 
If the relative * difference is greater than 10*machine epsilon, it returns 1. The * function handles the case where a or b are near zero as well as * the case where a or b are inf/nan. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT booleantype SUNRCompare(realtype a, realtype b); /* * ----------------------------------------------------------------- * Function : SUNRCompareTol * ----------------------------------------------------------------- * Usage : int isNotEqual; * realtype a, b, tol; * isNotEqual = SUNRCompareTol(a, b, tol); * ----------------------------------------------------------------- * SUNRCompareTol returns 0 if the relative difference of a and b is * less than or equal to the provided tolerance. If the relative * difference is greater than the tolerance, it returns 1. The * function handles the case where a or b are near zero as well as * the case where a or b are inf/nan. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT booleantype SUNRCompareTol(realtype a, realtype b, realtype tol); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_iterative.h0000644000176200001440000004440514645137106022762 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen and Alan C. Hindmarsh @ LLNL * Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header file contains declarations intended for use by * generic iterative solvers of Ax = b. 
The enumeration gives * symbolic names for the type of preconditioning to be used. * The function type declarations give the prototypes for the * functions to be called within an iterative linear solver, that * are responsible for * multiplying A by a given vector v (SUNATimesFn), * setting up a preconditioner P (SUNPSetupFn), and * solving the preconditioner equation Pz = r (SUNPSolveFn). * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_ITERATIVE_H #define _SUNDIALS_ITERATIVE_H #include #ifdef __cplusplus /* wrapper to enable C++ usage */ extern "C" { #endif /* * ----------------------------------------------------------------- * enum : types of preconditioning * ----------------------------------------------------------------- * SUN_PREC_NONE : The iterative linear solver should not use * preconditioning. * * SUN_PREC_LEFT : The iterative linear solver uses preconditioning * on the left only. * * SUN_PREC_RIGHT : The iterative linear solver uses preconditioning * on the right only. * * SUN_PREC_BOTH : The iterative linear solver uses preconditioning * on both the left and the right. * ----------------------------------------------------------------- */ /* DEPRECATED PREC_NONE: use SUN_PREC_NONE */ /* DEPRECATED PREC_LEFT: use SUN_PREC_LEFT */ /* DEPRECATED PREC_RIGHT: use SUN_PREC_RIGHT */ /* DEPRECATED PREC_BOTH: use SUN_PREC_BOTH */ enum { PREC_NONE, PREC_LEFT, PREC_RIGHT, PREC_BOTH }; enum { SUN_PREC_NONE, SUN_PREC_LEFT, SUN_PREC_RIGHT, SUN_PREC_BOTH }; /* * ----------------------------------------------------------------- * enum : types of Gram-Schmidt routines * ----------------------------------------------------------------- * SUN_MODIFIED_GS : The iterative solver uses the modified * Gram-Schmidt routine SUNModifiedGS listed in * this file. * * SUN_CLASSICAL_GS : The iterative solver uses the classical * Gram-Schmidt routine SUNClassicalGS listed in * this file. 
* ----------------------------------------------------------------- */ /* DEPRECATED MODIFIED_GS: use SUN_MODIFIED_GS */ /* DEPRECATED CLASSICAL_GS: use SUN_CLASSICAL_GS */ enum { MODIFIED_GS = 1, CLASSICAL_GS = 2 }; enum { SUN_MODIFIED_GS = 1, SUN_CLASSICAL_GS = 2 }; /* * ----------------------------------------------------------------- * Type: SUNATimesFn * ----------------------------------------------------------------- * An SUNATimesFn multiplies Av and stores the result in z. The * caller is responsible for allocating memory for the z vector. * The parameter A_data is a pointer to any information about A * which the function needs in order to do its job. The vector v * is unchanged. An SUNATimesFn returns 0 if successful and a * non-zero value if unsuccessful. * ----------------------------------------------------------------- */ /* DEPRECATED ATimesFn: use SUNATimesFn */ typedef int (*ATimesFn)(void *A_data, N_Vector v, N_Vector z); typedef int (*SUNATimesFn)(void *A_data, N_Vector v, N_Vector z); /* * ----------------------------------------------------------------- * Type: SUNPSetupFn * ----------------------------------------------------------------- * A SUNPSetupFn is an integrator-supplied routine that accesses data * stored in the integrator memory structure (P_data), and calls the * user-supplied, integrator-specific preconditioner setup routine. * ----------------------------------------------------------------- */ /* DEPRECATED PSetupFn: use SUNPSetupFn */ typedef int (*PSetupFn)(void *P_data); typedef int (*SUNPSetupFn)(void *P_data); /* * ----------------------------------------------------------------- * Type: SUNPSolveFn * ----------------------------------------------------------------- * A SUNPSolveFn solves the preconditioner equation Pz = r for the * vector z. The caller is responsible for allocating memory for * the z vector. The parameter P_data is a pointer to any * information about P which the function needs in order to do * its job. 
The parameter lr is input, and indicates whether P * is to be taken as the left preconditioner or the right * preconditioner: lr = 1 for left and lr = 2 for right. * If preconditioning is on one side only, lr can be ignored. * If the preconditioner is iterative, then it should strive to * solve the preconditioner equation so that * || Pz - r ||_wrms < tol * where the weight vector for the WRMS norm may be accessed from * the main integrator memory structure. * The vector r should not be modified by the SUNPSolveFn. * A SUNPSolveFn returns 0 if successful and a non-zero value if * unsuccessful. On a failure, a negative return value indicates * an unrecoverable condition, while a positive value indicates * a recoverable one, in which the calling routine may reattempt * the solution after updating preconditioner data. * ----------------------------------------------------------------- */ /* DEPRECATED PSolveFn: use SUNPSolveFn */ typedef int (*PSolveFn)(void *P_data, N_Vector r, N_Vector z, realtype tol, int lr); typedef int (*SUNPSolveFn)(void *P_data, N_Vector r, N_Vector z, realtype tol, int lr); /* * ----------------------------------------------------------------- * Type: SUNQRAddFn * ----------------------------------------------------------------- * A QRAddFn updates a given QR factorization defined by the input * parameters: * Q : N_Vector * * R : realtype * * with the input vector * f : N_Vector * * Additional input parameters include: * * m : (int) the number of vectors already in the QR factorization * * mMax : (int) the maximum number of vectors to be in the QR * factorization (the number of N_Vectors allocated to be in Q) * * SUNQR_data : (void *) a structure containing any additional inputs * required for the execution of QRAddFn * * ----------------------------------------------------------------- */ typedef int (*SUNQRAddFn)(N_Vector *Q, realtype *R, N_Vector f, int m, int mMax, void *QR_data); /* * 
----------------------------------------------------------------- * Function: SUNModifiedGS * ----------------------------------------------------------------- * SUNModifiedGS performs a modified Gram-Schmidt orthogonalization * of the N_Vector v[k] against the p unit N_Vectors at * v[k-1], v[k-2], ..., v[k-p]. * * v is an array of (k+1) N_Vectors v[i], i=0, 1, ..., k. * v[k-1], v[k-2], ..., v[k-p] are assumed to have L2-norm * equal to 1. * * h is the output k by k Hessenberg matrix of inner products. * This matrix must be allocated row-wise so that the (i,j)th * entry is h[i][j]. The inner products (v[i],v[k]), * i=i0, i0+1, ..., k-1, are stored at h[i][k-1]. Here * i0=SUNMAX(0,k-p). * * k is the index of the vector in the v array that needs to be * orthogonalized against previous vectors in the v array. * * p is the number of previous vectors in the v array against * which v[k] is to be orthogonalized. * * new_vk_norm is a pointer to memory allocated by the caller to * hold the Euclidean norm of the orthogonalized vector v[k]. * * If (k-p) < 0, then SUNModifiedGS uses p=k. The orthogonalized * v[k] is NOT normalized and is stored over the old v[k]. Once * the orthogonalization has been performed, the Euclidean norm * of v[k] is stored in (*new_vk_norm). * * SUNModifiedGS returns 0 to indicate success. It cannot fail. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNModifiedGS(N_Vector* v, realtype **h, int k, int p, realtype *new_vk_norm); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNModifiedGS instead") int ModifiedGS(N_Vector* v, realtype **h, int k, int p, realtype *new_vk_norm); /* * ----------------------------------------------------------------- * Function: SUNClassicalGS * ----------------------------------------------------------------- * SUNClassicalGS performs a classical Gram-Schmidt * orthogonalization of the N_Vector v[k] against the p unit * N_Vectors at v[k-1], v[k-2], ..., v[k-p]. 
The parameters v, h, * k, p, and new_vk_norm are as described in the documentation * for SUNModifiedGS. * * stemp is a length k+1 array of realtype which can be used as * workspace by the SUNClassicalGS routine. * * vtemp is an N_Vector array of k+1 vectors which can be used as * workspace by the SUNClassicalGS routine. * * SUNClassicalGS returns 0 to indicate success. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNClassicalGS(N_Vector* v, realtype **h, int k, int p, realtype *new_vk_norm, realtype *stemp, N_Vector* vtemp); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNClassicalGS instead") int ClassicalGS(N_Vector* v, realtype **h, int k, int p, realtype *new_vk_norm, realtype *stemp, N_Vector* vtemp); /* * ----------------------------------------------------------------- * Function: SUNQRfact * ----------------------------------------------------------------- * SUNQRfact performs a QR factorization of the Hessenberg matrix H. * * n is the problem size; the matrix H is (n+1) by n. * * h is the (n+1) by n Hessenberg matrix H to be factored. It is * stored row-wise. * * q is an array of length 2*n containing the Givens rotations * computed by this function. A Givens rotation has the form: * | c -s | * | s c |. * The components of the Givens rotations are stored in q as * (c, s, c, s, ..., c, s). * * job is a control flag. If job==0, then a new QR factorization * is performed. If job!=0, then it is assumed that the first * n-1 columns of h have already been factored and only the last * column needs to be updated. * * SUNQRfact returns 0 if successful. If a zero is encountered on * the diagonal of the triangular factor R, then SUNQRfact returns * the equation number of the zero entry, where the equations are * numbered from 1, not 0. If SUNQRsol is subsequently called in * this situation, it will return an error because it could not * divide by the zero diagonal entry. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRfact(int n, realtype **h, realtype *q, int job); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNQRFact instead") int QRfact(int n, realtype **h, realtype *q, int job); /* * ----------------------------------------------------------------- * Function: SUNQRsol * ----------------------------------------------------------------- * SUNQRsol solves the linear least squares problem * * min (b - H*x, b - H*x), x in R^n, * * where H is a Hessenberg matrix, and b is in R^(n+1). * It uses the QR factors of H computed by SUNQRfact. * * n is the problem size; the matrix H is (n+1) by n. * * h is a matrix (computed by SUNQRfact) containing the upper * triangular factor R of the original Hessenberg matrix H. * * q is an array of length 2*n (computed by SUNQRfact) containing * the Givens rotations used to factor H. * * b is the (n+1)-vector appearing in the least squares problem * above. * * On return, b contains the solution x of the least squares * problem, if SUNQRsol was successful. * * SUNQRsol returns a 0 if successful. Otherwise, a zero was * encountered on the diagonal of the triangular factor R. * In this case, SUNQRsol returns the equation number (numbered * from 1, not 0) of the zero entry. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRsol(int n, realtype **h, realtype *q, realtype *b); SUNDIALS_DEPRECATED_EXPORT_MSG("use SUNQRsol instead") int QRsol(int n, realtype **h, realtype *q, realtype *b); /* * ----------------------------------------------------------------- * Function: SUNQRAdd_MGS * ----------------------------------------------------------------- * SUNQRAdd_MGS uses Modified Gram Schmidt to update the QR factorization * stored in user inputs * - N_Vector *Q * - realtype *R * to include the orthonormalized vector input by * - N_Vector df. 
* * Additional input parameters include: * * m : (int) current number of vectors in QR factorization * * mMax : (int) maximum number of vectors that will be in the QR * factorization (the allocated number of N_Vectors in Q) * * QRdata : (void *) a struct containing any additional temporary * vectors or arrays required for the QRAdd routine * * On return, Q and R contain the updated Q R factors, if * SUNQRAdd_MGS was successful. * * SUNQRAdd_MGS returns a 0 if successful. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRAdd_MGS(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata); /* * ----------------------------------------------------------------- * Function: SUNQRAdd_ICWY * ----------------------------------------------------------------- * SUNQRAdd_ICWY uses the Inverse Compact WY Modified Gram Schmidt * method to update the QR factorization stored in user inputs * - N_Vector *Q * - realtype *R * - realtype *T (held within (void *) QRdata) * to include the orthonormalized vector input by * - N_Vector df. * where the factorization to be updated is of the form * Q * T * R * * Additional input parameters include: * * m : (int) current number of vectors in QR factorization * * mMax : (int) maximum number of vectors that will be in the QR * factorization (the allocated number of N_Vectors in Q) * * QRdata : (void *) a struct containing any additional temporary * vectors or arrays required for the QRAdd routine * * QRdata should contain : * N_Vector vtemp, realtype *temp_array (this will be used for T) * * On return, Q, R, and T contain the updated Q T R factors, if * SUNQRAdd_ICWY was successful. * * SUNQRAdd_ICWY returns a 0 if successful. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRAdd_ICWY(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata); /* * ----------------------------------------------------------------- * Function: SUNQRAdd_ICWY_SB * ----------------------------------------------------------------- * The same function as SUNQRAdd_ICWY but using a single buffer * for global reductions. * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRAdd_ICWY_SB(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata); /* * ----------------------------------------------------------------- * Function: SUNQRAdd_CGS2 * ----------------------------------------------------------------- * SUNQRAdd_CGS2 uses a Classical Gram Schmidt with Reorthogonalization * formulation to update the QR factorization stored in user inputs * - N_Vector *Q * - realtype *R * to include the orthonormalized vector input by * - N_Vector df. * * Additional input parameters include: * * m : (int) current number of vectors in QR factorization * * mMax : (int) maximum number of vectors that will be in the QR * factorization (the allocated number of N_Vectors in Q) * * QRdata : (void *) a struct containing any additional temporary * vectors or arrays required for the QRAdd routine * * QRdata should contain : * N_Vector vtemp, N_Vector vtemp2, realtype *temp_array * * On return, Q and R contain the updated Q R factors, if * SUNQRAdd_CGS2 was successful. * * SUNQRAdd_CGS2 returns a 0 if successful. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRAdd_CGS2(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata); /* * ----------------------------------------------------------------- * Function: SUNQRAdd_DCGS2 * ----------------------------------------------------------------- * SUNQRAdd_DCGS2 uses a Classical Gram Schmidt with Reorthogonalization * formulation that delays reorthogonlization (for the purpose of * reducing number of inner products) to update the QR factorization * stored in user inputs * - N_Vector *Q * - realtype *R * to include the orthonormalized vector input by * - N_Vector df. * * Additional input parameters include: * * m : (int) current number of vectors in QR factorization * * mMax : (int) maximum number of vectors that will be in the QR * factorization (the allocated number of N_Vectors in Q) * * QRdata : (void *) a struct containing any additional temporary * vectors or arrays required for the QRAdd routine * * QRdata should contain : * N_Vector vtemp, N_Vector vtemp2, realtype *temp_array * * On return, Q and R contain the updated Q R factors, if * SUNQRAdd_DCGS2 was successful. * * SUNQRAdd_DCGS2 returns a 0 if successful. Otherwise,.... * ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRAdd_DCGS2(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata); /* * ----------------------------------------------------------------- * Function: SUNQRAdd_DCGS2_SB * ----------------------------------------------------------------- * The same function as SUNQRAdd_DCGS2 but using a single buffer * for global reductions. 
* ----------------------------------------------------------------- */ SUNDIALS_EXPORT int SUNQRAdd_DCGS2_SB(N_Vector *Q, realtype *R, N_Vector df, int m, int mMax, void *QRdata); #ifdef __cplusplus } #endif #endif StanHeaders/inst/include/sundials/sundials_sycl_policies.hpp0000644000176200001440000001062414645137106024163 0ustar liggesusers/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This header files defines the ExecPolicy classes which * are utilized to determine SYCL kernel launch paramaters. * -----------------------------------------------------------------*/ #ifndef _SUNDIALS_SYCLEXECPOLICIES_HPP #define _SUNDIALS_SYCLEXECPOLICIES_HPP #include #include #include namespace sundials { namespace sycl { class ExecPolicy { public: virtual size_t gridSize(size_t numWorkUnits = 0, size_t blockDim = 0) const = 0; virtual size_t blockSize(size_t numWorkUnits = 0, size_t gridDim = 0) const = 0; virtual ExecPolicy* clone() const = 0; virtual ~ExecPolicy() {} }; /* * A kernel execution policy that maps each thread to a work unit. * The number of threads per block (blockSize) can be set to anything. * The grid size will be chosen so that there are enough threads for one * thread per element. 
*/ class ThreadDirectExecPolicy : public ExecPolicy { public: ThreadDirectExecPolicy(const size_t blockDim) : blockDim_(blockDim) {} ThreadDirectExecPolicy(const ThreadDirectExecPolicy& ex) : blockDim_(ex.blockDim_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t blockDim = 0) const { /* ceil(n/m) = floor((n + m - 1) / m) */ return (numWorkUnits + blockSize() - 1) / blockSize(); } virtual size_t blockSize(size_t numWorkUnits = 0, size_t gridDim = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new ThreadDirectExecPolicy(*this)); } private: const size_t blockDim_; }; /* * A kernel execution policy for kernels that use grid stride loops. * The number of threads per block (blockSize) can be set to anything. * The number of blocks (gridSize) can be set to anything. */ class GridStrideExecPolicy : public ExecPolicy { public: GridStrideExecPolicy(const size_t blockDim, const size_t gridDim) : blockDim_(blockDim), gridDim_(gridDim) {} GridStrideExecPolicy(const GridStrideExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t blockDim = 0) const { return gridDim_; } virtual size_t blockSize(size_t numWorkUnits = 0, size_t gridDim = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new GridStrideExecPolicy(*this)); } private: const size_t blockDim_; const size_t gridDim_; }; /* * A kernel execution policy for performing a reduction across indvidual thread * blocks. The number of threads per block (blockSize) can be set to anything. * The number of blocks (gridSize) can be set to any value greater than or equal * to 0. If it is set to 0, then the grid size will be chosen so that there are * at most two work units per thread. 
*/ class BlockReduceExecPolicy : public ExecPolicy { public: BlockReduceExecPolicy(const size_t blockDim, const size_t gridDim = 0) : blockDim_(blockDim), gridDim_(gridDim) {} BlockReduceExecPolicy(const BlockReduceExecPolicy& ex) : blockDim_(ex.blockDim_), gridDim_(ex.gridDim_) {} virtual size_t gridSize(size_t numWorkUnits = 0, size_t blockDim = 0) const { if (gridDim_ == 0) { return (numWorkUnits + (blockSize() * 2 - 1)) / (blockSize() * 2); } return gridDim_; } virtual size_t blockSize(size_t numWorkUnits = 0, size_t gridDim = 0) const { return blockDim_; } virtual ExecPolicy* clone() const { return static_cast(new BlockReduceExecPolicy(*this)); } private: const size_t blockDim_; const size_t gridDim_; }; } // namespace sycl } // namespace sundials typedef sundials::sycl::ExecPolicy SUNSyclExecPolicy; typedef sundials::sycl::ThreadDirectExecPolicy SUNSyclThreadDirectExecPolicy; typedef sundials::sycl::GridStrideExecPolicy SUNSyclGridStrideExecPolicy; typedef sundials::sycl::BlockReduceExecPolicy SUNSyclBlockReduceExecPolicy; #endif StanHeaders/inst/include/stan/0000755000176200001440000000000014645137106016027 5ustar liggesusersStanHeaders/inst/include/stan/math.hpp0000644000176200001440000000062014645137106017467 0ustar liggesusers#ifndef STAN_MATH_HPP #define STAN_MATH_HPP /** * \defgroup prob_dists Probability Distributions */ /** * \ingroup prob_dists * \defgroup multivar_dists Multivariate Distributions * Distributions with Matrix inputs */ /** * \ingroup prob_dists * \defgroup univar_dists Univariate Distributions * Distributions with scalar, vector, or array input. 
*/ #include #endif StanHeaders/inst/include/stan/math/0000755000176200001440000000000014645161272016761 5ustar liggesusersStanHeaders/inst/include/stan/math/mix/0000755000176200001440000000000014645161272017556 5ustar liggesusersStanHeaders/inst/include/stan/math/mix/meta.hpp0000644000176200001440000000073114645137106021215 0ustar liggesusers#ifndef STAN_MATH_MIX_META_HPP #define STAN_MATH_MIX_META_HPP #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/mix/fun/0000755000176200001440000000000014645161272020346 5ustar liggesusersStanHeaders/inst/include/stan/math/mix/fun/typedefs.hpp0000644000176200001440000000140414645137106022700 0ustar liggesusers#ifndef STAN_MATH_MIX_MAT_FUN_TYPEDEFS_HPP #define STAN_MATH_MIX_MAT_FUN_TYPEDEFS_HPP #include #include #include #include namespace stan { namespace math { using matrix_fv = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; using matrix_ffv = Eigen::Matrix>, Eigen::Dynamic, Eigen::Dynamic>; using vector_fv = Eigen::Matrix, Eigen::Dynamic, 1>; using vector_ffv = Eigen::Matrix>, Eigen::Dynamic, 1>; using row_vector_fv = Eigen::Matrix, 1, Eigen::Dynamic>; using row_vector_ffv = Eigen::Matrix>, 1, Eigen::Dynamic>; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor.hpp0000644000176200001440000000110714645137106021745 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_HPP #define STAN_MATH_MIX_FUNCTOR_HPP #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/mix/fun.hpp0000644000176200001440000000015714645137106021061 0ustar liggesusers#ifndef STAN_MATH_MIX_FUN_HPP #define STAN_MATH_MIX_FUN_HPP #include #endif StanHeaders/inst/include/stan/math/mix/functor/0000755000176200001440000000000014645161272021236 5ustar liggesusersStanHeaders/inst/include/stan/math/mix/functor/derivative.hpp0000644000176200001440000000145714645137106024117 0ustar 
liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_DERIVATIVE_HPP #define STAN_MATH_MIX_FUNCTOR_DERIVATIVE_HPP #include #include #include #include namespace stan { namespace math { /** * Return the derivative of the specified univariate function at * the specified argument. * * @tparam T Argument type * @tparam F Function type * @param[in] f Function * @param[in] x Argument * @param[out] fx Value of function applied to argument * @param[out] dfx_dx Value of derivative */ template void derivative(const F& f, const T& x, T& fx, T& dfx_dx) { fvar x_fvar = fvar(x, 1.0); fvar fx_fvar = f(x_fvar); fx = fx_fvar.val_; dfx_dx = fx_fvar.d_; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/finite_diff_grad_hessian_auto.hpp0000644000176200001440000000515014645137106027754 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_FINITE_DIFF_GRAD_HESSIAN_AUTO_HPP #define STAN_MATH_MIX_FUNCTOR_FINITE_DIFF_GRAD_HESSIAN_AUTO_HPP #include #include #include #include #include namespace stan { namespace math { /** * Calculate the value, Hessian, and the gradient of the Hessian of * the specified function at the specified argument using second-order * autodiff and first-order finite difference. * *

The functor must implement * * * double operator()(const Eigen::VectorXd&) const; * * *

Reference for finite difference to compute gradient: * *
De Levie: An improved numerical approximation * for the first derivative, page 3 * *

Step size for dimension `i` is set automatically using * `stan::math::finite_diff_stepsize(H(i, j))`; the nested * finite differences are over entries in the Hessian. * *

Evaluating this function involves 6 calls to the Hessian * autodiff function for each entry in the Hessian. * * @tparam F Type of function * @param[in] f Function * @param[in] x Argument to function * @param[out] fx Function applied to argument * @param[out] hess Hessian matrix * @param[out] grad_hess_fx gradient of Hessian of function at argument */ template void finite_diff_grad_hessian_auto(const F& f, const Eigen::VectorXd& x, double& fx, Eigen::MatrixXd& hess, std::vector& grad_hess_fx) { int d = x.size(); grad_hess_fx.clear(); grad_hess_fx.reserve(d); Eigen::VectorXd x_temp(x); Eigen::VectorXd grad_auto(d); Eigen::MatrixXd hess_auto(d, d); Eigen::MatrixXd hess_diff(d, d); hessian(f, x, fx, grad_auto, hess); for (int i = 0; i < d; ++i) { double dummy_fx_eval; double epsilon = finite_diff_stepsize(x(i)); hess_diff.setZero(); x_temp(i) = x(i) + 2 * epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff = -hess_auto; x_temp(i) = x(i) + -2 * epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff += hess_auto; x_temp(i) = x(i) + epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff += 8 * hess_auto; x_temp(i) = x(i) + -epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff -= 8 * hess_auto; x_temp(i) = x(i); hess_diff /= 12 * epsilon; grad_hess_fx.push_back(hess_diff); } fx = f(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/partial_derivative.hpp0000644000176200001440000000214414645137106025625 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_PARTIAL_DERIVATIVE_HPP #define STAN_MATH_MIX_FUNCTOR_PARTIAL_DERIVATIVE_HPP #include #include #include #include namespace stan { namespace math { /** * Return the partial derivative of the specified multivariate * function at the specified argument. 
* * @tparam T Argument type * @tparam F Function type * @param f Function * @param[in] x Argument vector * @param[in] n Index of argument with which to take derivative * @param[out] fx Value of function applied to argument * @param[out] dfx_dxn Value of partial derivative */ template void partial_derivative(const F& f, const Eigen::Matrix& x, int n, T& fx, T& dfx_dxn) { Eigen::Matrix, Eigen::Dynamic, 1> x_fvar(x.size()); for (int i = 0; i < x.size(); ++i) { x_fvar(i) = fvar(x(i), i == n); } fvar fx_fvar = f(x_fvar); fx = fx_fvar.val_; dfx_dxn = fx_fvar.d_; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/finite_diff_grad_hessian.hpp0000644000176200001440000000437614645137106026735 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_FINITE_DIFF_GRAD_HESSIAN_HPP #define STAN_MATH_MIX_FUNCTOR_FINITE_DIFF_GRAD_HESSIAN_HPP #include #include #include #include namespace stan { namespace math { /** * Calculate the value and the gradient of the hessian of the specified * function at the specified argument using second-order autodiff and * first-order finite difference. * *

The functor must implement * * * double * operator()(const * Eigen::Matrix&) * * * Reference: * * De Levie: An improved numerical approximation * for the first derivative, page 3 * * 4 calls to the function, f. * * @tparam F Type of function * @param[in] f Function * @param[in] x Argument to function * @param[out] fx Function applied to argument * @param[out] hess Hessian matrix * @param[out] grad_hess_fx gradient of Hessian of function at argument * @param[in] epsilon perturbation size */ template void finite_diff_grad_hessian(const F& f, const Eigen::VectorXd& x, double& fx, Eigen::MatrixXd& hess, std::vector& grad_hess_fx, double epsilon = 1e-04) { int d = x.size(); grad_hess_fx.clear(); Eigen::VectorXd x_temp(x); Eigen::VectorXd grad_auto(d); Eigen::MatrixXd hess_auto(d, d); Eigen::MatrixXd hess_diff(d, d); hessian(f, x, fx, grad_auto, hess); for (int i = 0; i < d; ++i) { double dummy_fx_eval; hess_diff.setZero(); x_temp(i) = x(i) + 2.0 * epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff = -hess_auto; x_temp(i) = x(i) + -2.0 * epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff += hess_auto; x_temp(i) = x(i) + epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff += 8.0 * hess_auto; x_temp(i) = x(i) + -epsilon; hessian(f, x_temp, dummy_fx_eval, grad_auto, hess_auto); hess_diff -= 8.0 * hess_auto; x_temp(i) = x(i); hess_diff /= 12.0 * epsilon; grad_hess_fx.push_back(hess_diff); } fx = f(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/grad_tr_mat_times_hessian.hpp0000644000176200001440000000272314645137106027150 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_GRAD_TR_MAT_TIMES_HESSIAN_HPP #define STAN_MATH_MIX_FUNCTOR_GRAD_TR_MAT_TIMES_HESSIAN_HPP #include #include #include #include #include #include namespace stan { namespace math { template void grad_tr_mat_times_hessian( const F& f, const Eigen::Matrix& x, const Eigen::Matrix& M, 
Eigen::Matrix& grad_tr_MH) { using Eigen::Dynamic; using Eigen::Matrix; // Run nested autodiff in this scope nested_rev_autodiff nested; grad_tr_MH.resize(x.size()); Matrix x_var(x.size()); for (int i = 0; i < x.size(); ++i) { x_var(i) = x(i); } Matrix, Dynamic, 1> x_fvar(x.size()); var sum(0.0); Matrix M_n(x.size()); for (int n = 0; n < x.size(); ++n) { for (int k = 0; k < x.size(); ++k) { M_n(k) = M(n, k); } for (int k = 0; k < x.size(); ++k) { x_fvar(k) = fvar(x_var(k), k == n); } fvar fx; fvar grad_fx_dot_v; gradient_dot_vector, double>(f, x_fvar, M_n, fx, grad_fx_dot_v); sum += grad_fx_dot_v.d_; } grad(sum.vi_); for (int i = 0; i < x.size(); ++i) { grad_tr_MH(i) = x_var(i).adj(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/hessian_times_vector.hpp0000644000176200001440000000305114645137106026162 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_HESSIAN_TIMES_VECTOR_HPP #define STAN_MATH_MIX_FUNCTOR_HESSIAN_TIMES_VECTOR_HPP #include #include #include #include #include namespace stan { namespace math { template void hessian_times_vector(const F& f, const Eigen::Matrix& x, const Eigen::Matrix& v, double& fx, Eigen::Matrix& Hv) { using Eigen::Matrix; // Run nested autodiff in this scope nested_rev_autodiff nested; Matrix x_var(x.size()); for (int i = 0; i < x_var.size(); ++i) { x_var(i) = x(i); } var fx_var; var grad_fx_var_dot_v; gradient_dot_vector(f, x_var, v, fx_var, grad_fx_var_dot_v); fx = fx_var.val(); grad(grad_fx_var_dot_v.vi_); Hv.resize(x.size()); for (int i = 0; i < x.size(); ++i) { Hv(i) = x_var(i).adj(); } } template void hessian_times_vector(const F& f, const Eigen::Matrix& x, const Eigen::Matrix& v, T& fx, Eigen::Matrix& Hv) { using Eigen::Matrix; Matrix grad; Matrix H; hessian(f, x, fx, grad, H); Hv = H * v; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/grad_hessian.hpp0000644000176200001440000000440414645137106024377 0ustar liggesusers#ifndef 
STAN_MATH_MIX_FUNCTOR_GRAD_HESSIAN_HPP #define STAN_MATH_MIX_FUNCTOR_GRAD_HESSIAN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Calculate the value, the Hessian, and the gradient of the Hessian * of the specified function at the specified argument. * *

The functor must implement * * * fvar\ \> * operator()(const Eigen::Matrix\ \>, * Eigen::Dynamic, 1\>&) * * * using only operations that are defined for * fvar and var. * * This latter constraint usually * requires the functions to be defined in terms of the libraries * defined in Stan or in terms of functions with appropriately * general namespace imports that eventually depend on functions * defined in Stan. * * @tparam F Type of function * @param[in] f Function * @param[in] x Argument to function * @param[out] fx Function applied to argument * @param[out] H Hessian of function at argument * @param[out] grad_H Gradient of the Hessian of function at argument */ template void grad_hessian( const F& f, const Eigen::Matrix& x, double& fx, Eigen::Matrix& H, std::vector >& grad_H) { using Eigen::Dynamic; using Eigen::Matrix; fx = f(x); int d = x.size(); H.resize(d, d); grad_H.resize(d, Matrix(d, d)); for (int i = 0; i < d; ++i) { for (int j = i; j < d; ++j) { // Run nested autodiff in this scope nested_rev_autodiff nested; Matrix >, Dynamic, 1> x_ffvar(d); for (int k = 0; k < d; ++k) { x_ffvar(k) = fvar >(fvar(x(k), i == k), fvar(j == k, 0)); } fvar > fx_ffvar = f(x_ffvar); H(i, j) = fx_ffvar.d_.d_.val(); H(j, i) = H(i, j); grad(fx_ffvar.d_.d_.vi_); for (int k = 0; k < d; ++k) { grad_H[i](j, k) = x_ffvar(k).val_.val_.adj(); grad_H[j](i, k) = grad_H[i](j, k); } } } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/gradient_dot_vector.hpp0000644000176200001440000000152114645137106025772 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_GRADIENT_DOT_VECTOR_HPP #define STAN_MATH_MIX_FUNCTOR_GRADIENT_DOT_VECTOR_HPP #include #include #include #include namespace stan { namespace math { template void gradient_dot_vector(const F& f, const Eigen::Matrix& x, const Eigen::Matrix& v, T1& fx, T1& grad_fx_dot_v) { using Eigen::Matrix; Matrix, Eigen::Dynamic, 1> x_fvar(x.size()); for (int i = 0; i < x.size(); ++i) { x_fvar(i) = fvar(x(i), 
v(i)); } fvar fx_fvar = f(x_fvar); fx = fx_fvar.val_; grad_fx_dot_v = fx_fvar.d_; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/mix/functor/hessian.hpp0000644000176200001440000000407414645137106023405 0ustar liggesusers#ifndef STAN_MATH_MIX_FUNCTOR_HESSIAN_HPP #define STAN_MATH_MIX_FUNCTOR_HESSIAN_HPP #include #include #include #include namespace stan { namespace math { /** * Calculate the value, the gradient, and the Hessian, * of the specified function at the specified argument in * O(N^2) time and O(N^2) space. * *

The functor must implement * * * fvar\ * operator()(const * Eigen::Matrix\, Eigen::Dynamic, 1\>&) * * * using only operations that are defined for * fvar and var. * * This latter constraint usually * requires the functions to be defined in terms of the libraries * defined in Stan or in terms of functions with appropriately * general namespace imports that eventually depend on functions * defined in Stan. * * @tparam F Type of function * @param[in] f Function * @param[in] x Argument to function * @param[out] fx Function applied to argument * @param[out] grad gradient of function at argument * @param[out] H Hessian of function at argument */ template void hessian(const F& f, const Eigen::Matrix& x, double& fx, Eigen::Matrix& grad, Eigen::Matrix& H) { H.resize(x.size(), x.size()); grad.resize(x.size()); // need to compute fx even with size = 0 if (x.size() == 0) { fx = f(x); return; } for (int i = 0; i < x.size(); ++i) { // Run nested autodiff in this scope nested_rev_autodiff nested; Eigen::Matrix, Eigen::Dynamic, 1> x_fvar(x.size()); for (int j = 0; j < x.size(); ++j) { x_fvar(j) = fvar(x(j), i == j); } fvar fx_fvar = f(x_fvar); grad(i) = fx_fvar.d_.val(); if (i == 0) { fx = fx_fvar.val_.val(); } stan::math::grad(fx_fvar.d_.vi_); for (int j = 0; j < x.size(); ++j) { H(i, j) = x_fvar(j).val_.adj(); } } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/fwd.hpp0000644000176200001440000000056114645137106020253 0ustar liggesusers#ifndef STAN_MATH_FWD_HPP #define STAN_MATH_FWD_HPP #include #ifdef STAN_OPENCL #include #endif #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/rev/0000755000176200001440000000000014645161272017555 5ustar liggesusersStanHeaders/inst/include/stan/math/rev/meta.hpp0000644000176200001440000000117414645137106021216 0ustar liggesusers#ifndef STAN_MATH_REV_META_HPP #define STAN_MATH_REV_META_HPP #include #include #include #include #include #include #include #include 
#include #include #include #include #endif StanHeaders/inst/include/stan/math/rev/fun/0000755000176200001440000000000014645161272020345 5ustar liggesusersStanHeaders/inst/include/stan/math/rev/fun/gamma_q.hpp0000644000176200001440000000345214645137106022463 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_GAMMA_Q_HPP #define STAN_MATH_REV_FUN_GAMMA_Q_HPP #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class gamma_q_vv_vari : public op_vv_vari { public: gamma_q_vv_vari(vari* avi, vari* bvi) : op_vv_vari(gamma_q(avi->val_, bvi->val_), avi, bvi) {} void chain() { avi_->adj_ += adj_ * grad_reg_inc_gamma(avi_->val_, bvi_->val_, tgamma(avi_->val_), digamma(avi_->val_)); bvi_->adj_ -= adj_ * boost::math::gamma_p_derivative(avi_->val_, bvi_->val_); } }; class gamma_q_vd_vari : public op_vd_vari { public: gamma_q_vd_vari(vari* avi, double b) : op_vd_vari(gamma_q(avi->val_, b), avi, b) {} void chain() { avi_->adj_ += adj_ * grad_reg_inc_gamma(avi_->val_, bd_, tgamma(avi_->val_), digamma(avi_->val_)); } }; class gamma_q_dv_vari : public op_dv_vari { public: gamma_q_dv_vari(double a, vari* bvi) : op_dv_vari(gamma_q(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ -= adj_ * boost::math::gamma_p_derivative(ad_, bvi_->val_); } }; } // namespace internal inline var gamma_q(const var& a, const var& b) { return var(new internal::gamma_q_vv_vari(a.vi_, b.vi_)); } inline var gamma_q(const var& a, double b) { return var(new internal::gamma_q_vd_vari(a.vi_, b)); } inline var gamma_q(double a, const var& b) { return var(new internal::gamma_q_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/atan2.hpp0000644000176200001440000002157714645137106022076 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ATAN2_HPP #define STAN_MATH_REV_FUN_ATAN2_HPP #include #include #include namespace stan { namespace math { /** * Return the principal value of the arc tangent, in radians, 
of * the first variable divided by the second (cmath). * * The partial derivatives are defined by * * \f$ \frac{\partial}{\partial x} \arctan \frac{x}{y} = \frac{y}{x^2 + y^2}\f$, * and * * \f$ \frac{\partial}{\partial y} \arctan \frac{x}{y} = \frac{-x}{x^2 + * y^2}\f$. * * @param a Numerator variable. * @param b Denominator variable. * @return The arc tangent of the fraction, in radians. */ inline var atan2(const var& a, const var& b) { return make_callback_var( std::atan2(a.val(), b.val()), [a, b](const auto& vi) mutable { double a_sq_plus_b_sq = (a.val() * a.val()) + (b.val() * b.val()); a.adj() += vi.adj_ * b.val() / a_sq_plus_b_sq; b.adj() += -vi.adj_ * a.val() / a_sq_plus_b_sq; }); } /** * Return the principal value of the arc tangent, in radians, of * the first variable divided by the second scalar (cmath). * * The derivative with respect to the variable is * * \f$ \frac{d}{d x} \arctan \frac{x}{c} = \frac{c}{x^2 + c^2}\f$. * * @param a Numerator variable. * @param b Denominator scalar. * @return The arc tangent of the fraction, in radians. */ inline var atan2(const var& a, double b) { return make_callback_var( std::atan2(a.val(), b), [a, b](const auto& vi) mutable { double a_sq_plus_b_sq = (a.val() * a.val()) + (b * b); a.adj() += vi.adj_ * b / a_sq_plus_b_sq; }); } /** * Return the principal value of the arc tangent, in radians, of * the first scalar divided by the second variable (cmath). * * The derivative with respect to the variable is * * \f$ \frac{\partial}{\partial y} \arctan \frac{c}{y} = \frac{-c}{c^2 + y^2}\f$. 
* * \f[ \mbox{atan2}(x, y) = \begin{cases} \arctan\left(\frac{x}{y}\right) & \mbox{if } -\infty\leq x \leq \infty, -\infty\leq y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{atan2}(x, y)}{\partial x} = \begin{cases} \frac{y}{x^2+y^2} & \mbox{if } -\infty\leq x\leq \infty, -\infty\leq y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{atan2}(x, y)}{\partial y} = \begin{cases} -\frac{x}{x^2+y^2} & \mbox{if } -\infty\leq x\leq \infty, -\infty\leq y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a Numerator scalar. * @param b Denominator variable. * @return The arc tangent of the fraction, in radians. */ inline var atan2(double a, const var& b) { return make_callback_var( std::atan2(a, b.val()), [a, b](const auto& vi) mutable { double a_sq_plus_b_sq = (a * a) + (b.val() * b.val()); b.adj() += -vi.adj_ * a / a_sq_plus_b_sq; }); } template * = nullptr, require_all_matrix_t* = nullptr> inline auto atan2(const Mat1& a, const Mat2& b) { if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = b; auto atan2_val = atan2(arena_a.val(), arena_b.val()); auto a_sq_plus_b_sq = to_arena((arena_a.val().array() * arena_a.val().array()) + (arena_b.val().array() * arena_b.val().array())); return make_callback_var( atan2(arena_a.val(), arena_b.val()), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_a.adj().array() += vi.adj().array() * arena_b.val().array() / a_sq_plus_b_sq; arena_b.adj().array() += -vi.adj().array() * arena_a.val().array() / a_sq_plus_b_sq; }); } else if (!is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = value_of(b); auto a_sq_plus_b_sq = to_arena((arena_a.val().array() * arena_a.val().array()) + (arena_b.array() * arena_b.array())); return make_callback_var( atan2(arena_a.val(), arena_b), 
[arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_a.adj().array() += vi.adj().array() * arena_b.array() / a_sq_plus_b_sq; }); } else if (!is_constant::value) { arena_t> arena_a = value_of(a); arena_t> arena_b = b; auto a_sq_plus_b_sq = to_arena((arena_a.array() * arena_a.array()) + (arena_b.val().array() * arena_b.val().array())); return make_callback_var( atan2(arena_a, arena_b.val()), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_b.adj().array() += -vi.adj().array() * arena_a.array() / a_sq_plus_b_sq; }); } } template * = nullptr, require_stan_scalar_t* = nullptr> inline auto atan2(const Scalar& a, const VarMat& b) { if (!is_constant::value && !is_constant::value) { var arena_a = a; arena_t> arena_b = b; auto atan2_val = atan2(arena_a.val(), arena_b.val()); auto a_sq_plus_b_sq = to_arena((arena_a.val() * arena_a.val()) + (arena_b.val().array() * arena_b.val().array())); return make_callback_var( atan2(arena_a.val(), arena_b.val()), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_a.adj() += (vi.adj().array() * arena_b.val().array() / a_sq_plus_b_sq) .sum(); arena_b.adj().array() += -vi.adj().array() * arena_a.val() / a_sq_plus_b_sq; }); } else if (!is_constant::value) { var arena_a = a; arena_t> arena_b = value_of(b); auto a_sq_plus_b_sq = to_arena((arena_a.val() * arena_a.val()) + (arena_b.array() * arena_b.array())); return make_callback_var( atan2(arena_a.val(), arena_b), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_a.adj() += (vi.adj().array() * arena_b.array() / a_sq_plus_b_sq).sum(); }); } else if (!is_constant::value) { double arena_a = value_of(a); arena_t> arena_b = b; auto a_sq_plus_b_sq = to_arena( (arena_a * arena_a) + (arena_b.val().array() * arena_b.val().array())); return make_callback_var( atan2(arena_a, arena_b.val()), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_b.adj().array() += -vi.adj().array() * arena_a / a_sq_plus_b_sq; }); } } template * = nullptr, 
require_stan_scalar_t* = nullptr> inline auto atan2(const VarMat& a, const Scalar& b) { if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; var arena_b = b; auto atan2_val = atan2(arena_a.val(), arena_b.val()); auto a_sq_plus_b_sq = to_arena((arena_a.val().array() * arena_a.val().array()) + (arena_b.val() * arena_b.val())); return make_callback_var( atan2(arena_a.val(), arena_b.val()), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_a.adj().array() += vi.adj().array() * arena_b.val() / a_sq_plus_b_sq; arena_b.adj() += -(vi.adj().array() * arena_a.val().array() / a_sq_plus_b_sq) .sum(); }); } else if (!is_constant::value) { arena_t> arena_a = a; double arena_b = value_of(b); auto a_sq_plus_b_sq = to_arena( (arena_a.val().array() * arena_a.val().array()) + (arena_b * arena_b)); return make_callback_var( atan2(arena_a.val(), arena_b), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_a.adj().array() += vi.adj().array() * arena_b / a_sq_plus_b_sq; }); } else if (!is_constant::value) { arena_t> arena_a = value_of(a); var arena_b = b; auto a_sq_plus_b_sq = to_arena((arena_a.array() * arena_a.array()) + (arena_b.val() * arena_b.val())); return make_callback_var( atan2(arena_a, arena_b.val()), [arena_a, arena_b, a_sq_plus_b_sq](auto& vi) mutable { arena_b.adj() += -(vi.adj().array() * arena_a.array() / a_sq_plus_b_sq).sum(); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/tan.hpp0000644000176200001440000000411614645137106021641 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TAN_HPP #define STAN_MATH_REV_FUN_TAN_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the tangent of a radian-scaled variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \tan x = \sec^2 x\f$. * * Where we use the trig identity * * \f$ \tan^2 x + 1 = \sec^2 x\f$. 
* \f[ \mbox{tan}(x) = \begin{cases} \tan(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{tan}(x)}{\partial x} = \begin{cases} \sec^2(x) & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable for radians of angle. * @return Tangent of variable. */ inline var tan(const var& a) { return make_callback_var(std::tan(a.val()), [a](const auto& vi) mutable { a.adj() += vi.adj() * (1.0 + vi.val() * vi.val()); }); } /** * Return the tangent of a radian-scaled variable (cmath). * * * @tparam Varmat a `var_value` with inner Eigen type * @param a Variable for radians of angle. * @return Tangent of variable. */ template * = nullptr> inline auto tan(const VarMat& a) { return make_callback_var(a.val().array().tan().matrix(), [a](const auto& vi) mutable { a.adj() += vi.adj().cwiseProduct( (1.0 + vi.val().array().square()).matrix()); }); } /** * Return the tangent of the complex argument. * * @param[in] z argument * @return tangent of the argument */ inline std::complex tan(const std::complex& z) { return stan::math::internal::complex_tan(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/svd_V.hpp0000644000176200001440000000433314645137106022141 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SVD_V_HPP #define STAN_MATH_REV_FUN_SVD_V_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Given input matrix m, return matrix V where `m = UDV^{T}` * * Adjoint update equation comes from Equation (4) in Differentiable Programming * Tensor Networks(H. Liao, J. Liu, et al., arXiv:1903.09650). 
* * @tparam EigMat type of input matrix * @param m MxN input matrix * @return Orthogonal matrix V */ template * = nullptr> inline auto svd_V(const EigMat& m) { using ret_type = return_var_matrix_t; if (unlikely(m.size() == 0)) { return ret_type(Eigen::MatrixXd(0, 0)); } const int M = std::min(m.rows(), m.cols()); auto arena_m = to_arena(m); Eigen::JacobiSVD svd( arena_m.val(), Eigen::ComputeThinU | Eigen::ComputeThinV); auto arena_D = to_arena(svd.singularValues()); arena_t arena_Fm(M, M); for (int i = 0; i < M; i++) { for (int j = 0; j < M; j++) { if (j == i) { arena_Fm(i, j) = 0.0; } else { arena_Fm(i, j) = 1.0 / (arena_D[j] - arena_D[i]) - 1.0 / (arena_D[i] + arena_D[j]); } } } auto arena_U = to_arena(svd.matrixU()); arena_t arena_V = svd.matrixV(); reverse_pass_callback([arena_m, arena_U, arena_D, arena_V, arena_Fm]() mutable { Eigen::MatrixXd VTVadj = arena_V.val_op().transpose() * arena_V.adj_op(); arena_m.adj() += 0.5 * arena_U * (arena_Fm.array() * (VTVadj - VTVadj.transpose()).array()) .matrix() * arena_V.val_op().transpose() + arena_U * arena_D.asDiagonal().inverse() * arena_V.adj_op().transpose() * (Eigen::MatrixXd::Identity(arena_m.cols(), arena_m.cols()) - arena_V.val_op() * arena_V.val_op().transpose()); }); return ret_type(arena_V); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/singular_values.hpp0000644000176200001440000000262114645137106024261 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SINGULAR_VALUES_HPP #define STAN_MATH_REV_FUN_SINGULAR_VALUES_HPP #include #include #include #include namespace stan { namespace math { /** * Return the singular values of the specified matrix. * * Adjoint update equation comes from Equation (4) in Differentiable Programming * Tensor Networks(H. Liao, J. Liu, et al., arXiv:1903.09650). 
* * @tparam EigMat type of input matrix * @param m MxN input matrix * @return Singular values of matrix */ template * = nullptr> inline auto singular_values(const EigMat& m) { using ret_type = return_var_matrix_t; if (unlikely(m.size() == 0)) { return ret_type(Eigen::VectorXd(0)); } auto arena_m = to_arena(m); Eigen::JacobiSVD svd( arena_m.val(), Eigen::ComputeThinU | Eigen::ComputeThinV); arena_t singular_values = svd.singularValues(); auto arena_U = to_arena(svd.matrixU()); auto arena_V = to_arena(svd.matrixV()); reverse_pass_callback([arena_m, arena_U, singular_values, arena_V]() mutable { arena_m.adj() += arena_U * singular_values.adj().asDiagonal() * arena_V.transpose(); }); return ret_type(singular_values); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/append_row.hpp0000644000176200001440000001324014645137106023213 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_APPEND_ROW_HPP #define STAN_MATH_REV_FUN_APPEND_ROW_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the result of stacking the rows of the first argument * matrix on top of the second argument matrix. * * Given input types result in following outputs: * (matrix, matrix) -> matrix, * (matrix, row_vector) -> matrix, * (row_vector, matrix) -> matrix, * (row_vector, row_vector) -> matrix, * (vector, vector) -> vector. * * @tparam T1 A `var_value` with inner matrix type * @tparam T1 A `var_value` with inner matrix type * * @param A First matrix. * @param B Second matrix. * @return Result of stacking first matrix on top of second. 
*/ template * = nullptr> inline auto append_row(const T1& A, const T2& B) { check_size_match("append_row", "columns of A", A.cols(), "columns of B", B.cols()); if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; return make_callback_var( append_row(value_of(arena_A), value_of(arena_B)), [arena_A, arena_B](auto& vi) mutable { arena_A.adj() += vi.adj().topRows(arena_A.rows()); arena_B.adj() += vi.adj().bottomRows(arena_B.rows()); }); } else if (!is_constant::value) { arena_t> arena_A = A; return make_callback_var(append_row(value_of(arena_A), value_of(B)), [arena_A](auto& vi) mutable { arena_A.adj() += vi.adj().topRows(arena_A.rows()); }); } else { arena_t> arena_B = B; return make_callback_var(append_row(value_of(A), value_of(arena_B)), [arena_B](auto& vi) mutable { arena_B.adj() += vi.adj().bottomRows(arena_B.rows()); }); } } /** * Return the result of stacking an scalar on top of the * a vector, with the result being a vector. * * This function applies to (scalar, vector) and returns a vector. * * @tparam Scal type of the scalar * @tparam ColVec A `var_value` with inner column vector type. * * @param A scalar. * @param B vector. * @return Result of stacking the scalar on top of the vector. 
*/ template * = nullptr, require_t>* = nullptr> inline auto append_row(const Scal& A, const var_value& B) { if (!is_constant::value && !is_constant::value) { var arena_A = A; arena_t> arena_B = B; return make_callback_var(append_row(value_of(arena_A), value_of(arena_B)), [arena_A, arena_B](auto& vi) mutable { arena_A.adj() += vi.adj().coeff(0); arena_B.adj() += vi.adj().tail(arena_B.size()); }); } else if (!is_constant::value) { var arena_A = A; return make_callback_var( append_row(value_of(arena_A), value_of(B)), [arena_A](auto& vi) mutable { arena_A.adj() += vi.adj().coeff(0); }); } else { arena_t> arena_B = B; return make_callback_var(append_row(value_of(A), value_of(arena_B)), [arena_B](auto& vi) mutable { arena_B.adj() += vi.adj().tail(arena_B.size()); }); } } /** * Return the result of stacking a vector on top of the * an scalar, with the result being a vector. * * This function applies to (vector, scalar) and returns a vector. * * @tparam ColVec a `var_value` with inner column vector type. * @tparam Scal type of the scalar * * @param A vector. * @param B scalar. * @return Result of stacking the vector on top of the scalar. 
*/ template >* = nullptr, require_stan_scalar_t* = nullptr> inline auto append_row(const var_value& A, const Scal& B) { if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; var arena_B = B; return make_callback_var(append_row(value_of(arena_A), value_of(arena_B)), [arena_A, arena_B](auto& vi) mutable { arena_A.adj() += vi.adj().head(arena_A.size()); arena_B.adj() += vi.adj().coeff(vi.adj().size() - 1); }); } else if (!is_constant::value) { arena_t> arena_A = A; return make_callback_var(append_row(value_of(arena_A), value_of(B)), [arena_A](auto& vi) mutable { arena_A.adj() += vi.adj().head(arena_A.size()); }); } else { arena_t> arena_B = B; return make_callback_var(append_row(value_of(A), value_of(arena_B)), [arena_B](auto& vi) mutable { arena_B.adj() += vi.adj().coeff(vi.adj().size() - 1); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/atan.hpp0000644000176200001440000000465114645137106022006 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ATAN_HPP #define STAN_MATH_REV_FUN_ATAN_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the principal value of the arc tangent, in radians, of the * specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \arctan x = \frac{1}{1 + x^2}\f$. * * \f[ \mbox{atan}(x) = \begin{cases} \arctan(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{atan}(x)}{\partial x} = \begin{cases} \frac{\partial\, \arctan(x)}{\partial x} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial \, \arctan(x)}{\partial x} = \frac{1}{x^2+1} \f] * * @param x Variable in range [-1, 1]. * @return Arc tangent of variable, in radians. 
*/ inline var atan(const var& x) { return make_callback_var(std::atan(x.val()), [x](const auto& vi) mutable { x.adj() += vi.adj() / (1.0 + (x.val() * x.val())); }); } /** * Return the principal value of the arc tangent, in radians, of the * specified variable (cmath). * * * @tparam Varmat a `var_value` with inner Eigen type * @param x Variable in range [-1, 1]. * @return Arc tangent of variable, in radians. */ template * = nullptr> inline auto atan(const VarMat& x) { return make_callback_var( x.val().array().atan().matrix(), [x](const auto& vi) mutable { x.adj().array() += vi.adj().array() / (1.0 + (x.val().array().square())); }); } /** * Return the arc tangent of the complex argument. * * @param[in] z argument * @return arc tangent of the argument */ inline std::complex atan(const std::complex& z) { return stan::math::internal::complex_atan(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/atanh.hpp0000644000176200001440000000502414645137106022151 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ATANH_HPP #define STAN_MATH_REV_FUN_ATANH_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * The inverse hyperbolic tangent function for variables (C99). * * The derivative is defined by * * \f$\frac{d}{dx} \mbox{atanh}(x) = \frac{1}{1 - x^2}\f$. 
* \f[ \mbox{atanh}(x) = \begin{cases} \textrm{NaN} & \mbox{if } x < -1\\ \tanh^{-1}(x) & \mbox{if } -1\leq x \leq 1 \\ \textrm{NaN} & \mbox{if } x > 1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{atanh}(x)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < -1\\ \frac{\partial\, \tanh^{-1}(x)}{\partial x} & \mbox{if } -1\leq x\leq 1 \\ \textrm{NaN} & \mbox{if } x > 1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \tanh^{-1}(x)=\frac{1}{2}\ln\left(\frac{1+x}{1-x}\right) \f] \f[ \frac{\partial \, \tanh^{-1}(x)}{\partial x} = \frac{1}{1-x^2} \f] * * @param x The variable. * @return Inverse hyperbolic tangent of the variable. * @throw std::domain_error if a < -1 or a > 1 */ inline var atanh(const var& x) { return make_callback_var(atanh(x.val()), [x](const auto& vi) mutable { x.adj() += vi.adj() / (1.0 - x.val() * x.val()); }); } /** * The inverse hyperbolic tangent function for variables (C99). * * @tparam Varmat a `var_value` with inner Eigen type * @param x The variable. * @return Inverse hyperbolic tangent of the variable. * @throw std::domain_error if a < -1 or a > 1 */ template * = nullptr> inline auto atanh(const VarMat& x) { return make_callback_var( x.val().unaryExpr([](const auto x) { return atanh(x); }), [x](const auto& vi) mutable { x.adj().array() += vi.adj().array() / (1.0 - x.val().array().square()); }); } /** * Return the hyperbolic arc tangent of the complex argument. 
* * @param[in] z argument * @return hyperbolic arc tangent of the argument */ inline std::complex atanh(const std::complex& z) { return stan::math::internal::complex_atanh(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_inv_logit_diff.hpp0000644000176200001440000000464314645137106024707 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_INV_LOGIT_DIFF_HPP #define STAN_MATH_REV_FUN_LOG_INV_LOGIT_DIFF_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the natural logarithm of the difference of the * inverse logits of the specified arguments and its gradients. * \f[ \mathrm{log\_inv\_logit\_diff}(x,y) = \ln\left(\frac{1}{1+\exp(-x)}-\frac{1}{1+\exp(-y)}\right) \f] \f[ \frac{\partial }{\partial x} = -\frac{e^x}{e^y-e^x}-\frac{e^x}{e^x+1} \f] \f[ \frac{\partial }{\partial x} = -\frac{e^y}{e^x-e^y}-\frac{e^y}{e^y+1} \f] * * @tparam T1 Type of x argument * @tparam T2 Type of y argument * @param a Argument * @param b Argument * @return Result of log difference of inverse logits of arguments * and gradients */ namespace internal { class log_inv_logit_diff_vv_vari : public op_vv_vari { public: log_inv_logit_diff_vv_vari(vari* avi, vari* bvi) : op_vv_vari(log_inv_logit_diff(avi->val_, bvi->val_), avi, bvi) {} void chain() { avi_->adj_ -= adj_ * (inv(expm1(bvi_->val_ - avi_->val_)) + inv_logit(avi_->val_)); bvi_->adj_ -= adj_ * (inv(expm1(avi_->val_ - bvi_->val_)) + inv_logit(bvi_->val_)); } }; class log_inv_logit_diff_vd_vari : public op_vd_vari { public: log_inv_logit_diff_vd_vari(vari* avi, double b) : op_vd_vari(log_inv_logit_diff(avi->val_, b), avi, b) {} void chain() { avi_->adj_ -= adj_ * (inv(expm1(bd_ - avi_->val_)) + inv_logit(avi_->val_)); } }; class log_inv_logit_diff_dv_vari : public op_dv_vari { public: log_inv_logit_diff_dv_vari(double a, vari* bvi) : op_dv_vari(log_inv_logit_diff(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ -= adj_ * (inv(expm1(ad_ - 
bvi_->val_)) + inv_logit(bvi_->val_)); } }; } // namespace internal inline var log_inv_logit_diff(const var& a, double b) { return var(new internal::log_inv_logit_diff_vd_vari(a.vi_, b)); } inline var log_inv_logit_diff(const var& a, const var& b) { return var(new internal::log_inv_logit_diff_vv_vari(a.vi_, b.vi_)); } inline var log_inv_logit_diff(double a, const var& b) { return var(new internal::log_inv_logit_diff_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/erf.hpp0000644000176200001440000000327214645137106021635 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ERF_HPP #define STAN_MATH_REV_FUN_ERF_HPP #include #include #include #include #include namespace stan { namespace math { /** * The error function for variables (C99). * * The derivative is * * \f$\frac{d}{dx} \mbox{erf}(x) = \frac{2}{\sqrt{\pi}} \exp(-x^2)\f$. * * \f[ \mbox{erf}(x) = \begin{cases} \operatorname{erf}(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{erf}(x)}{\partial x} = \begin{cases} \frac{\partial\, \operatorname{erf}(x)}{\partial x} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \operatorname{erf}(x)=\frac{2}{\sqrt{\pi}}\int_0^x e^{-t^2}dt \f] \f[ \frac{\partial \, \operatorname{erf}(x)}{\partial x} = \frac{2}{\sqrt{\pi}} e^{-x^2} \f] * * @param a The variable. * @return Error function applied to the variable. 
*/ inline var erf(const var& a) { auto precomp_erf = TWO_OVER_SQRT_PI * std::exp(-a.val() * a.val()); return make_callback_var(erf(a.val()), [a, precomp_erf](auto& vi) mutable { a.adj() += vi.adj() * precomp_erf; }); } template * = nullptr> inline auto erf(const var_value& a) { auto precomp_erf = to_arena(TWO_OVER_SQRT_PI * (-a.val().array().square()).exp()); return make_callback_var(erf(a.val()), [a, precomp_erf](auto& vi) mutable { a.adj().array() += vi.adj().array() * precomp_erf; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/as_bool.hpp0000644000176200001440000000071114645137106022472 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_AS_BOOL_HPP #define STAN_MATH_REV_FUN_AS_BOOL_HPP #include #include namespace stan { namespace math { /** * Return 1 if the argument is unequal to zero and 0 otherwise. * * @param v Value. * @return 1 if argument is equal to zero (or NaN) and 0 otherwise. */ inline int as_bool(const var& v) { return 0.0 != v.vi_->val_; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/identity_constrain.hpp0000644000176200001440000000260214645137106024766 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_IDENTITY_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_IDENTITY_CONSTRAIN_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the result of applying the identity constraint * transform to the input. This specialization handles convert Eigen matrices * of doubles to var matrix types. * * @tparam T Any type. * @tparam Types Any type with one of `T` and `Types` being a `var_value` * matrix. * @param[in] x object * @return transformed input */ template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto identity_constrain(T&& x, Types&&... /* args */) { return var_value>(x); } template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto identity_constrain(T&& x, Types&&... 
/* args */) { return to_var_value(x); } template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto identity_constrain(T&& x, Types&&... /* args */) { return x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/lgamma.hpp0000644000176200001440000000157714645137106022325 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LGAMMA_HPP #define STAN_MATH_REV_FUN_LGAMMA_HPP #include #include #include #include namespace stan { namespace math { /** * The log gamma function for variables (C99). * * The derivative is the digamma function, * * \f$\frac{d}{dx} \Gamma(x) = \psi^{(0)}(x)\f$. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a The variable. * @return Log gamma of the variable. */ template * = nullptr> inline auto lgamma(const var_value& a) { return make_callback_var(lgamma(a.val()), [a](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) * as_array_or_scalar(digamma(a.val())); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/norm1.hpp0000644000176200001440000000242114645137106022110 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_NORM1_HPP #define STAN_MATH_REV_FUN_NORM1_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the L1 norm of a vector of var. * * @tparam T type of the vector (must have one compile-time dimension equal to * 1) * @param[in] v Vector. * @return L1 norm of v. */ template * = nullptr> inline var norm1(const T& v) { arena_t arena_v = v; var res = norm1(arena_v.val()); reverse_pass_callback([res, arena_v]() mutable { arena_v.adj().array() += res.adj() * sign(arena_v.val().array()); }); return res; } /** * Returns the L1 norm of a `var_value`. * * @tparam A `var_value<>` whose inner type has one compile-time row or column. * @param[in] v Vector. * @return L1 norm of v. 
*/ // template * = nullptr> inline var norm1(const T& v) { return make_callback_vari(norm1(v.val()), [v](const auto& res) mutable { v.adj().array() += res.adj() * sign(v.val().array()); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/read_cov_L.hpp0000644000176200001440000000352614645137106023120 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_READ_COV_L_HPP #define STAN_MATH_REV_FUN_READ_COV_L_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * This is the function that should be called prior to evaluating * the density of any elliptical distribution * * @tparam T_CPCs type of CPCs vector (must be a `var_value` where `T` * inherits from EigenBase) * @tparam T_sds type of sds vector (must be a `var_value` where `T` * inherits from EigenBase) * @param CPCs on (-1, 1) * @param sds on (0, inf) * @param log_prob the log probability value to increment with the Jacobian * @return Cholesky factor of covariance matrix for specified * partial correlations. 
*/ template * = nullptr, require_vt_same* = nullptr> inline auto read_cov_L(const T_CPCs& CPCs, const T_sds& sds, scalar_type_t& log_prob) { size_t K = sds.rows(); // adjust due to transformation from correlations to covariances log_prob += (sum(log(sds.val())) + LOG_TWO) * K; auto corr_L = read_corr_L(CPCs, K, log_prob); var_value res = sds.val().matrix().asDiagonal() * corr_L.val(); reverse_pass_callback([sds, corr_L, log_prob, res]() mutable { size_t K = sds.size(); corr_L.adj() += sds.val().matrix().asDiagonal() * res.adj(); sds.adj() += (res.adj().cwiseProduct(corr_L.val())).rowwise().sum(); sds.adj() += (K * log_prob.adj() / sds.val().array()).matrix(); }); return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv_erfc.hpp0000644000176200001440000000260314645137106022651 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_ERFC_HPP #define STAN_MATH_REV_FUN_INV_ERFC_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * The inverse complementary error function for variables. * * The derivative is: \f[ \frac{d}{dx} \left(\mbox{erfc}^{-1}(x)\right) = -\frac{\sqrt{\pi}}{2}e^{\mbox{erfc}^{-1}(x)^2} \f]. * * * @param a The variable. * @return Inverse complementary error function applied to the variable. 
*/ inline var inv_erfc(const var& a) { auto precomp_inv_erfc = inv_erfc(a.val()); return make_callback_var( precomp_inv_erfc, [a, precomp_inv_erfc](auto& vi) mutable { a.adj() -= vi.adj() * exp(LOG_SQRT_PI - LOG_TWO + square(precomp_inv_erfc)); }); } template * = nullptr> inline auto inv_erfc(const var_value& a) { auto precomp_inv_erfc = to_arena(inv_erfc(a.val())); return make_callback_var( precomp_inv_erfc, [a, precomp_inv_erfc](auto& vi) mutable { a.adj().array() -= vi.adj().array() * exp(LOG_SQRT_PI - LOG_TWO + square(precomp_inv_erfc).array()); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fabs.hpp0000644000176200001440000000515614645137106021777 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FABS_HPP #define STAN_MATH_REV_FUN_FABS_HPP #include #include #include namespace stan { namespace math { /** * Return the absolute value of the variable (cmath). * * Choosing an arbitrary value at the non-differentiable point 0, * * \f$\frac{d}{dx}|x| = \mbox{sgn}(x)\f$. * * where \f$\mbox{sgn}(x)\f$ is the signum function, taking values * -1 if \f$x < 0\f$, 0 if \f$x == 0\f$, and 1 if \f$x == 1\f$. * * The function abs() provides the same behavior, with * abs() defined in stdlib.h and fabs() * defined in cmath. * The derivative is 0 if the input is 0. * * Returns std::numeric_limits::quiet_NaN() for NaN inputs. * * \f[ \mbox{fabs}(x) = \begin{cases} |x| & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fabs}(x)}{\partial x} = \begin{cases} -1 & \mbox{if } x < 0 \\ 0 & \mbox{if } x = 0 \\ 1 & \mbox{if } x > 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Input variable. * @return Absolute value of variable. 
*/ inline var fabs(const var& a) { if (a.val() > 0.0) { return a; } else if (a.val() < 0.0) { return -a; } else if (a.val() == 0) { return var(new vari(0)); } else { return make_callback_var(NOT_A_NUMBER, [a](auto& vi) mutable { a.adj() = NOT_A_NUMBER; }); } } /** * Return the absolute value of the variable (cmath). * * @tparam Varmat a `var_value` with inner Eigen type * @param a Input variable. * @return Absolute value of variable. */ template * = nullptr> inline auto fabs(const VarMat& a) { return make_callback_var( a.val().unaryExpr([](const auto x) { if (unlikely(is_nan(x))) { return NOT_A_NUMBER; } else { return std::abs(x); } }), [a](const auto& vi) mutable { for (Eigen::Index j = 0; j < vi.cols(); ++j) { for (Eigen::Index i = 0; i < vi.rows(); ++i) { const auto x = a.val().coeffRef(i, j); if (unlikely(is_nan(x))) { a.adj().coeffRef(i, j) = NOT_A_NUMBER; } else if (x < 0.0) { a.adj().coeffRef(i, j) -= vi.adj_.coeff(i, j); } else { a.adj().coeffRef(i, j) += vi.adj_.coeff(i, j); } } } }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/grad_inc_beta.hpp0000644000176200001440000000307514645137106023623 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_GRAD_INC_BETA_HPP #define STAN_MATH_REV_FUN_GRAD_INC_BETA_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Gradient of the incomplete beta function beta(a, b, z) with * respect to the first two arguments. * * Uses the equivalence to a hypergeometric function. 
See * http://dlmf.nist.gov/8.17#ii * * @param[out] g1 d/da * @param[out] g2 d/db * @param[in] a a * @param[in] b b * @param[in] z z */ inline void grad_inc_beta(var& g1, var& g2, const var& a, const var& b, const var& z) { var c1 = log(z); var c2 = log1m(z); var c3 = beta(a, b) * inc_beta(a, b, z); var C = exp(a * c1 + b * c2) / a; var dF1 = 0; var dF2 = 0; var dF3 = 0; var dFz = 0; if (value_of_rec(C)) { std::forward_as_tuple(dF1, dF2, dF3, dFz) = grad_2F1(a + b, var(1.0), a + 1, z); } g1 = (c1 - 1.0 / a) * c3 + C * (dF1 + dF3); g2 = c2 * c3 + C * dF1; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/lub_constrain.hpp0000644000176200001440000006414414645137106023730 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LUB_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_LUB_CONSTRAIN_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the lower and upper-bounded scalar derived by * transforming the specified free scalar given the specified * lower and upper bounds. * *

The transform is the transformed and scaled inverse logit, * *

\f$f(x) = L + (U - L) \mbox{logit}^{-1}(x)\f$ * * @tparam T Scalar. * @tparam L Scalar. * @tparam U Scalar. * @param[in] x Free scalar to transform. * @param[in] lb Lower bound. * @param[in] ub Upper bound. * @return Lower- and upper-bounded scalar derived from transforming * the free scalar. * @throw std::domain_error if ub <= lb */ template * = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { using std::exp; const auto lb_val = value_of(lb); const auto ub_val = value_of(ub); const bool is_lb_inf = lb_val == NEGATIVE_INFTY; const bool is_ub_inf = ub_val == INFTY; if (unlikely(is_ub_inf && is_lb_inf)) { return identity_constrain(x, ub, lb); } else if (unlikely(is_ub_inf)) { return lb_constrain(identity_constrain(x, ub), lb); } else if (unlikely(is_lb_inf)) { return ub_constrain(identity_constrain(x, lb), ub); } else { check_less("lub_constrain", "lb", lb_val, ub_val); auto diff = ub_val - lb_val; double inv_logit_x = inv_logit(value_of(x)); return make_callback_var( diff * inv_logit_x + lb_val, [x, ub, lb, diff, inv_logit_x](auto& vi) mutable { if (!is_constant::value) { forward_as(x).adj() += vi.adj() * diff * inv_logit_x * (1.0 - inv_logit_x); } if (!is_constant::value) { forward_as(lb).adj() += vi.adj() * (1.0 - inv_logit_x); } if (!is_constant::value) { forward_as(ub).adj() += vi.adj() * inv_logit_x; } }); } } /** * Return the lower- and upper-bounded scalar derived by * transforming the specified free scalar given the specified * lower and upper bounds and increment the specified log * density with the log absolute Jacobian determinant. * *

The transform is as defined in * lub_constrain(T, double, double). The log absolute * Jacobian determinant is given by * *

\f$\log \left| \frac{d}{dx} \left( * L + (U-L) \mbox{logit}^{-1}(x) \right) * \right|\f$ * *

\f$ {} = \log | * (U-L) * \, (\mbox{logit}^{-1}(x)) * \, (1 - \mbox{logit}^{-1}(x)) |\f$ * *

\f$ {} = \log (U - L) + \log (\mbox{logit}^{-1}(x)) * + \log (1 - \mbox{logit}^{-1}(x))\f$ * * @tparam T Scalar. * @tparam L Scalar. * @tparam U Scalar. * @param[in] x Free scalar to transform. * @param[in] lb Lower bound. * @param[in] ub Upper bound. * @param[in,out] lp Log probability scalar reference. * @return Lower- and upper-bounded scalar derived from transforming * the free scalar. * @throw std::domain_error if ub <= lb */ template * = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { using std::exp; const auto lb_val = value_of(lb); const auto ub_val = value_of(ub); const bool is_lb_inf = lb_val == NEGATIVE_INFTY; const bool is_ub_inf = ub_val == INFTY; if (unlikely(is_ub_inf && is_lb_inf)) { return identity_constrain(x, ub, lb); } else if (unlikely(is_ub_inf)) { return lb_constrain(identity_constrain(x, ub), lb, lp); } else if (unlikely(is_lb_inf)) { return ub_constrain(identity_constrain(x, lb), ub, lp); } else { check_less("lub_constrain", "lb", lb_val, ub_val); auto neg_abs_x = -abs(value_of(x)); auto diff = ub_val - lb_val; double inv_logit_x = inv_logit(value_of(x)); lp += (log(diff) + (neg_abs_x - (2.0 * log1p_exp(neg_abs_x)))); return make_callback_var( diff * inv_logit_x + lb_val, [x, ub, lb, diff, lp, inv_logit_x](auto& vi) mutable { if (!is_constant::value) { forward_as(x).adj() += vi.adj() * diff * inv_logit_x * (1.0 - inv_logit_x) + lp.adj() * (1.0 - 2.0 * inv_logit_x); } if (!is_constant::value && !is_constant::value) { const auto one_over_diff = 1.0 / diff; forward_as(lb).adj() += vi.adj() * (1.0 - inv_logit_x) + -one_over_diff * lp.adj(); forward_as(ub).adj() += vi.adj() * inv_logit_x + one_over_diff * lp.adj(); } else if (!is_constant::value) { forward_as(lb).adj() += vi.adj() * (1.0 - inv_logit_x) + (-1.0 / diff) * lp.adj(); } else if (!is_constant::value) { forward_as(ub).adj() += vi.adj() * inv_logit_x + (1.0 / diff) * lp.adj(); } }); } } /** * Specialization for 
Eigen matrix and scalar bounds. */ template * = nullptr, require_all_stan_scalar_t* = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { using std::exp; using ret_type = return_var_matrix_t; const auto lb_val = value_of(lb); const auto ub_val = value_of(ub); const bool is_lb_inf = lb_val == NEGATIVE_INFTY; const bool is_ub_inf = ub_val == INFTY; if (unlikely(is_ub_inf && is_lb_inf)) { return ret_type(identity_constrain(x, ub, lb)); } else if (unlikely(is_ub_inf)) { return ret_type(lb_constrain(identity_constrain(x, ub), lb)); } else if (unlikely(is_lb_inf)) { return ret_type(ub_constrain(identity_constrain(x, lb), ub)); } else { arena_t arena_x = x; check_less("lub_constrain", "lb", lb_val, ub_val); const auto diff = ub_val - lb_val; auto inv_logit_x = to_arena(inv_logit(arena_x.val().array())); arena_t ret = diff * inv_logit_x + lb_val; reverse_pass_callback([arena_x, ub, lb, ret, diff, inv_logit_x]() mutable { if (!is_constant::value) { using T_var = arena_t>; forward_as(arena_x).adj().array() += ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x); } if (!is_constant::value) { forward_as(lb).adj() += (ret.adj().array() * (1.0 - inv_logit_x)).sum(); } if (!is_constant::value) { forward_as(ub).adj() += (ret.adj().array() * inv_logit_x).sum(); } }); return ret_type(ret); } } /** * Specialization for Eigen matrix and scalar bounds plus lp. 
*/ template * = nullptr, require_all_stan_scalar_t* = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { using std::exp; using ret_type = return_var_matrix_t; const auto lb_val = value_of(lb); const auto ub_val = value_of(ub); const bool is_lb_inf = lb_val == NEGATIVE_INFTY; const bool is_ub_inf = ub_val == INFTY; if (unlikely(is_ub_inf && is_lb_inf)) { return ret_type(identity_constrain(x, ub, lb)); } else if (unlikely(is_ub_inf)) { return ret_type(lb_constrain(identity_constrain(x, ub), lb, lp)); } else if (unlikely(is_lb_inf)) { return ret_type(ub_constrain(identity_constrain(x, lb), ub, lp)); } else { check_less("lub_constrain", "lb", lb_val, ub_val); arena_t arena_x = x; auto neg_abs_x = to_arena(-(value_of(arena_x).array()).abs()); auto diff = ub_val - lb_val; lp += (log(diff) + (neg_abs_x - (2.0 * log1p_exp(neg_abs_x)))).sum(); auto inv_logit_x = to_arena(inv_logit(value_of(arena_x).array())); arena_t ret = diff * inv_logit_x + lb_val; reverse_pass_callback( [arena_x, ub, lb, ret, lp, diff, inv_logit_x]() mutable { if (!is_constant::value) { forward_as>>(arena_x).adj().array() += ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x) + lp.adj() * (1.0 - 2.0 * inv_logit_x); } if (!is_constant::value && !is_constant::value) { const auto lp_calc = lp.adj() * ret.size(); const auto one_over_diff = 1.0 / diff; forward_as(lb).adj() += (ret.adj().array() * (1.0 - inv_logit_x)).sum() + -one_over_diff * lp_calc; forward_as(ub).adj() += (ret.adj().array() * inv_logit_x).sum() + one_over_diff * lp_calc; } else if (!is_constant::value) { forward_as(lb).adj() += (ret.adj().array() * (1.0 - inv_logit_x)).sum() + -(1.0 / diff) * lp.adj() * ret.size(); } else if (!is_constant::value) { forward_as(ub).adj() += (ret.adj().array() * inv_logit_x).sum() + (1.0 / diff) * lp.adj() * ret.size(); } }); return ret_type(ret); } } /** * Specialization for Eigen matrix with matrix lower bound and scalar upper * 
bound. */ template * = nullptr, require_stan_scalar_t* = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { using std::exp; using ret_type = return_var_matrix_t; const auto ub_val = value_of(ub); const bool is_ub_inf = ub_val == INFTY; if (unlikely(is_ub_inf)) { return eval(lb_constrain(identity_constrain(x, ub), lb)); } else { arena_t arena_x = x; arena_t arena_lb = lb; const auto lb_val = value_of(arena_lb).array().eval(); check_less("lub_constrain", "lb", lb_val, ub_val); auto is_lb_inf = to_arena((lb_val == NEGATIVE_INFTY)); auto diff = to_arena(ub_val - lb_val); auto inv_logit_x = to_arena(inv_logit(value_of(arena_x).array())); arena_t ret = (is_lb_inf).select( ub_val - value_of(arena_x).array().exp(), diff * inv_logit_x + lb_val); reverse_pass_callback([arena_x, ub, arena_lb, ret, diff, inv_logit_x, is_lb_inf]() mutable { using T_var = arena_t>; using L_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj().array() += (is_lb_inf).select( ret.adj().array() * -value_of(arena_x).array().exp(), ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x)); } if (!is_constant::value) { forward_as(ub).adj() += (is_lb_inf) .select(ret.adj().array(), ret.adj().array() * inv_logit_x) .sum(); } if (!is_constant::value) { forward_as(arena_lb).adj().array() += (is_lb_inf).select(0, ret.adj().array() * (1.0 - inv_logit_x)); } }); return ret_type(ret); } } /** * Specialization for Eigen matrix with matrix lower bound and scalar upper * bound plus lp. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, std::decay_t>& lp) { using std::exp; using ret_type = return_var_matrix_t; const auto ub_val = value_of(ub); const bool is_ub_inf = ub_val == INFTY; if (unlikely(is_ub_inf)) { return ret_type(lb_constrain(identity_constrain(x, ub), lb, lp)); } else { arena_t arena_x = x; arena_t arena_lb = lb; const auto arena_x_val = to_arena(value_of(arena_x).array()); const auto lb_val = value_of(arena_lb).array().eval(); check_less("lub_constrain", "lb", lb_val, ub_val); auto is_lb_inf = to_arena((lb_val == NEGATIVE_INFTY)); auto diff = to_arena(ub_val - lb_val); auto neg_abs_x = to_arena(-arena_x_val.abs()); auto inv_logit_x = to_arena(inv_logit(arena_x_val)); arena_t ret = (is_lb_inf).select(ub_val - arena_x_val.exp(), diff * inv_logit_x + lb_val); lp += (is_lb_inf) .select(arena_x_val, log(diff) + (neg_abs_x - (2.0 * log1p_exp(neg_abs_x)))) .sum(); reverse_pass_callback([arena_x, arena_x_val, ub, arena_lb, ret, lp, diff, inv_logit_x, is_lb_inf]() mutable { using T_var = arena_t>; using L_var = arena_t>; const auto lp_adj = lp.adj(); if (!is_constant::value) { const auto x_sign = arena_x_val.sign().eval(); forward_as(arena_x).adj().array() += (is_lb_inf).select( ret.adj().array() * -arena_x_val.exp() + lp_adj, ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x) + lp.adj() * (1.0 - 2.0 * inv_logit_x)); } if (!is_constant::value) { forward_as(arena_lb).adj().array() += (is_lb_inf).select(0, ret.adj().array() * (1.0 - inv_logit_x) + -(1.0 / diff) * lp_adj); } if (!is_constant::value) { forward_as(ub).adj() += (is_lb_inf) .select(ret.adj().array(), ret.adj().array() * inv_logit_x + (1.0 / diff) * lp_adj) .sum(); } }); return ret_type(ret); } } /** * Specialization for Eigen matrix with scalar lower bound and matrix upper * bound. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { using std::exp; using ret_type = return_var_matrix_t; const auto lb_val = value_of(lb); const bool is_lb_inf = lb_val == NEGATIVE_INFTY; if (unlikely(is_lb_inf)) { return eval(ub_constrain(identity_constrain(x, lb), ub)); } else { arena_t arena_x = x; auto arena_x_val = to_arena(value_of(arena_x)); arena_t arena_ub = ub; const auto ub_val = value_of(arena_ub).array().eval(); check_less("lub_constrain", "lb", lb_val, ub_val); auto is_ub_inf = to_arena((ub_val == INFTY)); auto diff = to_arena(ub_val - lb_val); auto inv_logit_x = to_arena(inv_logit(arena_x_val.array())); arena_t ret = (is_ub_inf).select( arena_x_val.array().exp() + lb_val, diff * inv_logit_x + lb_val); reverse_pass_callback([arena_x, arena_x_val, arena_ub, lb, ret, is_ub_inf, inv_logit_x, diff]() mutable { using T_var = arena_t>; using U_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj().array() += (is_ub_inf).select( ret.adj().array() * arena_x_val.array().exp(), ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x)); } if (!is_constant::value) { forward_as(lb).adj() += (is_ub_inf) .select(ret.adj().array(), ret.adj().array() * (1.0 - inv_logit_x)) .sum(); } if (!is_constant::value) { forward_as(arena_ub).adj().array() += (is_ub_inf).select(0, ret.adj().array() * inv_logit_x); } }); return ret_type(ret); } } /** * Specialization for Eigen matrix with scalar lower bound and matrix upper * bound plus lp. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, std::decay_t>& lp) { using std::exp; using ret_type = return_var_matrix_t; const auto lb_val = value_of(lb); const bool is_lb_inf = lb_val == NEGATIVE_INFTY; if (unlikely(is_lb_inf)) { return eval(ub_constrain(identity_constrain(x, lb), ub, lp)); } else { arena_t arena_x = x; auto arena_x_val = to_arena(value_of(arena_x)); arena_t arena_ub = ub; const auto& ub_val = to_ref(value_of(arena_ub)); check_less("lub_constrain", "lb", lb_val, ub_val); auto is_ub_inf = to_arena((ub_val.array() == INFTY)); auto diff = to_arena(ub_val.array() - lb_val); auto neg_abs_x = to_arena(-(arena_x_val.array()).abs()); lp += (is_ub_inf) .select(arena_x_val.array(), log(diff) + (neg_abs_x - (2.0 * log1p_exp(neg_abs_x)))) .sum(); auto inv_logit_x = to_arena(inv_logit(arena_x_val.array())); arena_t ret = (is_ub_inf).select( arena_x_val.array().exp() + lb_val, diff * inv_logit_x + lb_val); reverse_pass_callback([arena_x, arena_x_val, diff, inv_logit_x, arena_ub, lb, ret, lp, is_ub_inf]() mutable { using T_var = arena_t>; using U_var = arena_t>; const auto lp_adj = lp.adj(); if (!is_constant::value) { forward_as(arena_x).adj().array() += (is_ub_inf).select( ret.adj().array() * arena_x_val.array().exp() + lp_adj, ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x) + lp.adj() * (1.0 - 2.0 * inv_logit_x)); } if (!is_constant::value) { forward_as(lb).adj() += (is_ub_inf) .select(ret.adj().array(), ret.adj().array() * (1.0 - inv_logit_x) + -(1.0 / diff) * lp_adj) .sum(); } if (!is_constant::value) { forward_as(arena_ub).adj().array() += (is_ub_inf).select( 0, ret.adj().array() * inv_logit_x + (1.0 / diff) * lp_adj); } }); return ret_type(ret); } } /** * Specialization for Eigen matrix and matrix bounds. 
*/ template * = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { using std::exp; using ret_type = return_var_matrix_t; arena_t arena_x = x; auto arena_x_val = value_of(arena_x); arena_t arena_lb = lb; arena_t arena_ub = ub; auto lb_val = value_of(arena_lb).array(); auto ub_val = value_of(arena_ub).array(); check_less("lub_constrain", "lb", lb_val, ub_val); auto inv_logit_x = to_arena(inv_logit(arena_x_val.array())); auto is_lb_inf = to_arena((lb_val == NEGATIVE_INFTY)); auto is_ub_inf = to_arena((ub_val == INFTY)); auto is_lb_ub_inf = to_arena(is_lb_inf && is_ub_inf); auto diff = to_arena(ub_val - lb_val); // if both, identity, then lb_inf -> ub_constrain, then ub_inf -> lb_constrain arena_t ret = (is_lb_ub_inf) .select(arena_x_val.array(), (is_lb_inf).select( ub_val - arena_x.val().array().exp(), (is_ub_inf).select(arena_x_val.array().exp() + lb_val, diff * inv_logit_x + lb_val))); reverse_pass_callback([arena_x, arena_x_val, inv_logit_x, arena_ub, arena_lb, diff, ret, is_ub_inf, is_lb_inf, is_lb_ub_inf]() mutable { using T_var = arena_t>; using L_var = arena_t>; using U_var = arena_t>; // The most likely case is neither of them are infinity const bool is_none_inf = !(is_lb_inf.any() || is_ub_inf.any()); if (is_none_inf) { if (!is_constant::value) { forward_as(arena_x).adj().array() += ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x); } if (!is_constant::value) { forward_as(arena_lb).adj().array() += ret.adj().array() * (1.0 - inv_logit_x); } if (!is_constant::value) { forward_as(arena_ub).adj().array() += ret.adj().array() * inv_logit_x; } } else { if (!is_constant::value) { forward_as(arena_x).adj().array() += (is_lb_ub_inf) .select( ret.adj().array(), (is_lb_inf).select( ret.adj().array() * -arena_x_val.array().exp(), (is_ub_inf).select( ret.adj().array() * arena_x_val.array().exp(), ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x)))); } if (!is_constant::value) { 
forward_as(arena_ub).adj().array() += (is_ub_inf).select( 0, (is_lb_inf).select(ret.adj().array(), ret.adj().array() * inv_logit_x)); } if (!is_constant::value) { forward_as(arena_lb).adj().array() += (is_lb_inf).select( 0, (is_ub_inf).select(ret.adj().array(), ret.adj().array() * (1.0 - inv_logit_x))); } } }); return ret_type(ret); } /** * Specialization for Eigen matrix and matrix bounds plus lp. */ template * = nullptr, require_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { using std::exp; using ret_type = return_var_matrix_t; arena_t arena_x = x; auto arena_x_val = value_of(arena_x); arena_t arena_lb = lb; arena_t arena_ub = ub; auto lb_val = value_of(arena_lb).array(); auto ub_val = value_of(arena_ub).array(); check_less("lub_constrain", "lb", lb_val, ub_val); auto inv_logit_x = to_arena(inv_logit(arena_x_val.array())); auto is_lb_inf = to_arena((lb_val == NEGATIVE_INFTY)); auto is_ub_inf = to_arena((ub_val == INFTY)); auto is_lb_ub_inf = to_arena(is_lb_inf && is_ub_inf); auto diff = to_arena(ub_val - lb_val); // if both, identity, then lb_inf -> ub_constrain, then ub_inf -> lb_constrain arena_t ret = (is_lb_ub_inf) .select(arena_x_val.array(), (is_lb_inf).select( ub_val - arena_x.val().array().exp(), (is_ub_inf).select(arena_x_val.array().exp() + lb_val, diff * inv_logit_x + lb_val))); auto neg_abs_x = to_arena(-(arena_x_val.array()).abs()); lp += (is_lb_ub_inf) .select( 0, (is_lb_inf || is_ub_inf) .select( arena_x_val.array(), log(diff) + (neg_abs_x - (2.0 * log1p_exp(neg_abs_x))))) .sum(); reverse_pass_callback([arena_x, arena_x_val, inv_logit_x, arena_ub, arena_lb, diff, ret, is_ub_inf, is_lb_inf, is_lb_ub_inf, lp]() mutable { using T_var = arena_t>; using L_var = arena_t>; using U_var = arena_t>; const auto lp_adj = lp.adj(); // The most likely case is neither of them are infinity const bool is_none_inf = !(is_lb_inf.any() || is_ub_inf.any()); if (is_none_inf) { if (!is_constant::value) { 
forward_as(arena_x).adj().array() += ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x) + lp.adj() * (1.0 - 2.0 * inv_logit_x); } if (!is_constant::value) { forward_as(arena_lb).adj().array() += ret.adj().array() * (1.0 - inv_logit_x) + -(1.0 / diff) * lp_adj; } if (!is_constant::value) { forward_as(arena_ub).adj().array() += ret.adj().array() * inv_logit_x + (1.0 / diff) * lp_adj; } } else { if (!is_constant::value) { forward_as(arena_x).adj().array() += (is_lb_ub_inf) .select( ret.adj().array(), (is_lb_inf).select( ret.adj().array() * -arena_x_val.array().exp() + lp_adj, (is_ub_inf).select( ret.adj().array() * arena_x_val.array().exp() + lp_adj, ret.adj().array() * diff * inv_logit_x * (1.0 - inv_logit_x) + lp.adj() * (1.0 - 2.0 * inv_logit_x)))); } if (!is_constant::value) { forward_as(arena_lb).adj().array() += (is_lb_inf).select( 0, (is_ub_inf).select(ret.adj().array(), ret.adj().array() * (1.0 - inv_logit_x) + -(1.0 / diff) * lp_adj)); } if (!is_constant::value) { forward_as(arena_ub).adj().array() += (is_ub_inf).select( 0, (is_lb_inf).select( ret.adj().array(), ret.adj().array() * inv_logit_x + (1.0 / diff) * lp_adj)); } } }); return ret_type(ret); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/norm2.hpp0000644000176200001440000000236714645137106022122 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_NORM2_HPP #define STAN_MATH_REV_FUN_NORM2_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the L2 norm of a vector of var. * * @tparam T type of the vector (must have one compile-time dimension equal to * 1) * @param[in] v Vector. * @return L2 norm of v. */ template * = nullptr> inline var norm2(const T& v) { arena_t arena_v = v; var res = norm2(arena_v.val()); reverse_pass_callback([res, arena_v]() mutable { arena_v.adj().array() += res.adj() * (arena_v.val().array() / res.val()); }); return res; } /** * Returns the L2 norm of a `var_value`. 
* * @tparam A `var_value<>` whose inner type has one compile-time row or column. * @param[in] v Vector. * @return L2 norm of v. */ template * = nullptr> inline var norm2(const T& v) { return make_callback_vari(norm2(v.val()), [v](const auto& res) mutable { v.adj().array() += res.adj() * (v.val().array() / res.val()); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/sinh.hpp0000644000176200001440000000365014645137106022022 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SINH_HPP #define STAN_MATH_REV_FUN_SINH_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the hyperbolic sine of the specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \sinh x = \cosh x\f$. * * \f[ \mbox{sinh}(x) = \begin{cases} \sinh(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{sinh}(x)}{\partial x} = \begin{cases} \cosh(x) & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable. * @return Hyperbolic sine of variable. */ inline var sinh(const var& a) { return make_callback_var(std::sinh(a.val()), [a](const auto& vi) mutable { a.adj() += vi.adj() * std::cosh(a.val()); }); } /** * Return the hyperbolic of a radian-scaled variable (cmath). * * * @tparam Varmat a `var_value` with inner Eigen type * @param a Variable for radians of angle. * @return Hyperbolid Sine of variable. */ template * = nullptr> inline auto sinh(const VarMat& a) { return make_callback_var( a.val().array().sinh().matrix(), [a](const auto& vi) mutable { a.adj() += vi.adj().cwiseProduct(a.val().array().cosh().matrix()); }); } /** * Return the hyperbolic sine of the complex argument. 
* * @param[in] z argument * @return hyperbolic sine of the argument */ inline std::complex sinh(const std::complex& z) { return stan::math::internal::complex_sinh(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/lbeta.hpp0000644000176200001440000000554314645137106022153 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LBETA_HPP #define STAN_MATH_REV_FUN_LBETA_HPP #include #include #include #include namespace stan { namespace math { namespace internal { class lbeta_vv_vari : public op_vv_vari { public: lbeta_vv_vari(vari* avi, vari* bvi) : op_vv_vari(lbeta(avi->val_, bvi->val_), avi, bvi) {} void chain() { const double digamma_ab = digamma(avi_->val_ + bvi_->val_); avi_->adj_ += adj_ * (digamma(avi_->val_) - digamma_ab); bvi_->adj_ += adj_ * (digamma(bvi_->val_) - digamma_ab); } }; class lbeta_vd_vari : public op_vd_vari { public: lbeta_vd_vari(vari* avi, double b) : op_vd_vari(lbeta(avi->val_, b), avi, b) {} void chain() { avi_->adj_ += adj_ * (digamma(avi_->val_) - digamma(avi_->val_ + bd_)); } }; class lbeta_dv_vari : public op_dv_vari { public: lbeta_dv_vari(double a, vari* bvi) : op_dv_vari(lbeta(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ += adj_ * (digamma(bvi_->val_) - digamma(ad_ + bvi_->val_)); } }; } // namespace internal /** * Returns the natural logarithm of the beta function and its gradients. * \f[ \mathrm{lbeta}(a,b) = \ln\left(B\left(a,b\right)\right) \f] \f[ \frac{\partial }{\partial a} = \psi^{\left(0\right)}\left(a\right) - \psi^{\left(0\right)}\left(a + b\right) \f] \f[ \frac{\partial }{\partial b} = \psi^{\left(0\right)}\left(b\right) - \psi^{\left(0\right)}\left(a + b\right) \f] * @param a var Argument * @param b var Argument * @return Result of log beta function */ inline var lbeta(const var& a, const var& b) { return var(new internal::lbeta_vv_vari(a.vi_, b.vi_)); } /** * Returns the natural logarithm of the beta function and its gradients. 
* \f[ \mathrm{lbeta}(a,b) = \ln\left(B\left(a,b\right)\right) \f] \f[ \frac{\partial }{\partial a} = \psi^{\left(0\right)}\left(a\right) - \psi^{\left(0\right)}\left(a + b\right) \f] * @param a var Argument * @param b double Argument * @return Result of log beta function */ inline var lbeta(const var& a, double b) { return var(new internal::lbeta_vd_vari(a.vi_, b)); } /** * Returns the natural logarithm of the beta function and its gradients. * \f[ \mathrm{lbeta}(a,b) = \ln\left(B\left(a,b\right)\right) \f] \f[ \frac{\partial }{\partial b} = \psi^{\left(0\right)}\left(b\right) - \psi^{\left(0\right)}\left(a + b\right) \f] * @param a double Argument * @param b var Argument * @return Result of log beta function */ inline var lbeta(double a, const var& b) { return var(new internal::lbeta_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cholesky_corr_constrain.hpp0000644000176200001440000001175514645137106026014 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CHOLESKY_CORR_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_CHOLESKY_CORR_CONSTRAIN_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the Cholesky factor of the correlation matrix of the sepcified * size read from the unconstrained vector `y`. A total of K choose 2 * elements are required to build a K by K Cholesky factor. 
* * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param y Vector of unconstrained values * @param K number of rows * @return Cholesky factor of correlation matrix */ template * = nullptr> var_value cholesky_corr_constrain(const T& y, int K) { using std::sqrt; int k_choose_2 = (K * (K - 1)) / 2; check_size_match("cholesky_corr_constrain", "y.size()", y.size(), "k_choose_2", k_choose_2); if (K == 0) { return Eigen::MatrixXd(); } var_value z = corr_constrain(y); arena_t x_val = Eigen::MatrixXd::Zero(K, K); x_val.coeffRef(0, 0) = 1; int k = 0; for (int i = 1; i < K; ++i) { x_val.coeffRef(i, 0) = z.val().coeff(k); k++; double sum_sqs = square(x_val.coeff(i, 0)); for (int j = 1; j < i; ++j) { x_val.coeffRef(i, j) = z.val().coeff(k) * sqrt(1.0 - sum_sqs); k++; sum_sqs += square(x_val.coeff(i, j)); } x_val.coeffRef(i, i) = sqrt(1.0 - sum_sqs); } var_value x = x_val; reverse_pass_callback([z, K, x]() mutable { size_t k = z.size(); for (int i = K - 1; i > 0; --i) { double sum_sqs_val = 1.0 - square(x.val().coeffRef(i, i)); double sum_sqs_adj = -0.5 * x.adj().coeff(i, i) / x.val().coeff(i, i); for (int j = i - 1; j > 0; --j) { x.adj().coeffRef(i, j) += 2 * sum_sqs_adj * x.val().coeff(i, j); sum_sqs_val -= square(x.val().coeff(i, j)); k--; sum_sqs_adj += -0.5 * x.adj().coeffRef(i, j) * z.val().coeff(k) / sqrt(1.0 - sum_sqs_val); z.adj().coeffRef(k) += x.adj().coeffRef(i, j) * sqrt(1.0 - sum_sqs_val); } x.adj().coeffRef(i, 0) += 2 * sum_sqs_adj * x.val().coeff(i, 0); k--; z.adj().coeffRef(k) += x.adj().coeffRef(i, 0); } }); return x; } /** * Return the Cholesky factor of the correlation matrix of the sepcified * size read from the unconstrained vector `y`. A total of K choose 2 * elements are required to build a K by K Cholesky factor. 
* * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param y Vector of unconstrained values * @param K number of rows * @param[out] lp Log density that is incremented with the log Jacobian * @return Cholesky factor of correlation matrix */ template * = nullptr> var_value cholesky_corr_constrain(const T& y, int K, scalar_type_t& lp) { using Eigen::Dynamic; using Eigen::Matrix; using std::sqrt; int k_choose_2 = (K * (K - 1)) / 2; check_size_match("cholesky_corr_constrain", "y.size()", y.size(), "k_choose_2", k_choose_2); if (K == 0) { return Eigen::MatrixXd(); } var_value z = corr_constrain(y, lp); arena_t x_val = Eigen::MatrixXd::Zero(K, K); x_val.coeffRef(0, 0) = 1; int k = 0; double lp_val = 0.0; for (int i = 1; i < K; ++i) { x_val.coeffRef(i, 0) = z.val().coeff(k); k++; double sum_sqs = square(x_val.coeff(i, 0)); for (int j = 1; j < i; ++j) { lp_val += 0.5 * log1m(sum_sqs); x_val.coeffRef(i, j) = z.val().coeff(k) * sqrt(1.0 - sum_sqs); k++; sum_sqs += square(x_val.coeff(i, j)); } x_val.coeffRef(i, i) = sqrt(1.0 - sum_sqs); } lp += lp_val; var_value x = x_val; reverse_pass_callback([z, K, x, lp]() mutable { size_t k = z.size(); for (int i = K - 1; i > 0; --i) { double sum_sqs_val = 1.0 - square(x.val().coeffRef(i, i)); double sum_sqs_adj = -0.5 * x.adj().coeff(i, i) / x.val().coeff(i, i); for (int j = i - 1; j > 0; --j) { x.adj().coeffRef(i, j) += 2 * sum_sqs_adj * x.val().coeff(i, j); sum_sqs_val -= square(x.val().coeff(i, j)); k--; sum_sqs_adj += -0.5 * x.adj().coeffRef(i, j) * z.val().coeff(k) / sqrt(1.0 - sum_sqs_val); z.adj().coeffRef(k) += x.adj().coeffRef(i, j) * sqrt(1.0 - sum_sqs_val); sum_sqs_adj -= 0.5 * lp.adj() / (1 - sum_sqs_val); } x.adj().coeffRef(i, 0) += 2 * sum_sqs_adj * x.val().coeff(i, 0); k--; z.adj().coeffRef(k) += x.adj().coeffRef(i, 0); } }); return x; } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/rev/fun/as_array_or_scalar.hpp0000644000176200001440000000135114645137106024703 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_AS_ARRAY_OR_SCALAR_HPP #define STAN_MATH_REV_FUN_AS_ARRAY_OR_SCALAR_HPP #include #include #include namespace stan { namespace math { /** * Converts a `var_value` with inner Eigen matrix type to an `var_value` * with an inner array. * * @tparam T Type of `var_value` with inner `Eigen::Matrix` or expression * @param v Specified `var_value` with inner `Eigen::Matrix` or expression. * @return `var_value<>` with Matrix converted to an array. */ template * = nullptr> inline auto as_array_or_scalar(T&& v) { return v.array(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fmod.hpp0000644000176200001440000000725714645137106022015 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FMOD_HPP #define STAN_MATH_REV_FUN_FMOD_HPP #include #include #include #include #include namespace stan { namespace math { namespace internal { class fmod_vv_vari : public op_vv_vari { public: fmod_vv_vari(vari* avi, vari* bvi) : op_vv_vari(std::fmod(avi->val_, bvi->val_), avi, bvi) {} void chain() { if (unlikely(is_any_nan(avi_->val_, bvi_->val_))) { avi_->adj_ = NOT_A_NUMBER; bvi_->adj_ = NOT_A_NUMBER; } else { avi_->adj_ += adj_; bvi_->adj_ -= adj_ * std::trunc(avi_->val_ / bvi_->val_); } } }; class fmod_vd_vari : public op_vd_vari { public: fmod_vd_vari(vari* avi, double b) : op_vd_vari(std::fmod(avi->val_, b), avi, b) {} void chain() { if (unlikely(is_any_nan(avi_->val_, bd_))) { avi_->adj_ = NOT_A_NUMBER; } else { avi_->adj_ += adj_; } } }; class fmod_dv_vari : public op_dv_vari { public: fmod_dv_vari(double a, vari* bvi) : op_dv_vari(std::fmod(a, bvi->val_), a, bvi) {} void chain() { if (unlikely(is_any_nan(bvi_->val_, ad_))) { bvi_->adj_ = NOT_A_NUMBER; } else { int d = std::trunc(ad_ / bvi_->val_); bvi_->adj_ -= adj_ * d; } } }; } // namespace internal /** * Return the floating point remainder 
after dividing the * first variable by the second (cmath). * * The partial derivatives with respect to the variables are defined * everywhere but where \f$x = y\f$, but we set these to match other values, * with * * \f$\frac{\partial}{\partial x} \mbox{fmod}(x, y) = 1\f$, and * * \f$\frac{\partial}{\partial y} \mbox{fmod}(x, y) = -\lfloor \frac{x}{y} \rfloor\f$. * * \f[ \mbox{fmod}(x, y) = \begin{cases} x - \lfloor \frac{x}{y}\rfloor y & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fmod}(x, y)}{\partial x} = \begin{cases} 1 & \mbox{if } -\infty\leq x, y\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fmod}(x, y)}{\partial y} = \begin{cases} -\lfloor \frac{x}{y}\rfloor & \mbox{if } -\infty\leq x, y\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return Floating pointer remainder of dividing the first variable * by the second. */ inline var fmod(const var& a, const var& b) { return var(new internal::fmod_vv_vari(a.vi_, b.vi_)); } /** * Return the floating point remainder after dividing the * the first variable by the second scalar (cmath). * * The derivative with respect to the variable is * * \f$\frac{d}{d x} \mbox{fmod}(x, c) = \frac{1}{c}\f$. * * @param a First variable. * @param b Second scalar. * @return Floating pointer remainder of dividing the first variable by * the second scalar. */ inline var fmod(const var& a, double b) { return var(new internal::fmod_vd_vari(a.vi_, b)); } /** * Return the floating point remainder after dividing the * first scalar by the second variable (cmath). * * The derivative with respect to the variable is * * \f$\frac{d}{d y} \mbox{fmod}(c, y) = -\lfloor \frac{c}{y} \rfloor\f$. * * @param a First scalar. * @param b Second variable. 
* @return Floating pointer remainder of dividing first scalar by * the second variable. */ inline var fmod(double a, const var& b) { return var(new internal::fmod_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/gamma_p.hpp0000644000176200001440000000551614645137106022465 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_GAMMA_P_HPP #define STAN_MATH_REV_FUN_GAMMA_P_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class gamma_p_vv_vari : public op_vv_vari { public: gamma_p_vv_vari(vari* avi, vari* bvi) : op_vv_vari(gamma_p(avi->val_, bvi->val_), avi, bvi) {} void chain() { using std::exp; using std::fabs; using std::log; if (is_inf(avi_->val_)) { avi_->adj_ = NOT_A_NUMBER; bvi_->adj_ = NOT_A_NUMBER; return; } if (is_inf(bvi_->val_)) { avi_->adj_ = NOT_A_NUMBER; bvi_->adj_ = NOT_A_NUMBER; return; } // return zero derivative as gamma_p is flat // to machine precision for b / a > 10 if (std::fabs(bvi_->val_ / avi_->val_) > 10) { return; } avi_->adj_ += adj_ * grad_reg_lower_inc_gamma(avi_->val_, bvi_->val_); bvi_->adj_ += adj_ * std::exp(-bvi_->val_ + (avi_->val_ - 1.0) * std::log(bvi_->val_) - lgamma(avi_->val_)); } }; class gamma_p_vd_vari : public op_vd_vari { public: gamma_p_vd_vari(vari* avi, double b) : op_vd_vari(gamma_p(avi->val_, b), avi, b) {} void chain() { if (is_inf(avi_->val_)) { avi_->adj_ = NOT_A_NUMBER; return; } if (is_inf(bd_)) { avi_->adj_ = NOT_A_NUMBER; return; } // return zero derivative as gamma_p is flat // to machine precision for b / a > 10 if (std::fabs(bd_ / avi_->val_) > 10) { return; } avi_->adj_ += adj_ * grad_reg_lower_inc_gamma(avi_->val_, bd_); } }; class gamma_p_dv_vari : public op_dv_vari { public: gamma_p_dv_vari(double a, vari* bvi) : op_dv_vari(gamma_p(a, bvi->val_), a, bvi) {} void chain() { if (is_inf(ad_)) { bvi_->adj_ = NOT_A_NUMBER; return; } if (is_inf(bvi_->val_)) { bvi_->adj_ = 
NOT_A_NUMBER; return; } // return zero derivative as gamma_p is flat to // machine precision for b / a > 10 if (std::fabs(bvi_->val_ / ad_) > 10) { return; } bvi_->adj_ += adj_ * std::exp(-bvi_->val_ + (ad_ - 1.0) * std::log(bvi_->val_) - lgamma(ad_)); } }; } // namespace internal inline var gamma_p(const var& a, const var& b) { return var(new internal::gamma_p_vv_vari(a.vi_, b.vi_)); } inline var gamma_p(const var& a, double b) { return var(new internal::gamma_p_vd_vari(a.vi_, b)); } inline var gamma_p(double a, const var& b) { return var(new internal::gamma_p_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cosh.hpp0000644000176200001440000000366614645137106022024 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_COSH_HPP #define STAN_MATH_REV_FUN_COSH_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the hyperbolic cosine of the specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \cosh x = \sinh x\f$. * * \f[ \mbox{cosh}(x) = \begin{cases} \cosh(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{cosh}(x)}{\partial x} = \begin{cases} \sinh(x) & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable. * @return Hyperbolic cosine of variable. */ inline var cosh(const var& a) { return make_callback_var(std::cosh(a.val()), [a](const auto& vi) mutable { a.adj() += vi.adj() * std::sinh(a.val()); }); } /** * Return the hyperbolic cosine of the specified variable (cmath). * * @tparam Varmat a `var_value` with inner Eigen type * @param a Variable. * @return Hyperbolic cosine of variable. 
*/ template * = nullptr> inline auto cosh(const VarMat& a) { return make_callback_var( a.val().array().cosh().matrix(), [a](const auto& vi) mutable { a.adj() += vi.adj().cwiseProduct(a.val().array().sinh().matrix()); }); } /** * Return the hyperbolic cosine of the complex argument. * * @param[in] z argument * @return hyperbolic cosine of the argument */ inline std::complex cosh(const std::complex& z) { return stan::math::internal::complex_cosh(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/hypot.hpp0000644000176200001440000000531314645137106022222 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_HYPOT_HPP #define STAN_MATH_REV_FUN_HYPOT_HPP #include #include #include namespace stan { namespace math { /** * Returns the length of the hypotenuse of a right triangle * with sides of the specified lengths (C99). * * The partial derivatives are given by * * \f$\frac{\partial}{\partial x} \sqrt{x^2 + y^2} = \frac{x}{\sqrt{x^2 + * y^2}}\f$, and * * \f$\frac{\partial}{\partial y} \sqrt{x^2 + y^2} = \frac{y}{\sqrt{x^2 + * y^2}}\f$. * * @param[in] a Length of first side. * @param[in] b Length of second side. * @return Length of hypotenuse. */ inline var hypot(const var& a, const var& b) { return make_callback_var(hypot(a.val(), b.val()), [a, b](auto& vi) mutable { a.adj() += vi.adj() * a.val() / vi.val(); b.adj() += vi.adj() * b.val() / vi.val(); }); } /** * Returns the length of the hypotenuse of a right triangle * with sides of the specified lengths (C99). * * The derivative is * * \f$\frac{d}{d x} \sqrt{x^2 + c^2} = \frac{x}{\sqrt{x^2 + c^2}}\f$. * * @param[in] a Length of first side. * @param[in] b Length of second side. * @return Length of hypotenuse. */ inline var hypot(const var& a, double b) { return make_callback_var(hypot(a.val(), b), [a](auto& vi) mutable { a.adj() += vi.adj() * a.val() / vi.val(); }); } /** * Returns the length of the hypotenuse of a right triangle * with sides of the specified lengths (C99). 
* * The derivative is * * \f$\frac{d}{d y} \sqrt{c^2 + y^2} = \frac{y}{\sqrt{c^2 + y^2}}\f$. * \f[ \mbox{hypot}(x, y) = \begin{cases} \textrm{NaN} & \mbox{if } x < 0 \text{ or } y < 0 \\ \sqrt{x^2+y^2} & \mbox{if } x, y\geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{hypot}(x, y)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < 0 \text{ or } y < 0 \\ \frac{x}{\sqrt{x^2+y^2}} & \mbox{if } x, y\geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{hypot}(x, y)}{\partial y} = \begin{cases} \textrm{NaN} & \mbox{if } x < 0 \text{ or } y < 0 \\ \frac{y}{\sqrt{x^2+y^2}} & \mbox{if } x, y\geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param[in] a Length of first side. * @param[in] b Length of second side. * @return Length of hypotenuse. */ inline var hypot(double a, const var& b) { return make_callback_var(hypot(b.val(), a), [b](auto& vi) mutable { b.adj() += vi.adj() * b.val() / vi.val(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/exp2.hpp0000644000176200001440000000244314645137106021736 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_EXP2_HPP #define STAN_MATH_REV_FUN_EXP2_HPP #include #include #include #include #include namespace stan { namespace math { /** * Exponentiation base 2 function for variables (C99). * * The derivative is * * \f$\frac{d}{dx} 2^x = (\log 2) 2^x\f$. * \f[ \mbox{exp2}(x) = \begin{cases} 2^x & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{exp2}(x)}{\partial x} = \begin{cases} 2^x\ln2 & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a The variable. * @return Two to the power of the specified variable. 
*/ inline var exp2(const var& a) { return make_callback_var(std::exp2(a.val()), [a](auto& vi) mutable { a.adj() += vi.adj() * vi.val() * LOG_TWO; }); } template * = nullptr> inline auto exp2(const var_value& a) { return make_callback_var(exp2(a.val()), [a](auto& vi) mutable { a.adj().array() += vi.adj().array() * vi.val().array() * LOG_TWO; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/floor.hpp0000644000176200001440000000302514645137106022176 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FLOOR_HPP #define STAN_MATH_REV_FUN_FLOOR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the floor of the specified variable (cmath). * * The derivative of the floor function is defined and * zero everywhere but at integers, so we set these derivatives * to zero for convenience, * * \f$\frac{d}{dx} {\lfloor x \rfloor} = 0\f$. * * The floor function rounds down. For double values, this is the largest * integral value that is not greater than the specified value. * Although this function is not differentiable because it is * discontinuous at integral values, its gradient is returned as * zero everywhere. * \f[ \mbox{floor}(x) = \begin{cases} \lfloor x \rfloor & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{floor}(x)}{\partial x} = \begin{cases} 0 & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Input variable. * @return Floor of the variable. 
*/ inline var floor(const var& a) { return var(std::floor(a.val())); } template * = nullptr> inline auto floor(const var_value& a) { return var_value(a.val().array().floor().matrix()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/bessel_second_kind.hpp0000644000176200001440000000247514645137106024702 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_BESSEL_SECOND_KIND_HPP #define STAN_MATH_REV_FUN_BESSEL_SECOND_KIND_HPP #include #include #include namespace stan { namespace math { inline var bessel_second_kind(int v, const var& a) { double ret_val = bessel_second_kind(v, a.val()); auto precomp_bessel = v * ret_val / a.val() - bessel_second_kind(v + 1, a.val()); return make_callback_var(ret_val, [precomp_bessel, a](auto& vi) mutable { a.adj() += vi.adj() * precomp_bessel; }); } /** * Overload with `var_value` for `int`, `std::vector`, and * `std::vector>` */ template * = nullptr, require_eigen_t* = nullptr> inline auto bessel_second_kind(const T1& v, const var_value& a) { auto ret_val = bessel_second_kind(v, a.val()).array().eval(); auto v_map = as_array_or_scalar(v); auto precomp_bessel = to_arena(v_map * ret_val / a.val().array() - bessel_second_kind(v_map + 1, a.val().array())); return make_callback_var( ret_val.matrix(), [precomp_bessel, a](const auto& vi) mutable { a.adj().array() += vi.adj().array() * precomp_bessel; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/from_var_value.hpp0000644000176200001440000000335414645137106024071 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FROM_VAR_VALUE_HPP #define STAN_MATH_REV_FUN_FROM_VAR_VALUE_HPP #include #include #include #include namespace stan { namespace math { /** * Converts `var_value` into an Eigen Matrix. Adjoint is propagated back to * argument in the reverse pass. 
* * @tparam T type of the input * @param a matrix to convert */ template * = nullptr> Eigen::Matrix from_var_value( const T& a) { arena_matrix> res(a.val()); reverse_pass_callback([res, a]() mutable { a.vi_->adj_ += res.adj(); }); return res; } /** * This is a no-op for Eigen containers of vars, scalars or prim types. * * @tparam T type of the input * @param a matrix to convert */ template < typename T, require_any_t< conjunction, is_var>>, std::is_same, var>, bool_constant, var>::value>>* = nullptr> T from_var_value(T&& a) { return std::forward(a); } /** * Convert the elements of the `std::vector` input to `var_value` types * if possible * * @tparam T type of elemnts of the input vector * @param a std::vector of elements to convert */ template auto from_var_value(const std::vector& a) { std::vector()))> out; out.reserve(a.size()); for (size_t i = 0; i < a.size(); ++i) { out.emplace_back(from_var_value(a[i])); } return out; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/tcrossprod.hpp0000644000176200001440000000220614645137106023257 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TCROSSPROD_HPP #define STAN_MATH_REV_FUN_TCROSSPROD_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the result of post-multiplying a matrix by its * own transpose. * * @tparam T Type of the matrix * @param M Matrix to multiply. * @return M times its transpose. 
*/ template * = nullptr> inline auto tcrossprod(const T& M) { using ret_type = return_var_matrix_t< Eigen::Matrix, T>; arena_t arena_M = M; arena_t res = arena_M.val_op() * arena_M.val_op().transpose(); if (likely(M.size() > 0)) { reverse_pass_callback([res, arena_M]() mutable { arena_M.adj() += (res.adj_op() + res.adj_op().transpose()) * arena_M.val_op(); }); } return ret_type(res); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_determinant_ldlt.hpp0000644000176200001440000000211614645137106025247 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_DETERMINANT_LDLT_HPP #define STAN_MATH_REV_FUN_LOG_DETERMINANT_LDLT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the log det of the matrix whose LDLT factorization is given * * @tparam T Type of matrix for LDLT factor * @param A an LDLT_factor * @return ln(det(A)) */ template * = nullptr> var log_determinant_ldlt(LDLT_factor& A) { if (A.matrix().size() == 0) { return 0; } var log_det = sum(log(A.ldlt().vectorD().array())); arena_t arena_A = A.matrix(); arena_t arena_A_inv = Eigen::MatrixXd::Identity(A.matrix().rows(), A.matrix().cols()); A.ldlt().solveInPlace(arena_A_inv); reverse_pass_callback([arena_A, log_det, arena_A_inv]() mutable { arena_A.adj() += log_det.adj() * arena_A_inv; }); return log_det; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv.hpp0000644000176200001440000000202614645137106021651 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_HPP #define STAN_MATH_REV_FUN_INV_HPP #include #include #include namespace stan { namespace math { /** * @tparam T Arithmetic or a type inheriting from `EigenBase`. 
\f[ \mbox{inv}(x) = \begin{cases} \frac{1}{x} & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{inv}(x)}{\partial x} = \begin{cases} -\frac{1}{x^2} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * */ template * = nullptr> inline auto inv(const var_value& a) { auto denom = to_arena(as_array_or_scalar(square(a.val()))); return make_callback_var(inv(a.val()), [a, denom](auto& vi) mutable { as_array_or_scalar(a.adj()) -= as_array_or_scalar(vi.adj()) / denom; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/is_nan.hpp0000644000176200001440000000110114645137106022315 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_IS_NAN_HPP #define STAN_MATH_REV_FUN_IS_NAN_HPP #include #include #include namespace stan { namespace math { /** * Returns 1 if the input's value is NaN and 0 otherwise. * * Delegates to is_nan(double). * * @tparam T type of input * @param v value to test * @return 1 if the value is NaN and 0 otherwise. */ inline bool is_nan(const var& v) { return is_nan(v.val()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fma.hpp0000644000176200001440000003320614645137106021624 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FMA_HPP #define STAN_MATH_REV_FUN_FMA_HPP #include #include #include #include #include namespace stan { namespace math { /** * The fused multiply-add function for three variables (C99). * This function returns the product of the first two arguments * plus the third argument. * * The partial derivatives are * * \f$\frac{\partial}{\partial x} (x * y) + z = y\f$, and * * \f$\frac{\partial}{\partial y} (x * y) + z = x\f$, and * * \f$\frac{\partial}{\partial z} (x * y) + z = 1\f$. * * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. 
*/ inline var fma(const var& x, const var& y, const var& z) { return make_callback_var(fma(x.val(), y.val(), z.val()), [x, y, z](auto& vi) { x.adj() += vi.adj() * y.val(); y.adj() += vi.adj() * x.val(); z.adj() += vi.adj(); }); } /** * The fused multiply-add function for two variables and a value * (C99). This function returns the product of the first two * arguments plus the third argument. * * The partial derivatives are * * \f$\frac{\partial}{\partial x} (x * y) + z = y\f$, and * * \f$\frac{\partial}{\partial y} (x * y) + z = x\f$. * * @tparam Tc type of the summand * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. */ template * = nullptr> inline var fma(const var& x, const var& y, Tc&& z) { return make_callback_var(fma(x.val(), y.val(), z), [x, y](auto& vi) { x.adj() += vi.adj() * y.val(); y.adj() += vi.adj() * x.val(); }); } /** * The fused multiply-add function for a variable, value, and * variable (C99). This function returns the product of the first * two arguments plus the third argument. * * The partial derivatives are * * \f$\frac{\partial}{\partial x} (x * y) + z = y\f$, and * * \f$\frac{\partial}{\partial z} (x * y) + z = 1\f$. * * @tparam Ta type of the first multiplicand * @tparam Tb type of the second multiplicand * @tparam Tc type of the summand * * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. */ template * = nullptr> inline var fma(const var& x, Tb&& y, const var& z) { return make_callback_var(fma(x.val(), y, z.val()), [x, y, z](auto& vi) { x.adj() += vi.adj() * y; z.adj() += vi.adj(); }); } /** * The fused multiply-add function for a variable and two values * (C99). This function returns the product of the first two * arguments plus the third argument. 
* * The double-based version * ::%fma(double, double, double) is defined in * <cmath>. * * The derivative is * * \f$\frac{d}{d x} (x * y) + z = y\f$. * * @tparam Tb type of the second multiplicand * @tparam Tc type of the summand * * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. */ template * = nullptr> inline var fma(const var& x, Tb&& y, Tc&& z) { return make_callback_var(fma(x.val(), y, z), [x, y](auto& vi) { x.adj() += vi.adj() * y; }); } /** * The fused multiply-add function for a value, variable, and * value (C99). This function returns the product of the first * two arguments plus the third argument. * * The derivative is * * \f$\frac{d}{d y} (x * y) + z = x\f$, and * * @tparam Ta type of the first multiplicand * @tparam Tc type of the summand * * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. */ template * = nullptr> inline var fma(Ta&& x, const var& y, Tc&& z) { return make_callback_var(fma(x, y.val(), z), [x, y](auto& vi) { y.adj() += vi.adj() * x; }); } /** * The fused multiply-add function for two values and a variable, * and value (C99). This function returns the product of the * first two arguments plus the third argument. * * The derivative is * * \f$\frac{\partial}{\partial z} (x * y) + z = 1\f$. * * @tparam Ta type of the first multiplicand * @tparam Tb type of the second multiplicand * * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. */ template * = nullptr> inline var fma(Ta&& x, Tb&& y, const var& z) { return make_callback_var(fma(x, y, z.val()), [z](auto& vi) { z.adj() += vi.adj(); }); } /** * The fused multiply-add function for a value and two variables * (C99). 
This function returns the product of the first two * arguments plus the third argument. * * The partial derivatives are * * \f$\frac{\partial}{\partial y} (x * y) + z = x\f$, and * * \f$\frac{\partial}{\partial z} (x * y) + z = 1\f$. * * @tparam Ta type of the first multiplicand * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. */ template * = nullptr> inline var fma(Ta&& x, const var& y, const var& z) { return make_callback_var(fma(x, y.val(), z.val()), [x, y, z](auto& vi) { y.adj() += vi.adj() * x; z.adj() += vi.adj(); }); } namespace internal { /** * Overload for matrix, matrix, matrix */ template * = nullptr> inline auto fma_reverse_pass(T1& arena_x, T2& arena_y, T3& arena_z, T4& ret) { return [arena_x, arena_y, arena_z, ret]() mutable { using T1_var = arena_t>>; using T2_var = arena_t>>; using T3_var = arena_t>>; if (!is_constant::value) { forward_as(arena_x).adj().array() += ret.adj().array() * value_of(arena_y).array(); } if (!is_constant::value) { forward_as(arena_y).adj().array() += ret.adj().array() * value_of(arena_x).array(); } if (!is_constant::value) { forward_as(arena_z).adj().array() += ret.adj().array(); } }; } /** * Overload for scalar, matrix, matrix */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto fma_reverse_pass(T1& arena_x, T2& arena_y, T3& arena_z, T4& ret) { return [arena_x, arena_y, arena_z, ret]() mutable { using T1_var = arena_t>; using T2_var = arena_t>; using T3_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj() += (ret.adj().array() * value_of(arena_y).array()).sum(); } if (!is_constant::value) { forward_as(arena_y).adj().array() += ret.adj().array() * value_of(arena_x); } if (!is_constant::value) { forward_as(arena_z).adj().array() += ret.adj().array(); } }; } /** * Overload for matrix, scalar, matrix */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto 
fma_reverse_pass(T1& arena_x, T2& arena_y, T3& arena_z, T4& ret) { return [arena_x, arena_y, arena_z, ret]() mutable { using T1_var = arena_t>; using T2_var = arena_t>; using T3_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj().array() += ret.adj().array() * value_of(arena_y); } if (!is_constant::value) { forward_as(arena_y).adj() += (ret.adj().array() * value_of(arena_x).array()).sum(); } if (!is_constant::value) { forward_as(arena_z).adj().array() += ret.adj().array(); } }; } /** * Overload for scalar, scalar, matrix */ template * = nullptr, require_all_stan_scalar_t* = nullptr> inline auto fma_reverse_pass(T1& arena_x, T2& arena_y, T3& arena_z, T4& ret) { return [arena_x, arena_y, arena_z, ret]() mutable { using T1_var = arena_t>; using T2_var = arena_t>; using T3_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj() += (ret.adj().array() * value_of(arena_y)).sum(); } if (!is_constant::value) { forward_as(arena_y).adj() += (ret.adj().array() * value_of(arena_x)).sum(); } if (!is_constant::value) { forward_as(arena_z).adj().array() += ret.adj().array(); } }; } /** * Overload for matrix, matrix, scalar */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto fma_reverse_pass(T1& arena_x, T2& arena_y, T3& arena_z, T4& ret) { return [arena_x, arena_y, arena_z, ret]() mutable { using T1_var = arena_t>; using T2_var = arena_t>; using T3_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj().array() += ret.adj().array() * value_of(arena_y).array(); } if (!is_constant::value) { forward_as(arena_y).adj().array() += ret.adj().array() * value_of(arena_x).array(); } if (!is_constant::value) { forward_as(arena_z).adj() += ret.adj().sum(); } }; } /** * Overload for scalar, matrix, scalar */ template * = nullptr, require_all_stan_scalar_t* = nullptr> inline auto fma_reverse_pass(T1& arena_x, T2& arena_y, T3& arena_z, T4& ret) { return [arena_x, arena_y, arena_z, ret]() mutable { using T1_var = arena_t>; using T2_var 
= arena_t>; using T3_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj() += (ret.adj().array() * value_of(arena_y).array()).sum(); } if (!is_constant::value) { forward_as(arena_y).adj().array() += ret.adj().array() * value_of(arena_x); } if (!is_constant::value) { forward_as(arena_z).adj() += ret.adj().sum(); } }; } /** * Overload for matrix, scalar, scalar */ template * = nullptr, require_all_stan_scalar_t* = nullptr> inline auto fma_reverse_pass(T1& arena_x, T2& arena_y, T3& arena_z, T4& ret) { return [arena_x, arena_y, arena_z, ret]() mutable { using T1_var = arena_t>; using T2_var = arena_t>; using T3_var = arena_t>; if (!is_constant::value) { forward_as(arena_x).adj().array() += ret.adj().array() * value_of(arena_y); } if (!is_constant::value) { forward_as(arena_y).adj() += (ret.adj().array() * value_of(arena_x).array()).sum(); } if (!is_constant::value) { forward_as(arena_z).adj() += ret.adj().sum(); } }; } } // namespace internal /** * The fused multiply-add function for three variables (C99). * This function returns the product of the first two arguments * plus the third argument. * * The partial derivatives are * * \f$\frac{\partial}{\partial x} (x * y) + z = y\f$, and * * \f$\frac{\partial}{\partial y} (x * y) + z = x\f$, and * * \f$\frac{\partial}{\partial z} (x * y) + z = 1\f$. * * @param x First multiplicand. * @param y Second multiplicand. * @param z Summand. * @return Product of the multiplicands plus the summand, ($a * $b) + $c. 
*/ template * = nullptr, require_var_t>* = nullptr> inline auto fma(const T1& x, const T2& y, const T3& z) { arena_t arena_x = x; arena_t arena_y = y; arena_t arena_z = z; if (is_matrix::value && is_matrix::value) { check_matching_dims("fma", "x", arena_x, "y", arena_y); } if (is_matrix::value && is_matrix::value) { check_matching_dims("fma", "x", arena_x, "z", arena_z); } if (is_matrix::value && is_matrix::value) { check_matching_dims("fma", "y", arena_y, "z", arena_z); } using inner_ret_type = decltype(fma(value_of(arena_x), value_of(arena_y), value_of(arena_z))); using ret_type = return_var_matrix_t; arena_t ret = fma(value_of(arena_x), value_of(arena_y), value_of(arena_z)); reverse_pass_callback( internal::fma_reverse_pass(arena_x, arena_y, arena_z, ret)); return ret_type(ret); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/is_inf.hpp0000644000176200001440000000105314645137106022323 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_IS_INF_HPP #define STAN_MATH_REV_FUN_IS_INF_HPP #include #include #include namespace stan { namespace math { /** * Returns 1 if the input's value is infinite and 0 otherwise. * * Delegates to is_inf. * * @param v Value to test. * * @return 1 if the value is infinite and 0 otherwise. */ inline int is_inf(const var& v) { return is_inf(v.val()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cov_matrix_constrain.hpp0000644000176200001440000000755514645137106025324 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_COV_MATRIX_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_COV_MATRIX_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the symmetric, positive-definite matrix of dimensions K * by K resulting from transforming the specified finite vector of * size K plus (K choose 2). * *

See cov_matrix_free() for the inverse transform. * * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x The vector to convert to a covariance matrix. * @param K The number of rows and columns of the resulting * covariance matrix. * @throws std::invalid_argument if (x.size() != K + (K choose 2)). */ template * = nullptr> var_value cov_matrix_constrain(const T& x, Eigen::Index K) { using std::exp; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "K + (K choose 2)", (K * (K + 1)) / 2); arena_t L_val = Eigen::MatrixXd::Zero(K, K); int i = 0; for (Eigen::Index m = 0; m < K; ++m) { L_val.row(m).head(m) = x.val().segment(i, m); i += m; L_val.coeffRef(m, m) = exp(x.val().coeff(i)); i++; } var_value L = L_val; reverse_pass_callback([x, L]() mutable { Eigen::Index K = L.rows(); int i = x.size(); for (int m = K - 1; m >= 0; --m) { i--; x.adj().coeffRef(i) += L.adj().coeff(m, m) * L.val().coeff(m, m); i -= m; x.adj().segment(i, m) += L.adj().row(m).head(m); } }); return multiply_lower_tri_self_transpose(L); } /** * Return the symmetric, positive-definite matrix of dimensions K * by K resulting from transforming the specified finite vector of * size K plus (K choose 2). * *

See cov_matrix_free() for the inverse transform. * * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x The vector to convert to a covariance matrix. * @param K The dimensions of the resulting covariance matrix. * @param lp Reference * @throws std::domain_error if (x.size() != K + (K choose 2)). */ template * = nullptr> var_value cov_matrix_constrain(const T& x, Eigen::Index K, scalar_type_t& lp) { using std::exp; using std::log; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "K + (K choose 2)", (K * (K + 1)) / 2); arena_t L_val = Eigen::MatrixXd::Zero(K, K); int pos = 0; for (Eigen::Index m = 0; m < K; ++m) { L_val.row(m).head(m) = x.val().segment(pos, m); pos += m; L_val.coeffRef(m, m) = exp(x.val().coeff(pos)); pos++; } // Jacobian for complete transform, including exp() above double lp_val = (K * LOG_TWO); for (Eigen::Index k = 0; k < K; ++k) { // only +1 because index from 0 lp_val += (K - k + 1) * log(L_val.coeff(k, k)); } lp += lp_val; var_value L = L_val; reverse_pass_callback([x, L, lp]() mutable { Eigen::Index K = L.rows(); for (Eigen::Index k = 0; k < K; ++k) { L.adj().coeffRef(k, k) += (K - k + 1) * lp.adj() / L.val().coeff(k, k); } int pos = x.size(); for (int m = K - 1; m >= 0; --m) { pos--; x.adj().coeffRef(pos) += L.adj().coeff(m, m) * L.val().coeff(m, m); pos -= m; x.adj().segment(pos, m) += L.adj().row(m).head(m); } }); return multiply_lower_tri_self_transpose(L); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/logit.hpp0000644000176200001440000000150214645137106022171 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOGIT_HPP #define STAN_MATH_REV_FUN_LOGIT_HPP #include #include #include #include namespace stan { namespace math { /** * Return the log odds of the specified argument. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param u The variable. 
 * @return log odds of argument
 */
template * = nullptr>
inline auto logit(const var_value& u) {
  // Reverse-mode chain rule: d/du logit(u) = 1 / (u - u^2).
  // The reciprocal is computed once and stored on the autodiff arena so
  // the reverse pass can reuse it without recomputation.
  auto denom = to_arena(1.0 / as_array_or_scalar(u.val() - square(u.val())));
  return make_callback_var(logit(u.val()), [u, denom](auto& vi) mutable {
    as_array_or_scalar(u.adj()) += as_array_or_scalar(vi.adj()) * denom;
  });
}

}  // namespace math
}  // namespace stan
#endif
StanHeaders/inst/include/stan/math/rev/fun/expm1.hpp0000644000176200001440000000237314645137106022114 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_EXPM1_HPP
#define STAN_MATH_REV_FUN_EXPM1_HPP

#include
#include
#include

namespace stan {
namespace math {

/**
 * The exponentiation of the specified variable minus 1 (C99).
 *
 * The derivative is given by
 *
 * \f$\frac{d}{dx} \exp(a) - 1 = \exp(a)\f$.
 *
 * \f[ \mbox{expm1}(x) = \begin{cases} e^x-1 & \mbox{if } -\infty\leq x
 \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases}
 \f]

 \f[ \frac{\partial\, \mbox{expm1}(x)}{\partial x} = \begin{cases} e^x &
 \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x =
 \textrm{NaN} \end{cases} \f]
 *
 * @param a The variable.
 * @return Natural exponential of the specified variable, minus one.
 */
inline var expm1(const var& a) {
  // The stored result is expm1(a) = exp(a) - 1, so the derivative exp(a)
  // is recovered as vi.val() + 1.0 without re-evaluating exp.
  return make_callback_var(expm1(a.val()), [a](auto& vi) mutable {
    a.adj() += vi.adj() * (vi.val() + 1.0);
  });
}

// Elementwise expm1 for matrix-valued autodiff variables; applies the
// same identity exp(a) = expm1(a) + 1 per coefficient.
template * = nullptr>
inline auto expm1(const var_value& a) {
  return make_callback_var(expm1(a.val()), [a](auto& vi) mutable {
    a.adj().array() += vi.adj().array() * (vi.val().array() + 1.0);
  });
}

}  // namespace math
}  // namespace stan
#endif
StanHeaders/inst/include/stan/math/rev/fun/log_diff_exp.hpp0000644000176200001440000000444514645137106023511 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_DIFF_EXP_HPP
#define STAN_MATH_REV_FUN_LOG_DIFF_EXP_HPP

#include
#include
#include
#include
#include

namespace stan {
namespace math {

namespace internal {

// vari for log_diff_exp(a, b) = log(exp(a) - exp(b)) with both arguments
// autodiff variables.  The partials follow from
//   d/da log(e^a - e^b) = -1 / expm1(b - a), and
//   d/db log(e^a - e^b) = -1 / expm1(a - b).
class log_diff_exp_vv_vari : public op_vv_vari {
 public:
  log_diff_exp_vv_vari(vari* avi, vari* bvi)
      : op_vv_vari(log_diff_exp(avi->val_, bvi->val_), avi, bvi) {}
  void chain() {
    avi_->adj_ -= adj_ / expm1(bvi_->val_ - avi_->val_);
    bvi_->adj_ -= adj_ / expm1(avi_->val_ - bvi_->val_);
  }
};

// vari for log_diff_exp(a, b) with the second argument held constant.
class log_diff_exp_vd_vari : public op_vd_vari {
 public:
  log_diff_exp_vd_vari(vari* avi, double b)
      : op_vd_vari(log_diff_exp(avi->val_, b), avi, b) {}
  void chain() {
    if (val_ == NEGATIVE_INFTY) {
      // Result is -inf: gradient diverges unless both arguments are
      // -inf, in which case the adjoint passes straight through.
      avi_->adj_ += (bd_ == NEGATIVE_INFTY) ? adj_ : adj_ * INFTY;
    } else {
      avi_->adj_ -= adj_ / expm1(bd_ - avi_->val_);
    }
  }
};

// vari for log_diff_exp(a, b) with the first argument held constant.
class log_diff_exp_dv_vari : public op_dv_vari {
 public:
  log_diff_exp_dv_vari(double a, vari* bvi)
      : op_dv_vari(log_diff_exp(a, bvi->val_), a, bvi) {}
  void chain() {
    if (val_ == NEGATIVE_INFTY) {
      // Result is -inf: the partial with respect to b diverges.
      bvi_->adj_ -= adj_ * INFTY;
    } else {
      bvi_->adj_ -= adj_ / expm1(ad_ - bvi_->val_);
    }
  }
};

}  // namespace internal

/**
 * Returns the log difference of the exponentiated arguments.
 *
 * @param[in] a First argument.
 * @param[in] b Second argument.
 * @return Log difference of the exponentiated arguments.
 */
inline var log_diff_exp(const var& a, const var& b) {
  return var(new internal::log_diff_exp_vv_vari(a.vi_, b.vi_));
}

/**
 * Returns the log difference of the exponentiated arguments.
 *
 * @param[in] a First argument.
 * @param[in] b Second argument (held constant).
 * @return Log difference of the exponentiated arguments.
 */
inline var log_diff_exp(const var& a, double b) {
  return var(new internal::log_diff_exp_vd_vari(a.vi_, b));
}

/**
 * Returns the log difference of the exponentiated arguments.
 *
 * @param[in] a First argument (held constant).
 * @param[in] b Second argument.
 * @return Log difference of the exponentiated arguments.
 */
inline var log_diff_exp(double a, const var& b) {
  return var(new internal::log_diff_exp_dv_vari(a, b.vi_));
}

}  // namespace math
}  // namespace stan
#endif
StanHeaders/inst/include/stan/math/rev/fun/cbrt.hpp0000644000176200001440000000303114645137106022004 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CBRT_HPP
#define STAN_MATH_REV_FUN_CBRT_HPP

#include
#include
#include

namespace stan {
namespace math {

/**
 * Returns the cube root of the specified variable (C99).
 *
 * The derivative is
 *
 * \f$\frac{d}{dx} x^{1/3} = \frac{1}{3 x^{2/3}}\f$.
 *
 \f[ \mbox{cbrt}(x) = \begin{cases} \sqrt[3]{x} & \mbox{if } -\infty\leq x
 \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases}
 \f]

 \f[ \frac{\partial\, \mbox{cbrt}(x)}{\partial x} = \begin{cases}
 \frac{1}{3x^{2/3}} & \mbox{if } -\infty\leq x\leq \infty \\[6pt]
 \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f]
 *
 * @param a Specified variable.
 * @return Cube root of the variable.
 */
inline var cbrt(const var& a) {
  // vi.val() caches cbrt(a), so 1 / (3 a^{2/3}) is 1 / (3 * cbrt(a)^2)
  // and no extra cube-root evaluation is needed in the reverse pass.
  return make_callback_var(cbrt(a.val()), [a](const auto& vi) mutable {
    a.adj() += vi.adj() / (3.0 * vi.val() * vi.val());
  });
}

/**
 * Returns the cube root of the specified variable (C99).
 * @tparam Varmat a `var_value` with inner Eigen type
 * @param a Specified variable.
 * @return Cube root of the variable.
*/ template * = nullptr> inline auto cbrt(const VarMat& a) { return make_callback_var( a.val().unaryExpr([](const auto x) { return cbrt(x); }), [a](const auto& vi) mutable { a.adj().array() += vi.adj().array() / (3.0 * vi.val().array().square()); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/binary_log_loss.hpp0000644000176200001440000000566014645137106024251 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_BINARY_LOG_LOSS_HPP #define STAN_MATH_REV_FUN_BINARY_LOG_LOSS_HPP #include #include #include #include namespace stan { namespace math { /** * The log loss function for variables (stan). * * See binary_log_loss() for the double-based version. * * The derivative with respect to the variable \f$\hat{y}\f$ is * * \f$\frac{d}{d\hat{y}} \mbox{logloss}(1, \hat{y}) = - \frac{1}{\hat{y}}\f$, and * * \f$\frac{d}{d\hat{y}} \mbox{logloss}(0, \hat{y}) = \frac{1}{1 - \hat{y}}\f$. * * \f[ \mbox{binary\_log\_loss}(y, \hat{y}) = \begin{cases} y \log \hat{y} + (1 - y) \log (1 - \hat{y}) & \mbox{if } 0\leq \hat{y}\leq 1, y\in\{ 0, 1 \}\\[6pt] \textrm{NaN} & \mbox{if } \hat{y} = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{binary\_log\_loss}(y, \hat{y})}{\partial \hat{y}} = \begin{cases} \frac{y}{\hat{y}}-\frac{1-y}{1-\hat{y}} & \mbox{if } 0\leq \hat{y}\leq 1, y\in\{ 0, 1 \}\\[6pt] \textrm{NaN} & \mbox{if } \hat{y} = \textrm{NaN} \end{cases} \f] * * @param y Reference value. * @param y_hat Response variable. * @return Log loss of response versus reference value. 
*/ inline var binary_log_loss(int y, const var& y_hat) { if (y == 0) { return make_callback_var(-log1p(-y_hat.val()), [y_hat](auto& vi) mutable { y_hat.adj() += vi.adj() / (1.0 - y_hat.val()); }); } else { return make_callback_var(-std::log(y_hat.val()), [y_hat](auto& vi) mutable { y_hat.adj() -= vi.adj() / y_hat.val(); }); } } /** * Overload with `int` and `var_value` */ template * = nullptr> inline auto binary_log_loss(int y, const var_value& y_hat) { if (y == 0) { return make_callback_var( -(-y_hat.val().array()).log1p(), [y_hat](auto& vi) mutable { y_hat.adj().array() += vi.adj().array() / (1.0 - y_hat.val().array()); }); } else { return make_callback_var( -y_hat.val().array().log(), [y_hat](auto& vi) mutable { y_hat.adj().array() -= vi.adj().array() / y_hat.val().array(); }); } } /** * Overload with `var_value` for `std::vector` and * `std::vector>` */ template * = nullptr, require_st_integral* = nullptr> inline auto binary_log_loss(const StdVec& y, const var_value& y_hat) { auto arena_y = to_arena(as_array_or_scalar(y).template cast()); auto ret_val = -(arena_y == 0) .select((-y_hat.val().array()).log1p(), y_hat.val().array().log()); return make_callback_var(ret_val, [y_hat, arena_y](auto& vi) mutable { y_hat.adj().array() += vi.adj().array() / (arena_y == 0) .select((1.0 - y_hat.val().array()), -y_hat.val().array()); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/svd_U.hpp0000644000176200001440000000432414645137106022140 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SVD_U_HPP #define STAN_MATH_REV_FUN_SVD_U_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Given input matrix m, return matrix U where `m = UDV^{T}` * * Adjoint update equation comes from Equation (4) in Differentiable Programming * Tensor Networks(H. Liao, J. Liu, et al., arXiv:1903.09650). 
* * @tparam EigMat type of input matrix * @param m MxN input matrix * @return Orthogonal matrix U */ template * = nullptr> inline auto svd_U(const EigMat& m) { using ret_type = return_var_matrix_t; if (unlikely(m.size() == 0)) { return ret_type(Eigen::MatrixXd(0, 0)); } const int M = std::min(m.rows(), m.cols()); auto arena_m = to_arena(m); Eigen::JacobiSVD svd( arena_m.val(), Eigen::ComputeThinU | Eigen::ComputeThinV); auto arena_D = to_arena(svd.singularValues()); arena_t arena_Fp(M, M); for (int i = 0; i < M; i++) { for (int j = 0; j < M; j++) { if (j == i) { arena_Fp(i, j) = 0.0; } else { arena_Fp(i, j) = 1.0 / (arena_D[j] - arena_D[i]) + 1.0 / (arena_D[i] + arena_D[j]); } } } arena_t arena_U = svd.matrixU(); auto arena_V = to_arena(svd.matrixV()); reverse_pass_callback([arena_m, arena_U, arena_D, arena_V, arena_Fp]() mutable { Eigen::MatrixXd UUadjT = arena_U.val_op().transpose() * arena_U.adj_op(); arena_m.adj() += .5 * arena_U.val_op() * (arena_Fp.array() * (UUadjT - UUadjT.transpose()).array()) .matrix() * arena_V.transpose() + (Eigen::MatrixXd::Identity(arena_m.rows(), arena_m.rows()) - arena_U.val_op() * arena_U.val_op().transpose()) * arena_U.adj_op() * arena_D.asDiagonal().inverse() * arena_V.transpose(); }); return ret_type(arena_U); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/lambert_w.hpp0000644000176200001440000000310314645137106023026 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LAMBERT_W_HPP #define STAN_MATH_REV_FUN_LAMBERT_W_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the Lambert W function on W0 branch applied to the specified variable. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a Variable argument. * @return the Lambert W function (W0 branch) applied to the specified argument. 
 */
template * = nullptr>
inline auto lambert_w0(const var_value& a) {
  // dW0/da = 1 / (a + exp(W0(a))); vi.val() caches W0(a) from the
  // forward pass, so no extra Lambert-W evaluation is required here.
  return make_callback_var(lambert_w0(a.val()), [a](auto& vi) mutable {
    as_array_or_scalar(a.adj())
        += (as_array_or_scalar(vi.adj())
            / as_array_or_scalar(a.val() + exp(vi.val())));
  });
}

/**
 * Return the Lambert W function on W-1 branch applied to the specified
 * variable.
 *
 * @tparam T Arithmetic or a type inheriting from `EigenBase`.
 * @param a Variable argument.
 * @return the Lambert W function (W-1 branch) applied to the specified
 * argument.
 */
template * = nullptr>
inline auto lambert_wm1(const var_value& a) {
  // Same derivative form as the W0 branch: dW/da = 1 / (a + exp(W(a))).
  return make_callback_var(lambert_wm1(a.val()), [a](auto& vi) mutable {
    as_array_or_scalar(a.adj())
        += (as_array_or_scalar(vi.adj())
            / as_array_or_scalar(a.val() + exp(vi.val())));
  });
}

}  // namespace math
}  // namespace stan
#endif
StanHeaders/inst/include/stan/math/rev/fun/trunc.hpp0000644000176200001440000000222714645137106022213 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TRUNC_HPP
#define STAN_MATH_REV_FUN_TRUNC_HPP

#include
#include
#include
#include
#include

namespace stan {
namespace math {

/**
 * Returns the truncation of the specified variable (C99).
 *
 * See ::trunc() for the double-based version.
 *
 * The derivative is zero everywhere but at integer values, so for
 * convenience the derivative is defined to be everywhere zero,
 *
 * \f$\frac{d}{dx} \mbox{trunc}(x) = 0\f$.
 *
 \f[ \mbox{trunc}(x) = \begin{cases} \lfloor x \rfloor & \mbox{if }
 -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x =
 \textrm{NaN} \end{cases} \f]

 \f[ \frac{\partial\, \mbox{trunc}(x)}{\partial x} = \begin{cases} 0 &
 \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x =
 \textrm{NaN} \end{cases} \f]
 *
 * @param a Specified variable.
 * @return Truncation of the variable.
*/ inline var trunc(const var& a) { return var(trunc(a.val())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/pow.hpp0000644000176200001440000003251114645137106021664 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_POW_HPP #define STAN_MATH_REV_FUN_POW_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the base raised to the power of the exponent (cmath). * * The partial derivatives are * * \f$\frac{\partial}{\partial x} \mbox{pow}(x, y) = y x^{y-1}\f$, and * * \f$\frac{\partial}{\partial y} \mbox{pow}(x, y) = x^y \ \log x\f$. * * \f[ \mbox{pow}(x, y) = \begin{cases} x^y & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{pow}(x, y)}{\partial x} = \begin{cases} yx^{y-1} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{pow}(x, y)}{\partial y} = \begin{cases} x^y\ln x & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param base Base variable. * @param exponent Exponent variable. * @return Base raised to the exponent. 
*/ template * = nullptr, require_all_stan_scalar_t* = nullptr> inline var pow(const Scal1& base, const Scal2& exponent) { if (is_constant::value) { if (exponent == 0.5) { return sqrt(base); } else if (exponent == 1.0) { return base; } else if (exponent == 2.0) { return square(base); } else if (exponent == -2.0) { return inv_square(base); } else if (exponent == -1.0) { return inv(base); } else if (exponent == -0.5) { return inv_sqrt(base); } } return make_callback_var( std::pow(value_of(base), value_of(exponent)), [base, exponent](auto&& vi) mutable { if (value_of(base) == 0.0) { return; // partials zero, avoids 0 & log(0) } const double vi_mul = vi.adj() * vi.val(); if (!is_constant::value) { forward_as(base).adj() += vi_mul * value_of(exponent) / value_of(base); } if (!is_constant::value) { forward_as(exponent).adj() += vi_mul * std::log(value_of(base)); } }); } /** * Return the base raised to the power of the exponent (cmath). For matrices * this is performed elementwise. * @tparam Mat1 An Eigen type deriving from Eigen::EigenBase, a standard vector, * or a `var_value` with inner Eigen type as defined above. The `scalar_type` * must be a `var`. * @tparam Mat2 An Eigen type deriving from Eigen::EigenBase, a standard vector, * or a `var_value` with inner Eigen type as defined above. The `scalar_type` * must be a `var`. * @param base Base variable. * @param exponent Exponent variable. * @return Base raised to the exponent. 
*/ template * = nullptr, require_any_matrix_st* = nullptr, require_all_not_stan_scalar_t* = nullptr> inline auto pow(const Mat1& base, const Mat2& exponent) { check_consistent_sizes("pow", "base", base, "exponent", exponent); using expr_type = decltype(as_array_or_scalar(value_of(base)) .pow(as_array_or_scalar(value_of(exponent)))); using val_type = std::conditional_t< math::disjunction, is_eigen_array>::value, decltype(std::declval().eval()), decltype(std::declval().matrix().eval())>; using ret_type = return_var_matrix_t; using base_t = decltype(as_array_or_scalar(base)); using exp_t = decltype(as_array_or_scalar(exponent)); using base_arena_t = arena_t; using exp_arena_t = arena_t; base_arena_t arena_base = as_array_or_scalar(base); exp_arena_t arena_exponent = as_array_or_scalar(exponent); arena_t ret = value_of(arena_base).pow(value_of(arena_exponent)).matrix(); reverse_pass_callback([arena_base, arena_exponent, ret]() mutable { const auto& are_vals_zero = to_ref(value_of(arena_base) != 0.0); const auto& ret_mul = to_ref(ret.adj().array() * ret.val().array()); if (!is_constant::value) { using base_var_arena_t = arena_t>; forward_as(arena_base).adj() += (are_vals_zero) .select( ret_mul * value_of(arena_exponent) / value_of(arena_base), 0); } if (!is_constant::value) { using exp_var_arena_t = arena_t>; forward_as(arena_exponent).adj() += (are_vals_zero).select(ret_mul * value_of(arena_base).log(), 0); } }); return ret_type(ret); } /** * Return the base raised to the power of the exponent (cmath). For matrices * this is performed elementwise. * @tparam Mat1 An Eigen type deriving from Eigen::EigenBase or * a `var_value` with inner Eigen type as defined above. The `scalar_type` * must be a `var` or Arithmetic. * @param base Base variable. * @param exponent Exponent variable. * @return Base raised to the exponent. 
*/ template * = nullptr, require_all_matrix_st* = nullptr, require_stan_scalar_t* = nullptr> inline auto pow(const Mat1& base, const Scal1& exponent) { using ret_type = promote_scalar_t>; if (is_constant::value) { if (exponent == 0.5) { return ret_type(sqrt(base)); } else if (exponent == 1.0) { return ret_type(base); } else if (exponent == 2.0) { return ret_type(square(base)); } else if (exponent == -2.0) { return ret_type(inv_square(base)); } else if (exponent == -1.0) { return ret_type(inv(base)); } else if (exponent == -0.5) { return ret_type(inv_sqrt(base)); } } arena_t> arena_base = base; arena_t ret = value_of(arena_base).array().pow(value_of(exponent)).matrix(); reverse_pass_callback([arena_base, exponent, ret]() mutable { const auto& are_vals_zero = to_ref(value_of(arena_base).array() != 0.0); const auto& ret_mul = to_ref(ret.adj().array() * ret.val().array()); if (!is_constant::value) { forward_as(arena_base).adj().array() += (are_vals_zero) .select(ret_mul * value_of(exponent) / value_of(arena_base).array(), 0); } if (!is_constant::value) { forward_as(exponent).adj() += (are_vals_zero) .select(ret_mul * value_of(arena_base).array().log(), 0) .sum(); } }); return ret_type(ret); } /** * Return the base scalar raised to the power of the exponent * matrix elementwise. * * The derivative for the variable is * * \f$\frac{d}{d y} \mbox{pow}(c, y) = c^y \log c \f$. * * * @tparam Mat An Eigen type deriving from Eigen::EigenBase or * a `var_value` with inner Eigen type as defined above. The `scalar_type` * must be a `var`. * * @param base Base scalar. * @param exponent Exponent variable. * @return Base raised to the exponent. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_matrix_st* = nullptr> inline auto pow(Scal1 base, const Mat1& exponent) { using ret_type = promote_scalar_t>; arena_t arena_exponent = exponent; arena_t ret = Eigen::pow(value_of(base), value_of(arena_exponent).array()); reverse_pass_callback([base, arena_exponent, ret]() mutable { if (unlikely(value_of(base) == 0.0)) { return; // partials zero, avoids 0 & log(0) } const auto& ret_mul = to_ref(ret.adj().array() * ret.val().array()); if (!is_constant::value) { forward_as(base).adj() += (ret_mul * value_of(arena_exponent).array() / value_of(base)) .sum(); } if (!is_constant::value) { forward_as(arena_exponent).adj().array() += ret_mul * std::log(value_of(base)); } }); return ret_type(ret); } // must uniquely match all pairs of { complex, complex, var, T } // with at least one var and at least one complex, where T is arithmetic: // 1) complex, complex // 2) complex, complex // 3) complex, var // 4) complex, T // 5) complex, complex // 6) complex, var // 7) var, complex // 8) var, complex // 9) T, complex /** * Return the first argument raised to the power of the second argument. * * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ inline std::complex pow(const std::complex& x, const std::complex& y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. * * @tparam T arithmetic type * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ template > inline std::complex pow(const std::complex& x, const std::complex y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. 
* * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ inline std::complex pow(const std::complex& x, const var& y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. * * @tparam T arithmetic type * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ template > inline std::complex pow(const std::complex& x, T y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. * * @tparam T arithmetic type * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ template > inline std::complex pow(std::complex x, const std::complex& y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. * * @tparam T arithmetic type * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ template > inline std::complex pow(std::complex x, const var& y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. * * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ inline std::complex pow(const var& x, const std::complex& y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. * * @tparam T arithmetic type * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ template > inline std::complex pow(const var& x, std::complex y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. 
* * @tparam T arithmetic type * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ template > inline std::complex pow(T x, const std::complex& y) { return internal::complex_pow(x, y); } /** * Return the first argument raised to the power of the second argument. * * Note: this overload is required because gcc still provides the * C++99 template function `pow(complex, int)`, which introduces * an ambiguity. * * @param x first argument * @param y second argument * @return first argument to the power of the second argument */ inline std::complex pow(const std::complex& x, int y) { return internal::complex_pow(x, y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/to_vector.hpp0000644000176200001440000000174414645137106023067 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TO_VECTOR_HPP #define STAN_MATH_REV_FUN_TO_VECTOR_HPP #include #include #include namespace stan { namespace math { /** * Reshape a `var_value` to a `var_value`. * @tparam EigMat Inner type of the `var_value` that must inherit from * `Eigen::EigenBase`. * @param x A var whose inner matrix type is to be reshaped to an Column matrix. * @return A view of the original `x` with inner value and adjoint matrices * mapped to a column vector. */ template * = nullptr> inline auto to_vector(const var_value& x) { using view_type = Eigen::Map>; return var_value(new vari_view( view_type(x.vi_->val_.data(), x.rows() * x.cols()), view_type(x.vi_->adj_.data(), x.rows() * x.cols()))); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/proj.hpp0000644000176200001440000000113414645137106022026 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_PROJ_HPP #define STAN_MATH_REV_FUN_PROJ_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the projection of the complex argument onto the Riemann * sphere. 
* * @param[in] z argument * @return projection of the argument onto the Riemann sphere */ inline std::complex proj(const std::complex& z) { return internal::complex_proj(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/squared_distance.hpp0000644000176200001440000001717714645137106024410 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SQUARED_DISTANCE_HPP #define STAN_MATH_REV_FUN_SQUARED_DISTANCE_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the squared distance. */ inline var squared_distance(const var& a, const var& b) { check_finite("squared_distance", "a", a); check_finite("squared_distance", "b", b); return make_callback_vari(std::pow(a.val() - b.val(), 2), [a, b](const auto& vi) mutable { const double diff = 2.0 * (a.val() - b.val()); a.adj() += vi.adj_ * diff; b.adj() -= vi.adj_ * diff; }); } /** * Returns the squared distance. */ inline var squared_distance(const var& a, double b) { check_finite("squared_distance", "a", a); check_finite("squared_distance", "b", b); return make_callback_vari(std::pow(a.val() - b, 2), [a, b](const auto& vi) mutable { a.adj() += vi.adj_ * 2.0 * (a.val() - b); }); } /** * Returns the squared distance. 
*/ inline var squared_distance(double a, const var& b) { return squared_distance(b, a); } namespace internal { class squared_distance_vv_vari : public vari { protected: vari** v1_; vari** v2_; size_t length_; public: template < typename EigVecVar1, typename EigVecVar2, require_all_eigen_vector_vt* = nullptr> squared_distance_vv_vari(const EigVecVar1& v1, const EigVecVar2& v2) : vari((as_column_vector_or_scalar(v1).val() - as_column_vector_or_scalar(v2).val()) .squaredNorm()), length_(v1.size()) { v1_ = reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(length_ * sizeof(vari*))); v2_ = reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(length_ * sizeof(vari*))); Eigen::Map(v1_, length_) = v1.vi(); Eigen::Map(v2_, length_) = v2.vi(); } virtual void chain() { Eigen::Map v1_map(v1_, length_); Eigen::Map v2_map(v2_, length_); vector_d di = 2 * adj_ * (v1_map.val() - v2_map.val()); v1_map.adj() += di; v2_map.adj() -= di; } }; class squared_distance_vd_vari : public vari { protected: vari** v1_; double* v2_; size_t length_; public: template * = nullptr, require_eigen_vector_vt* = nullptr> squared_distance_vd_vari(const EigVecVar& v1, const EigVecArith& v2) : vari((as_column_vector_or_scalar(v1).val() - as_column_vector_or_scalar(v2)) .squaredNorm()), length_(v1.size()) { v1_ = reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(length_ * sizeof(vari*))); v2_ = reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(length_ * sizeof(double))); Eigen::Map(v1_, length_) = v1.vi(); Eigen::Map(v2_, length_) = v2; } virtual void chain() { Eigen::Map v1_map(v1_, length_); v1_map.adj() += 2 * adj_ * (v1_map.val() - Eigen::Map(v2_, length_)); } }; } // namespace internal template < typename EigVecVar1, typename EigVecVar2, require_all_eigen_vector_vt* = nullptr> inline var squared_distance(const EigVecVar1& v1, const EigVecVar2& v2) { check_matching_sizes("squared_distance", "v1", v1, "v2", v2); return {new 
internal::squared_distance_vv_vari(to_ref(v1), to_ref(v2))}; } template * = nullptr, require_eigen_vector_vt* = nullptr> inline var squared_distance(const EigVecVar& v1, const EigVecArith& v2) { check_matching_sizes("squared_distance", "v1", v1, "v2", v2); return {new internal::squared_distance_vd_vari(to_ref(v1), to_ref(v2))}; } template * = nullptr, require_eigen_vector_vt* = nullptr> inline var squared_distance(const EigVecArith& v1, const EigVecVar& v2) { check_matching_sizes("squared_distance", "v1", v1, "v2", v2); return {new internal::squared_distance_vd_vari(to_ref(v2), to_ref(v1))}; } /** * Compute the squared distance between the elements in * two inputs. * * This overload handles arguments where one of T1 or T2 are * `var_value` where `T` is an Eigen type. The other type can * also be a `var_value` or it can be a matrix type that inherits * from EigenBase * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param A first argument * @param B second argument * @return sum of squared difference of A and B */ template * = nullptr, require_any_var_vector_t* = nullptr> inline var squared_distance(const T1& A, const T2& B) { check_matching_sizes("squared_distance", "A", A.val(), "B", B.val()); if (unlikely(A.size() == 0)) { return var(0.0); } else if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; arena_t res_diff(arena_A.size()); double res_val = 0.0; for (size_t i = 0; i < arena_A.size(); ++i) { const double diff = arena_A.val().coeff(i) - arena_B.val().coeff(i); res_diff.coeffRef(i) = diff; res_val += diff * diff; } return var(make_callback_vari( res_val, [arena_A, arena_B, res_diff](const auto& res) mutable { const double res_adj = 2.0 * res.adj(); for (size_t i = 0; i < arena_A.size(); ++i) { const double diff = res_adj * res_diff.coeff(i); arena_A.adj().coeffRef(i) += diff; arena_B.adj().coeffRef(i) -= diff; } })); } else if (!is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = 
value_of(B); arena_t res_diff(arena_A.size()); double res_val = 0.0; for (size_t i = 0; i < arena_A.size(); ++i) { const double diff = arena_A.val().coeff(i) - arena_B.coeff(i); res_diff.coeffRef(i) = diff; res_val += diff * diff; } return var(make_callback_vari( res_val, [arena_A, arena_B, res_diff](const auto& res) mutable { arena_A.adj() += 2.0 * res.adj() * res_diff; })); } else { arena_t> arena_A = value_of(A); arena_t> arena_B = B; arena_t res_diff(arena_A.size()); double res_val = 0.0; for (size_t i = 0; i < arena_A.size(); ++i) { const double diff = arena_A.coeff(i) - arena_B.val().coeff(i); res_diff.coeffRef(i) = diff; res_val += diff * diff; } return var(make_callback_vari( res_val, [arena_A, arena_B, res_diff](const auto& res) mutable { arena_B.adj() -= 2.0 * res.adj() * res_diff; })); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/read_corr_matrix.hpp0000644000176200001440000000455214645137106024407 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_READ_CORR_MATRIX_HPP #define STAN_MATH_REV_FUN_READ_CORR_MATRIX_HPP #include #include #include #include namespace stan { namespace math { /** * Return the correlation matrix of the specified dimensionality * corresponding to the specified canonical partial correlations. * *

See read_corr_matrix(Array, size_t, T) * for more information. * * @tparam T_CPCs type of input vector (must be a `var_value` where `T` * inherits from EigenBase) * @param CPCs The (K choose 2) canonical partial correlations in (-1, 1). * @param K Dimensionality of correlation matrix. * @return Cholesky factor of correlation matrix for specified * canonical partial correlations. */ template * = nullptr> inline var_value read_corr_matrix(const T_CPCs& CPCs, size_t K) { if (K == 0) { return Eigen::MatrixXd(); } return multiply_lower_tri_self_transpose(read_corr_L(CPCs, K)); } /** * Return the correlation matrix of the specified dimensionality * corresponding to the specified canonical partial correlations, * incrementing the specified scalar reference with the log * absolute determinant of the Jacobian of the transformation. * * It is usually preferable to utilize the version that returns * the Cholesky factor of the correlation matrix rather than the * correlation matrix itself in statistical calculations. * * @tparam T_CPCs type of input vector (must be a `var_value` where `T` * inherits from EigenBase) * @param CPCs The (K choose 2) canonical partial correlations in * (-1, 1). * @param K Dimensionality of correlation matrix. * @param log_prob Reference to variable to increment with the log * Jacobian determinant. * @return Correlation matrix for specified partial correlations. 
*/ template * = nullptr> inline var_value read_corr_matrix( const T_CPCs& CPCs, size_t K, scalar_type_t& log_prob) { if (K == 0) { return Eigen::MatrixXd(); } return multiply_lower_tri_self_transpose(read_corr_L(CPCs, K, log_prob)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/generalized_inverse.hpp0000644000176200001440000000731514645137106025107 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_GENERALIZED_INVERSE_HPP #define STAN_MATH_REV_FUN_GENERALIZED_INVERSE_HPP #include #include #include #include #include namespace stan { namespace math { namespace internal { /* * Reverse mode specialization of calculating the generalized inverse of a * matrix. *

  • Golub, G.H. and Pereyra, V. The Differentiation of Pseudo-Inverses * and Nonlinear Least Squares Problems Whose Variables Separate. SIAM * Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973), pp. * 413-432
*/ template inline auto generalized_inverse_lambda(T1& G_arena, T2& inv_G) { return [G_arena, inv_G]() mutable { G_arena.adj() += -(inv_G.val_op().transpose() * inv_G.adj_op() * inv_G.val_op().transpose()) + (-G_arena.val_op() * inv_G.val_op() + Eigen::MatrixXd::Identity(G_arena.rows(), inv_G.cols())) * inv_G.adj_op().transpose() * inv_G.val_op() * inv_G.val_op().transpose() + inv_G.val_op().transpose() * inv_G.val_op() * inv_G.adj_op().transpose() * (-inv_G.val_op() * G_arena.val_op() + Eigen::MatrixXd::Identity(inv_G.rows(), G_arena.cols())); }; } } // namespace internal /* * Reverse mode specialization of calculating the generalized inverse of a * matrix. * * @param G specified matrix * @return Generalized inverse of the matrix (an empty matrix if the specified * matrix has size zero). * * @note For the derivatives of this function to exist the matrix must be * of constant rank. * Reverse mode differentiation algorithm reference: * *
  • Golub, G.H. and Pereyra, V. The Differentiation of Pseudo-Inverses * and Nonlinear Least Squares Problems Whose Variables Separate. SIAM * Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973), pp. * 413-432
* * Equation 4.12 in the paper * * See also * http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse * */ template * = nullptr> inline auto generalized_inverse(const VarMat& G) { using ret_type = promote_var_matrix_t; if (G.size() == 0) return ret_type(G); if (G.rows() == G.cols()) { arena_t G_arena(G); Eigen::CompleteOrthogonalDecomposition complete_ortho_decomp_G = G_arena.val().completeOrthogonalDecomposition(); if (!(complete_ortho_decomp_G.rank() < G.rows())) { return ret_type(inverse(G_arena)); } else { arena_t inv_G(complete_ortho_decomp_G.pseudoInverse()); reverse_pass_callback( internal::generalized_inverse_lambda(G_arena, inv_G)); return ret_type(inv_G); } } else if (G.rows() < G.cols()) { arena_t G_arena(G); arena_t inv_G((G_arena.val_op() * G_arena.val_op().transpose()) .ldlt() .solve(G_arena.val_op()) .transpose()); reverse_pass_callback(internal::generalized_inverse_lambda(G_arena, inv_G)); return ret_type(inv_G); } else { arena_t G_arena(G); arena_t inv_G((G_arena.val_op().transpose() * G_arena.val_op()) .ldlt() .solve(G_arena.val_op().transpose())); reverse_pass_callback(internal::generalized_inverse_lambda(G_arena, inv_G)); return ret_type(inv_G); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/corr_matrix_constrain.hpp0000644000176200001440000000617114645137106025473 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CORR_MATRIX_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_CORR_MATRIX_CONSTRAIN_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the correlation matrix of the specified dimensionality * derived from the specified vector of unconstrained values. The * input vector must be of length \f${k \choose 2} = * \frac{k(k-1)}{2}\f$. The values in the input vector represent * unconstrained (partial) correlations among the dimensions. * *

The transform based on partial correlations is as specified * in * *

  • Lewandowski, Daniel, Dorota Kurowicka, and Harry * Joe. 2009. Generating random correlation matrices based on * vines and extended onion method. Journal of Multivariate * Analysis 100:1989–-2001.
* *

The free vector entries are first constrained to be * valid correlation values using corr_constrain(T). * * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x Vector of unconstrained partial correlations. * @param k Dimensionality of returned correlation matrix. * @throw std::invalid_argument if x is not a valid correlation * matrix. */ template * = nullptr> var_value corr_matrix_constrain(const T& x, Eigen::Index k) { Eigen::Index k_choose_2 = (k * (k - 1)) / 2; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "k_choose_2", k_choose_2); return read_corr_matrix(corr_constrain(x), k); } /** * Return the correlation matrix of the specified dimensionality * derived from the specified vector of unconstrained values. The * input vector must be of length \f${k \choose 2} = * \frac{k(k-1)}{2}\f$. The values in the input vector represent * unconstrained (partial) correlations among the dimensions. * *

The transform is as specified for * corr_matrix_constrain(Matrix, size_t); the * paper it cites also defines the Jacobians for correlation inputs, * which are composed with the correlation constrained Jacobians * defined in corr_constrain(T, double) for * this function. * * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x Vector of unconstrained partial correlations. * @param k Dimensionality of returned correlation matrix. * @param lp Log probability reference to increment. */ template * = nullptr> var_value corr_matrix_constrain(const T& x, Eigen::Index k, scalar_type_t& lp) { Eigen::Index k_choose_2 = (k * (k - 1)) / 2; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "k_choose_2", k_choose_2); return read_corr_matrix(corr_constrain(x, lp), k, lp); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_falling_factorial.hpp0000644000176200001440000000425214645137106025361 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_FALLING_FACTORIAL_HPP #define STAN_MATH_REV_FUN_LOG_FALLING_FACTORIAL_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class log_falling_factorial_vv_vari : public op_vv_vari { public: log_falling_factorial_vv_vari(vari* avi, vari* bvi) : op_vv_vari(log_falling_factorial(avi->val_, bvi->val_), avi, bvi) {} void chain() { if (unlikely(is_any_nan(avi_->val_, bvi_->val_))) { avi_->adj_ = NOT_A_NUMBER; bvi_->adj_ = NOT_A_NUMBER; } else { avi_->adj_ += adj_ * (digamma(avi_->val_ + 1) - digamma(avi_->val_ - bvi_->val_ + 1)); bvi_->adj_ += adj_ * digamma(avi_->val_ - bvi_->val_ + 1); } } }; class log_falling_factorial_vd_vari : public op_vd_vari { public: log_falling_factorial_vd_vari(vari* avi, double b) : op_vd_vari(log_falling_factorial(avi->val_, b), avi, b) {} void chain() { if (unlikely(is_any_nan(avi_->val_, bd_))) { avi_->adj_ = NOT_A_NUMBER; } else { avi_->adj_ += adj_ * 
(digamma(avi_->val_ + 1) - digamma(avi_->val_ - bd_ + 1)); } } }; class log_falling_factorial_dv_vari : public op_dv_vari { public: log_falling_factorial_dv_vari(double a, vari* bvi) : op_dv_vari(log_falling_factorial(a, bvi->val_), a, bvi) {} void chain() { if (unlikely(is_any_nan(ad_, bvi_->val_))) { bvi_->adj_ = NOT_A_NUMBER; } else { bvi_->adj_ += adj_ * digamma(ad_ - bvi_->val_ + 1); } } }; } // namespace internal inline var log_falling_factorial(const var& a, double b) { return var(new internal::log_falling_factorial_vd_vari(a.vi_, b)); } inline var log_falling_factorial(const var& a, const var& b) { return var(new internal::log_falling_factorial_vv_vari(a.vi_, b.vi_)); } inline var log_falling_factorial(double a, const var& b) { return var(new internal::log_falling_factorial_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/append_col.hpp0000644000176200001440000001345014645137106023164 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_APPEND_COL_HPP #define STAN_MATH_REV_FUN_APPEND_COL_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the result of appending the second argument matrix after the * first argument matrix, that is, putting them side by side, with * the first matrix followed by the second matrix. * * Given input types result in following outputs: * (matrix, matrix) -> matrix, * (matrix, vector) -> matrix, * (vector, matrix) -> matrix, * (vector, vector) -> matrix, * (row vector, row vector) -> row_vector. * * @tparam T1 A `var_value` with inner matrix type. * @tparam T1 A `var_value` with inner matrix type. * * @param A First matrix. * @param B Second matrix. * @return Result of appending the first matrix followed by the * second matrix side by side. 
*/ template * = nullptr> inline auto append_col(const T1& A, const T2& B) { check_size_match("append_col", "columns of A", A.rows(), "columns of B", B.rows()); if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; return make_callback_var( append_col(value_of(arena_A), value_of(arena_B)), [arena_A, arena_B](auto& vi) mutable { arena_A.adj() += vi.adj().leftCols(arena_A.cols()); arena_B.adj() += vi.adj().rightCols(arena_B.cols()); }); } else if (!is_constant::value) { arena_t> arena_A = A; return make_callback_var(append_col(value_of(arena_A), value_of(B)), [arena_A](auto& vi) mutable { arena_A.adj() += vi.adj().leftCols(arena_A.cols()); }); } else { arena_t> arena_B = B; return make_callback_var(append_col(value_of(A), value_of(arena_B)), [arena_B](auto& vi) mutable { arena_B.adj() += vi.adj().rightCols(arena_B.cols()); }); } } /** * Return the result of stacking an scalar on top of the * a row vector, with the result being a row vector. * * This function applies to (scalar, row vector) and returns a * row vector. * * @tparam Scal type of the scalar * @tparam RowVec A `var_value` with an inner type of row vector. * * @param A scalar. * @param B row vector. * @return Result of stacking the scalar on top of the row vector. 
*/ template * = nullptr, require_t>* = nullptr> inline auto append_col(const Scal& A, const var_value& B) { if (!is_constant::value && !is_constant::value) { var arena_A = A; arena_t> arena_B = B; return make_callback_var(append_col(value_of(arena_A), value_of(arena_B)), [arena_A, arena_B](auto& vi) mutable { arena_A.adj() += vi.adj().coeff(0); arena_B.adj() += vi.adj().tail(arena_B.size()); }); } else if (!is_constant::value) { var arena_A = A; return make_callback_var( append_col(value_of(arena_A), value_of(B)), [arena_A](auto& vi) mutable { arena_A.adj() += vi.adj().coeff(0); }); } else { arena_t> arena_B = B; return make_callback_var(append_col(value_of(A), value_of(arena_B)), [arena_B](auto& vi) mutable { arena_B.adj() += vi.adj().tail(arena_B.size()); }); } } /** * Return the result of stacking a row vector on top of the * an scalar, with the result being a row vector. * * This function applies to (row vector, scalar) and returns a * row vector. * * @tparam RowVec A `var_value` with an inner type of row vector. * @tparam Scal type of the scalar * * @param A row vector. * @param B scalar. * @return Result of stacking the row vector on top of the scalar. 
*/ template >* = nullptr, require_stan_scalar_t* = nullptr> inline auto append_col(const var_value& A, const Scal& B) { if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; var arena_B = B; return make_callback_var(append_col(value_of(arena_A), value_of(arena_B)), [arena_A, arena_B](auto& vi) mutable { arena_A.adj() += vi.adj().head(arena_A.size()); arena_B.adj() += vi.adj().coeff(vi.adj().size() - 1); }); } else if (!is_constant::value) { arena_t> arena_A = A; return make_callback_var(append_col(value_of(arena_A), value_of(B)), [arena_A](auto& vi) mutable { arena_A.adj() += vi.adj().head(arena_A.size()); }); } else { var arena_B = B; return make_callback_var(append_col(value_of(A), value_of(arena_B)), [arena_B](auto& vi) mutable { arena_B.adj() += vi.adj().coeff(vi.adj().size() - 1); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log1m_exp.hpp0000644000176200001440000000174714645137106022761 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG1M_EXP_HPP #define STAN_MATH_REV_FUN_LOG1M_EXP_HPP #include #include #include #include namespace stan { namespace math { /** * Return the log of 1 minus the exponential of the specified * variable. * *

The derivative of log(1 - exp(x)) with respect * to x is -1 / expm1(-x). * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param[in] x Argument. * @return Natural logarithm of one minus the exponential of the * argument. */ template * = nullptr> inline auto log1m_exp(const var_value& x) { return make_callback_var(log1m_exp(x.val()), [x](auto& vi) mutable { as_array_or_scalar(x.adj()) -= as_array_or_scalar(vi.adj()) / as_array_or_scalar(expm1(-x.val())); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/arg.hpp0000644000176200001440000000075414645137106021634 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ARG_HPP #define STAN_MATH_REV_FUN_ARG_HPP #include #include #include #include namespace stan { namespace math { /** * Return the phase angle of the complex argument. * * @param[in] z argument * @return phase angle of the argument */ inline var arg(const std::complex& z) { return internal::complex_arg(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/trigamma.hpp0000644000176200001440000000133414645137106022657 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TRIGAMMA_HPP #define STAN_MATH_REV_FUN_TRIGAMMA_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the value of the trigamma function at the specified * argument (i.e., the second derivative of the log Gamma function * at the specified argument). * * @param u argument * @return trigamma function at argument */ inline var trigamma(const var& u) { return trigamma_impl(u); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/round.hpp0000644000176200001440000000226614645137106022212 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ROUND_HPP #define STAN_MATH_REV_FUN_ROUND_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the rounded form of the specified variable (C99). 
* * The derivative is zero everywhere but numbers half way between * whole numbers, so for convenience the derivative is defined to * be everywhere zero, * * \f$\frac{d}{dx} \mbox{round}(x) = 0\f$. * * \f[ \mbox{round}(x) = \begin{cases} \lceil x \rceil & \mbox{if } x-\lfloor x\rfloor \geq 0.5 \\ \lfloor x \rfloor & \mbox{if } x-\lfloor x\rfloor < 0.5 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{round}(x)}{\partial x} = \begin{cases} 0 & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Specified variable. * @return Rounded variable. */ inline var round(const var& a) { return var(round(a.val())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fft.hpp0000644000176200001440000001240114645137106021632 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FFT_HPP #define STAN_MATH_REV_FUN_FFT_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the discrete Fourier transform of the specified complex * vector. * * Given an input complex vector `x[0:N-1]` of size `N`, the discrete * Fourier transform computes entries of the resulting complex * vector `y[0:N-1]` by * * ``` * y[n] = SUM_{i < N} x[i] * exp(-n * i * 2 * pi * sqrt(-1) / N) * ``` * * The adjoint computation is given by * ``` * adjoint(x) += length(y) * inv_fft(adjoint(y)) * ``` * * If the input is of size zero, the result is a size zero vector. 
* * @tparam V type of complex vector argument * @param[in] x vector to transform * @return discrete Fourier transform of `x` */ template * = nullptr, require_var_t>>* = nullptr> inline plain_type_t fft(const V& x) { if (unlikely(x.size() <= 1)) { return plain_type_t(x); } arena_t arena_v = x; arena_t res = fft(to_complex(arena_v.real().val(), arena_v.imag().val())); reverse_pass_callback([arena_v, res]() mutable { auto adj_inv_fft = inv_fft(to_complex(res.real().adj(), res.imag().adj())); adj_inv_fft *= res.size(); arena_v.real().adj() += adj_inv_fft.real(); arena_v.imag().adj() += adj_inv_fft.imag(); }); return plain_type_t(res); } /** * Return the inverse discrete Fourier transform of the specified * complex vector. * * Given an input complex vector `y[0:N-1]` of size `N`, the inverse * discrete Fourier transform computes entries of the resulting * complex vector `x[0:N-1]` by * * ``` * x[n] = SUM_{i < N} y[i] * exp(n * i * 2 * pi * sqrt(-1) / N) * ``` * * * The adjoint computation is given by * ``` * adjoint(y) += (1 / length(x)) * fft(adjoint(x)) * ``` * * If the input is of size zero, the result is a size zero vector. * The only difference between the discrete DFT and its inverse is * the sign of the exponent. * * @tparam V type of complex vector argument * @param[in] y vector to inverse transform * @return inverse discrete Fourier transform of `y` */ template * = nullptr, require_var_t>>* = nullptr> inline plain_type_t inv_fft(const V& y) { if (unlikely(y.size() <= 1)) { return plain_type_t(y); } arena_t arena_v = y; arena_t res = inv_fft(to_complex(arena_v.real().val(), arena_v.imag().val())); reverse_pass_callback([arena_v, res]() mutable { auto adj_fft = fft(to_complex(res.real().adj(), res.imag().adj())); adj_fft /= res.size(); arena_v.real().adj() += adj_fft.real(); arena_v.imag().adj() += adj_fft.imag(); }); return plain_type_t(res); } /** * Return the two-dimensional discrete Fourier transform of the * specified complex matrix. 
The 2D discrete Fourier transform first * runs the discrete Fourier transform on the each row, then on each * column of the result. * * The adjoint computation is given by * ``` * adjoint(x) += size(y) * inv_fft2(adjoint(y)) * ``` * * @tparam M type of complex matrix argument * @param[in] x matrix to transform * @return discrete 2D Fourier transform of `x` */ template * = nullptr, require_var_t>>* = nullptr> inline plain_type_t fft2(const M& x) { arena_t arena_v = x; arena_t res = fft2(to_complex(arena_v.real().val(), arena_v.imag().val())); reverse_pass_callback([arena_v, res]() mutable { auto adj_inv_fft = inv_fft2(to_complex(res.real().adj(), res.imag().adj())); adj_inv_fft *= res.size(); arena_v.real().adj() += adj_inv_fft.real(); arena_v.imag().adj() += adj_inv_fft.imag(); }); return plain_type_t(res); } /** * Return the two-dimensional inverse discrete Fourier transform of * the specified complex matrix. The 2D inverse discrete Fourier * transform first runs the 1D inverse Fourier transform on the * columns, and then on the resulting rows. The composition of the * FFT and inverse FFT (or vice-versa) is the identity. 
* * The adjoint computation is given by * ``` * adjoint(y) += (1 / size(x)) * fft2(adjoint(x)) * ``` * * @tparam M type of complex matrix argument * @param[in] y matrix to inverse trnasform * @return inverse discrete 2D Fourier transform of `y` */ template * = nullptr, require_var_t>>* = nullptr> inline plain_type_t inv_fft2(const M& y) { arena_t arena_v = y; arena_t res = inv_fft2(to_complex(arena_v.real().val(), arena_v.imag().val())); reverse_pass_callback([arena_v, res]() mutable { auto adj_fft = fft2(to_complex(res.real().adj(), res.imag().adj())); adj_fft /= res.size(); arena_v.real().adj() += adj_fft.real(); arena_v.imag().adj() += adj_fft.imag(); }); return plain_type_t(res); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/abs.hpp0000644000176200001440000000241314645137106021622 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ABS_HPP #define STAN_MATH_REV_FUN_ABS_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the absolute value of the variable (std). * * Delegates to fabs() (see for doc). * \f[ \mbox{abs}(x) = \begin{cases} |x| & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{abs}(x)}{\partial x} = \begin{cases} -1 & \mbox{if } x < 0 \\ 0 & \mbox{if } x = 0 \\ 1 & \mbox{if } x > 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @tparam T A floating point type or an Eigen type with floating point scalar. * @param a Variable input. * @return Absolute value of variable. */ template inline auto abs(const var_value& a) { return fabs(a); } /** * Return the absolute value of the complex argument. 
* * @param[in] z argument * @return absolute value of the argument */ inline var abs(const std::complex& z) { return internal::complex_abs(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/lb_constrain.hpp0000644000176200001440000003445414645137106023544 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LB_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_LB_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the lower-bounded value for the specified unconstrained input * and specified lower bound. * *

The transform applied is * *

\f$f(x) = \exp(x) + L\f$ * *

where \f$L\f$ is the constant lower bound. * *

If the lower bound is negative infinity, this function * reduces to identity_constrain(x). * * @tparam T Scalar * @tparam L Scalar * @param[in] x Unconstrained input * @param[in] lb lower bound on constrained output * @return lower bound constrained value corresponding to inputs */ template * = nullptr, require_any_var_t* = nullptr> inline auto lb_constrain(const T& x, const L& lb) { const auto lb_val = value_of(lb); if (unlikely(lb_val == NEGATIVE_INFTY)) { return identity_constrain(x, lb); } else { if (!is_constant::value && !is_constant::value) { auto exp_x = std::exp(value_of(x)); return make_callback_var( exp_x + lb_val, [arena_x = var(x), arena_lb = var(lb), exp_x](auto& vi) mutable { arena_x.adj() += vi.adj() * exp_x; arena_lb.adj() += vi.adj(); }); } else if (!is_constant::value) { auto exp_x = std::exp(value_of(x)); return make_callback_var(exp_x + lb_val, [arena_x = var(x), exp_x](auto& vi) mutable { arena_x.adj() += vi.adj() * exp_x; }); } else { return make_callback_var(std::exp(value_of(x)) + lb_val, [arena_lb = var(lb)](auto& vi) mutable { arena_lb.adj() += vi.adj(); }); } } } /** * Return the lower-bounded value for the specified unconstrained input * and specified lower bound. * *

The transform applied is * *

\f$f(x) = \exp(x) + L\f$ * *

where \f$L\f$ is the constant lower bound. * *

If the lower bound is negative infinity, this function * reduces to identity_constrain(x). * * @tparam T Scalar * @tparam L Scalar * @param[in] x Unconstrained input * @param[in] lb lower bound on constrained output * @param[in,out] lp reference to log probability to increment * @return lower bound constrained value corresponding to inputs */ template * = nullptr, require_any_var_t* = nullptr> inline auto lb_constrain(const T& x, const L& lb, var& lp) { const auto lb_val = value_of(lb); if (unlikely(lb_val == NEGATIVE_INFTY)) { return identity_constrain(x, lb); } else { lp += value_of(x); if (!is_constant::value && !is_constant::value) { auto exp_x = std::exp(value_of(x)); return make_callback_var( exp_x + lb_val, [lp, arena_x = var(x), arena_lb = var(lb), exp_x](auto& vi) mutable { arena_x.adj() += vi.adj() * exp_x + lp.adj(); arena_lb.adj() += vi.adj(); }); } else if (!is_constant::value) { auto exp_x = std::exp(value_of(x)); return make_callback_var(exp_x + lb_val, [lp, arena_x = var(x), exp_x](auto& vi) mutable { arena_x.adj() += vi.adj() * exp_x + lp.adj(); }); } else { return make_callback_var(std::exp(value_of(x)) + lb_val, [arena_lb = var(lb)](auto& vi) mutable { arena_lb.adj() += vi.adj(); }); } } } /** * Specialization of `lb_constrain` to apply a scalar lower bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam L Scalar. 
* @param[in] x unconstrained input * @param[in] lb lower bound on output * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_any_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb) { using ret_type = return_var_matrix_t; const auto lb_val = value_of(lb); if (unlikely(lb_val == NEGATIVE_INFTY)) { return ret_type(identity_constrain(x, lb)); } else { if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; auto exp_x = to_arena(arena_x.val().array().exp()); arena_t ret = exp_x + lb_val; reverse_pass_callback( [arena_x, ret, exp_x, arena_lb = var(lb)]() mutable { arena_x.adj().array() += ret.adj().array() * exp_x; arena_lb.adj() += ret.adj().sum(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto exp_x = to_arena(arena_x.val().array().exp()); arena_t ret = exp_x + lb_val; reverse_pass_callback([arena_x, ret, exp_x]() mutable { arena_x.adj().array() += ret.adj().array() * exp_x; }); return ret_type(ret); } else { arena_t ret = value_of(x).array().exp() + lb_val; reverse_pass_callback([ret, arena_lb = var(lb)]() mutable { arena_lb.adj() += ret.adj().sum(); }); return ret_type(ret); } } } /** * Specialization of `lb_constrain` to apply a scalar lower bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam L Scalar. 
* @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_any_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb, return_type_t& lp) { using ret_type = return_var_matrix_t; const auto lb_val = value_of(lb); if (unlikely(lb_val == NEGATIVE_INFTY)) { return ret_type(identity_constrain(x, lb)); } else { if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; auto exp_x = to_arena(arena_x.val().array().exp()); arena_t ret = exp_x + lb_val; lp += arena_x.val().sum(); reverse_pass_callback( [arena_x, ret, lp, arena_lb = var(lb), exp_x]() mutable { arena_x.adj().array() += ret.adj().array() * exp_x + lp.adj(); arena_lb.adj() += ret.adj().sum(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto exp_x = to_arena(arena_x.val().array().exp()); arena_t ret = exp_x + lb_val; lp += arena_x.val().sum(); reverse_pass_callback([arena_x, ret, exp_x, lp]() mutable { arena_x.adj().array() += ret.adj().array() * exp_x + lp.adj(); }); return ret_type(ret); } else { const auto& x_ref = to_ref(x); lp += sum(x_ref); arena_t ret = value_of(x_ref).array().exp() + lb_val; reverse_pass_callback([ret, arena_lb = var(lb)]() mutable { arena_lb.adj() += ret.adj().sum(); }); return ret_type(ret); } } } /** * Specialization of `lb_constrain` to apply a matrix of lower bounds * elementwise to each input element. * * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam L A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. 
* @param[in] x unconstrained input * @param[in] lb lower bound on output * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_any_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb) { check_matching_dims("lb_constrain", "x", x, "lb", lb); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; arena_t> arena_lb = lb; auto is_not_inf_lb = to_arena((arena_lb.val().array() != NEGATIVE_INFTY)); auto precomp_x_exp = to_arena((arena_x.val().array()).exp()); arena_t ret = (is_not_inf_lb) .select(precomp_x_exp + arena_lb.val().array(), arena_x.val().array()); reverse_pass_callback([arena_x, arena_lb, ret, is_not_inf_lb, precomp_x_exp]() mutable { arena_x.adj().array() += (is_not_inf_lb) .select(ret.adj().array() * precomp_x_exp, ret.adj().array()); arena_lb.adj().array() += (is_not_inf_lb).select(ret.adj().array(), 0); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto lb_ref = to_ref(value_of(lb)); auto is_not_inf_lb = to_arena((lb_ref.array() != NEGATIVE_INFTY)); auto precomp_x_exp = to_arena((arena_x.val().array()).exp()); arena_t ret = (is_not_inf_lb) .select(precomp_x_exp + lb_ref.array(), arena_x.val().array()); reverse_pass_callback([arena_x, ret, is_not_inf_lb, precomp_x_exp]() mutable { arena_x.adj().array() += (is_not_inf_lb) .select(ret.adj().array() * precomp_x_exp, ret.adj().array()); }); return ret_type(ret); } else { arena_t> arena_lb = lb; const auto x_ref = to_ref(value_of(x)); auto is_not_inf_lb = to_arena((arena_lb.val().array() != NEGATIVE_INFTY)); arena_t ret = (is_not_inf_lb) .select(x_ref.array().exp() + arena_lb.val().array(), x_ref.array()); reverse_pass_callback([arena_lb, ret, is_not_inf_lb]() mutable { arena_lb.adj().array() += (is_not_inf_lb).select(ret.adj().array(), 0); }); return ret_type(ret); } } /** * Specialization of `lb_constrain` to apply a matrix of lower bounds * elementwise to each 
input element. * * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam L A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_any_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb, return_type_t& lp) { check_matching_dims("lb_constrain", "x", x, "lb", lb); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; arena_t> arena_lb = lb; auto is_not_inf_lb = to_arena((arena_lb.val().array() != NEGATIVE_INFTY)); auto exp_x = to_arena(arena_x.val().array().exp()); arena_t ret = (is_not_inf_lb) .select(exp_x + arena_lb.val().array(), arena_x.val().array()); lp += (is_not_inf_lb).select(arena_x.val(), 0).sum(); reverse_pass_callback( [arena_x, arena_lb, ret, lp, exp_x, is_not_inf_lb]() mutable { const auto lp_adj = lp.adj(); for (size_t j = 0; j < arena_x.cols(); ++j) { for (size_t i = 0; i < arena_x.rows(); ++i) { double ret_adj = ret.adj().coeff(i, j); if (likely(is_not_inf_lb.coeff(i, j))) { arena_x.adj().coeffRef(i, j) += ret_adj * exp_x.coeff(i, j) + lp_adj; arena_lb.adj().coeffRef(i, j) += ret_adj; } else { arena_x.adj().coeffRef(i, j) += ret_adj; } } } }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto lb_val = value_of(lb).array(); auto is_not_inf_lb = to_arena((lb_val != NEGATIVE_INFTY)); auto exp_x = to_arena(arena_x.val().array().exp()); arena_t ret = (is_not_inf_lb).select(exp_x + lb_val, arena_x.val().array()); lp += (is_not_inf_lb).select(arena_x.val(), 0).sum(); reverse_pass_callback([arena_x, ret, exp_x, lp, is_not_inf_lb]() mutable { const auto lp_adj = lp.adj(); for (size_t j = 0; j < arena_x.cols(); ++j) { 
for (size_t i = 0; i < arena_x.rows(); ++i) { if (likely(is_not_inf_lb.coeff(i, j))) { const double ret_adj = ret.adj().coeff(i, j); arena_x.adj().coeffRef(i, j) += ret_adj * exp_x.coeff(i, j) + lp_adj; } else { arena_x.adj().coeffRef(i, j) += ret.adj().coeff(i, j); } } } }); return ret_type(ret); } else { auto x_val = to_ref(value_of(x)).array(); arena_t> arena_lb = lb; auto is_not_inf_lb = to_arena((arena_lb.val().array() != NEGATIVE_INFTY)); arena_t ret = (is_not_inf_lb).select(x_val.exp() + arena_lb.val().array(), x_val); lp += (is_not_inf_lb).select(x_val, 0).sum(); reverse_pass_callback([arena_lb, ret, is_not_inf_lb]() mutable { arena_lb.adj().array() += ret.adj().array() * is_not_inf_lb.template cast(); }); return ret_type(ret); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log1p.hpp0000644000176200001440000000151614645137106022102 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG1P_HPP #define STAN_MATH_REV_FUN_LOG1P_HPP #include #include #include namespace stan { namespace math { /** * The log (1 + x) function for variables (C99). * * The derivative is given by * * \f$\frac{d}{dx} \log (1 + x) = \frac{1}{1 + x}\f$. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a The variable. * @return The log of 1 plus the variable. */ template * = nullptr> inline auto log1p(const var_value& a) { return make_callback_var(log1p(a.val()), [a](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) / (1.0 + as_array_or_scalar(a.val())); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fmin.hpp0000644000176200001440000000716014645137106022012 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FMIN_HPP #define STAN_MATH_REV_FUN_FMIN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the minimum of the two variable arguments (C99). 
* * For fmin(a, b), if a's value is less than b's, * then a is returned, otherwise b is returned. * \f[ \mbox{fmin}(x, y) = \begin{cases} x & \mbox{if } x \leq y \\ y & \mbox{if } x > y \\[6pt] x & \mbox{if } -\infty\leq x\leq \infty, y = \textrm{NaN}\\ y & \mbox{if } -\infty\leq y\leq \infty, x = \textrm{NaN}\\ \textrm{NaN} & \mbox{if } x, y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fmin}(x, y)}{\partial x} = \begin{cases} 1 & \mbox{if } x \leq y \\ 0 & \mbox{if } x > y \\[6pt] 1 & \mbox{if } -\infty\leq x\leq \infty, y = \textrm{NaN}\\ 0 & \mbox{if } -\infty\leq y\leq \infty, x = \textrm{NaN}\\ \textrm{NaN} & \mbox{if } x, y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fmin}(x, y)}{\partial y} = \begin{cases} 0 & \mbox{if } x \leq y \\ 1 & \mbox{if } x > y \\[6pt] 0 & \mbox{if } -\infty\leq x\leq \infty, y = \textrm{NaN}\\ 1 & \mbox{if } -\infty\leq y\leq \infty, x = \textrm{NaN}\\ \textrm{NaN} & \mbox{if } x, y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return If the first variable's value is smaller than the * second's, the first variable, otherwise the second variable. */ inline var fmin(const var& a, const var& b) { if (unlikely(is_nan(a))) { if (unlikely(is_nan(b))) { return var(new precomp_vv_vari(NOT_A_NUMBER, a.vi_, b.vi_, NOT_A_NUMBER, NOT_A_NUMBER)); } return b; } if (unlikely(is_nan(b))) { return a; } return a < b ? a : b; } /** * Returns the minimum of the variable and scalar, promoting the * scalar to a variable if it is larger (C99). * * For fmin(a, b), if a's value is less than or equal * to b, then a is returned, otherwise a fresh variable wrapping b * is returned. * * @param a First variable. * @param b Second value * @return If the first variable's value is less than or equal to the second * value, the first variable, otherwise the second value promoted to a fresh * variable. 
*/ inline var fmin(const var& a, double b) { if (unlikely(is_nan(a))) { if (unlikely(is_nan(b))) { return make_callback_var( NOT_A_NUMBER, [a](auto& vi) mutable { a.adj() = NOT_A_NUMBER; }); } return var(b); } if (unlikely(is_nan(b))) { return a; } return a <= b ? a : var(b); } /** * Returns the minimum of a scalar and variable, promoting the scalar to * a variable if it is larger (C99). * * For fmin(a, b), if a is less than b's value, then a * fresh variable implementation wrapping a is returned, otherwise * b is returned. * * @param a First value. * @param b Second variable. * @return If the first value is smaller than the second variable's value, * return the first value promoted to a variable, otherwise return the * second variable. */ inline var fmin(double a, const var& b) { if (unlikely(is_nan(b))) { if (unlikely(is_nan(a))) { return make_callback_var( NOT_A_NUMBER, [b](auto& vi) mutable { b.adj() = NOT_A_NUMBER; }); } return var(a); } if (unlikely(is_nan(a))) { return b; } return b <= a ? b : var(a); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log1p_exp.hpp0000644000176200001440000000160014645137106022750 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG1P_EXP_HPP #define STAN_MATH_REV_FUN_LOG1P_EXP_HPP #include #include #include #include namespace stan { namespace math { /** * Return the log of 1 plus the exponential of the specified * variable. * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a The variable. 
*/ template * = nullptr> inline auto log1p_exp(const var_value& a) { auto precomp_inv_logit = to_arena(as_array_or_scalar(inv_logit(a.val()))); return make_callback_var( log1p_exp(a.val()), [a, precomp_inv_logit](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) * precomp_inv_logit; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log10.hpp0000644000176200001440000000343214645137106022001 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG10_HPP #define STAN_MATH_REV_FUN_LOG10_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the base 10 log of the specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \log_{10} x = \frac{1}{x \log 10}\f$. * * \f[ \mbox{log10}(x) = \begin{cases} \textrm{NaN} & \mbox{if } x < 0\\ \log_{10}(x) & \mbox{if } x \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log10}(x)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < 0\\ \frac{1}{x \ln10} & \mbox{if } x\geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a Variable whose log is taken. * @return Base 10 log of variable. */ template * = nullptr> inline auto log10(const var_value& a) { return make_callback_var(log10(a.val()), [a](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) / (LOG_TEN * as_array_or_scalar(a.val())); }); } /** * Return the base 10 logarithm of the specified complex number. 
* * @param z complex argument * @return base 10 log of argument */ inline std::complex log10(const std::complex& z) { return internal::complex_log10(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/is_uninitialized.hpp0000644000176200001440000000132614645137106024422 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_IS_UNINITIALIZED_HPP #define STAN_MATH_REV_FUN_IS_UNINITIALIZED_HPP #include #include #include namespace stan { namespace math { /** * Returns true if the specified variable is * uninitialized. * * This overload of the * is_uninitialized() function delegates * the return to the is_uninitialized() method on the * specified variable. * * @param x Object to test. * @return true if the specified object is uninitialized. */ inline bool is_uninitialized(var x) { return x.is_uninitialized(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/hypergeometric_pFq.hpp0000644000176200001440000000344414645137106024716 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_HYPERGEOMETRIC_PFQ_HPP #define STAN_MATH_REV_FUN_HYPERGEOMETRIC_PFQ_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the generalized hypergeometric function (pFq) applied to the * input arguments. 
* * @tparam Ta Type of Eigen vector with scalar type var or arithmetic * @tparam Tb Type of Eigen vector with scalar type var or arithmetic * @tparam Tz Scalar of type var or arithmetic * @param[in] a Vector of 'a' arguments (of length p) * @param[in] b Vector of 'b' arguments (of length q) * @param[in] z Scalar z argument * @return Generalized hypergeometric function */ template * = nullptr, require_return_type_t* = nullptr> inline var hypergeometric_pFq(const Ta& a, const Tb& b, const Tz& z) { arena_t arena_a = a; arena_t arena_b = b; return make_callback_var( hypergeometric_pFq(value_of(arena_a), value_of(arena_b), value_of(z)), [arena_a, arena_b, z](auto& vi) mutable { auto grad_tuple = grad_pFq(arena_a, arena_b, z); if (!is_constant::value) { forward_as>(arena_a).adj() += vi.adj() * std::get<0>(grad_tuple); } if (!is_constant::value) { forward_as>(arena_b).adj() += vi.adj() * std::get<1>(grad_tuple); } if (!is_constant::value) { forward_as>(z).adj() += vi.adj() * std::get<2>(grad_tuple); } }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/ceil.hpp0000644000176200001440000000277114645137106022000 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CEIL_HPP #define STAN_MATH_REV_FUN_CEIL_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the ceiling of the specified variable (cmath). * * The derivative of the ceiling function is defined and * zero everywhere but at integers, and we set them to zero for * convenience, * * \f$\frac{d}{dx} {\lceil x \rceil} = 0\f$. * * The ceiling function rounds up. For double values, this is the * smallest integral value that is not less than the specified * value. Although this function is not differentiable because it * is discontinuous at integral values, its gradient is returned * as zero everywhere. 
* \f[ \mbox{ceil}(x) = \begin{cases} \lceil x\rceil & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{ceil}(x)}{\partial x} = \begin{cases} 0 & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Input variable. * @return Ceiling of the variable. */ inline var ceil(const var& a) { return var(std::ceil(a.val())); } template * = nullptr> inline auto ceil(const var_value& a) { return var_value(a.val().array().ceil()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inverse.hpp0000644000176200001440000000225614645137106022535 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INVERSE_HPP #define STAN_MATH_REV_FUN_INVERSE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Reverse mode specialization of calculating the inverse of the matrix. * * @param m specified matrix * @return Inverse of the matrix (an empty matrix if the specified matrix has * size zero). * @throw std::invalid_argument if the matrix is not square. */ template * = nullptr> inline auto inverse(const T& m) { check_square("inverse", "m", m); using ret_type = return_var_matrix_t; if (unlikely(m.size() == 0)) { return ret_type(m); } arena_t arena_m = m; arena_t> res_val = arena_m.val().inverse(); arena_t res = res_val; reverse_pass_callback([res, res_val, arena_m]() mutable { arena_m.adj() -= res_val.transpose() * res.adj_op() * res_val.transpose(); }); return ret_type(res); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/rep_row_vector.hpp0000644000176200001440000000162014645137106024113 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_REP_ROW_VECTOR_HPP #define STAN_MATH_REV_FUN_REP_ROW_VECTOR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Overload for `var_value`. 
* @tparam T_ret The user supplied return type. * @tparam T A double or var type * @param x The type to be propogated through the new vector. * @param n The size of the new vector. */ template * = nullptr, require_eigen_row_vector_t>* = nullptr> inline auto rep_row_vector(var x, int n) { return make_callback_var(rep_row_vector(x.val(), n), [x](auto& vi) mutable { forward_as(x).adj() += vi.adj().sum(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_sum_exp.hpp0000644000176200001440000000572014645137106023402 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_SUM_EXP_HPP #define STAN_MATH_REV_FUN_LOG_SUM_EXP_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class log_sum_exp_vv_vari : public op_vv_vari { public: log_sum_exp_vv_vari(vari* avi, vari* bvi) : op_vv_vari(log_sum_exp(avi->val_, bvi->val_), avi, bvi) {} void chain() { avi_->adj_ += adj_ * inv_logit(avi_->val_ - bvi_->val_); bvi_->adj_ += adj_ * inv_logit(bvi_->val_ - avi_->val_); } }; class log_sum_exp_vd_vari : public op_vd_vari { public: log_sum_exp_vd_vari(vari* avi, double b) : op_vd_vari(log_sum_exp(avi->val_, b), avi, b) {} void chain() { if (val_ == NEGATIVE_INFTY) { avi_->adj_ += adj_; } else { avi_->adj_ += adj_ * inv_logit(avi_->val_ - bd_); } } }; } // namespace internal /** * Returns the log sum of exponentials. */ inline var log_sum_exp(const var& a, const var& b) { return var(new internal::log_sum_exp_vv_vari(a.vi_, b.vi_)); } /** * Returns the log sum of exponentials. */ inline var log_sum_exp(const var& a, double b) { return var(new internal::log_sum_exp_vd_vari(a.vi_, b)); } /** * Returns the log sum of exponentials. */ inline var log_sum_exp(double a, const var& b) { return var(new internal::log_sum_exp_vd_vari(b.vi_, a)); } /** * Returns the log sum of exponentials of the input. 
* * @tparam T A type inheriting from EigenBase with scalar type var * @param v input */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline var log_sum_exp(const T& v) { arena_t arena_v = v; arena_t arena_v_val = arena_v.val(); var res = log_sum_exp(arena_v_val); reverse_pass_callback([arena_v, arena_v_val, res]() mutable { arena_v.adj() += res.adj() * (arena_v_val.array().val() - res.val()).exp().matrix(); }); return res; } /** * Returns the log sum of exponentials of the input. * * @tparam T A `var_value` with an input vector or matrix * @param x input */ template * = nullptr> inline var log_sum_exp(const T& x) { return make_callback_vari(log_sum_exp(x.val()), [x](const auto& res) mutable { x.adj() += res.adj() * (x.val().array().val() - res.val()).exp().matrix(); }); } /** * Returns the log sum of exponentials. * * @tparam T Type of input vector or matrix. * @param x matrix */ template * = nullptr> inline auto log_sum_exp(const T& x) { return apply_vector_unary::reduce( x, [](const auto& v) { return log_sum_exp(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cumulative_sum.hpp0000644000176200001440000000267214645137106024126 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CUMULATIVE_SUM_HPP #define STAN_MATH_REV_FUN_CUMULATIVE_SUM_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the cumulative sum of the specified vector. * * The cumulative sum is of the same type as the input and * has values defined by * * \code x(0), x(1) + x(2), ..., x(1) + , ..., + x(x.size()-1) @endcode * * @tparam EigVec type derived from `Eigen::EigenBase` or a `var_value` with * `T` deriving from `Eigen::EigenBase` with one compile time dimension * equal to 1 * * @param x Vector of values. * @return Cumulative sum of values. 
*/ template * = nullptr> inline auto cumulative_sum(const EigVec& x) { arena_t x_arena(x); using return_t = return_var_matrix_t; arena_t res = cumulative_sum(x_arena.val()).eval(); if (unlikely(x.size() == 0)) { return return_t(res); } reverse_pass_callback([x_arena, res]() mutable { for (Eigen::Index i = x_arena.size() - 1; i > 0; --i) { x_arena.adj().coeffRef(i) += res.adj().coeffRef(i); res.adj().coeffRef(i - 1) += res.adj().coeffRef(i); } x_arena.adj().coeffRef(0) += res.adj().coeffRef(0); }); return return_t(res); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/variance.hpp0000644000176200001440000000567314645137106022660 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_VARIANCE_HPP #define STAN_MATH_REV_FUN_VARIANCE_HPP #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { inline var calc_variance(size_t size, const var* dtrs) { vari** varis = ChainableStack::instance_->memalloc_.alloc_array(size); double* partials = ChainableStack::instance_->memalloc_.alloc_array(size); Eigen::Map dtrs_map(dtrs, size); Eigen::Map(varis, size) = dtrs_map.vi(); vector_d dtrs_vals = dtrs_map.val(); vector_d diff = dtrs_vals.array() - dtrs_vals.mean(); double size_m1 = size - 1; Eigen::Map(partials, size) = 2 * diff.array() / size_m1; double variance = diff.squaredNorm() / size_m1; return {new stored_gradient_vari(variance, size, varis, partials)}; } } // namespace internal /** * Return the sample variance of the specified standard * vector. Raise domain error if size is not greater than zero. * * @param[in] v a vector * @return sample variance of specified vector */ inline var variance(const std::vector& v) { check_nonzero_size("variance", "v", v); if (v.size() == 1) { return var{0.0}; } return {internal::calc_variance(v.size(), &v[0])}; } /** * Return the sample variance of the specified vector, row vector, * or matrix. Raise domain error if size is not greater than * zero. 
* * @tparam EigMat type inheriting from `EigenBase` that has a `var` * scalar type. * @param[in] m input matrix * @return sample variance of specified matrix */ template * = nullptr> var variance(const EigMat& m) { const auto& mat = to_ref(m); check_nonzero_size("variance", "m", mat); if (mat.size() == 1) { return var{0.0}; } return {internal::calc_variance(mat.size(), mat.data())}; } /** * Return the sample variance of the var_value matrix * Raise domain error if size is not greater than zero. * * @tparam Mat input matrix type * @param[in] x a input * @return sample variance of input */ template * = nullptr> inline var variance(const Mat& x) { check_nonzero_size("variance", "x", x); if (x.size() == 1) { return 0.0; } double mean = x.val().mean(); arena_t> arena_diff(x.rows(), x.cols()); double squaredNorm = 0.0; for (Eigen::Index i = 0; i < arena_diff.size(); ++i) { double diff = x.val().coeff(i) - mean; arena_diff.coeffRef(i) = diff; squaredNorm += diff * diff; } var res = squaredNorm / (x.size() - 1); reverse_pass_callback([x, res, arena_diff]() mutable { x.adj() += (2.0 * res.adj() / (x.size() - 1)) * arena_diff; }); return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/trace_gen_quad_form.hpp0000644000176200001440000002526114645137106025047 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TRACE_GEN_QUAD_FORM_HPP #define STAN_MATH_REV_FUN_TRACE_GEN_QUAD_FORM_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template class trace_gen_quad_form_vari_alloc : public chainable_alloc { public: trace_gen_quad_form_vari_alloc(const Eigen::Matrix& D, const Eigen::Matrix& A, const Eigen::Matrix& B) : D_(D), A_(A), B_(B) {} double compute() { return trace_gen_quad_form(value_of(D_), value_of(A_), value_of(B_)); } Eigen::Matrix D_; Eigen::Matrix A_; Eigen::Matrix B_; }; template class trace_gen_quad_form_vari : public vari { 
protected: static inline void computeAdjoints(double adj, const Eigen::Matrix& D, const Eigen::Matrix& A, const Eigen::Matrix& B, Eigen::Matrix* varD, Eigen::Matrix* varA, Eigen::Matrix* varB) { Eigen::Matrix AtB; Eigen::Matrix BD; if (varB || varA) { BD.noalias() = B * D; } if (varB || varD) { AtB.noalias() = A.transpose() * B; } if (varB) { (*varB).adj() += adj * (A * BD + AtB * D.transpose()); } if (varA) { (*varA).adj() += adj * (B * BD.transpose()); } if (varD) { (*varD).adj() += adj * (B.transpose() * AtB); } } public: explicit trace_gen_quad_form_vari( trace_gen_quad_form_vari_alloc* impl) : vari(impl->compute()), impl_(impl) {} virtual void chain() { computeAdjoints(adj_, value_of(impl_->D_), value_of(impl_->A_), value_of(impl_->B_), reinterpret_cast*>( std::is_same::value ? (&impl_->D_) : NULL), reinterpret_cast*>( std::is_same::value ? (&impl_->A_) : NULL), reinterpret_cast*>( std::is_same::value ? (&impl_->B_) : NULL)); } trace_gen_quad_form_vari_alloc* impl_; }; } // namespace internal template , value_type_t, value_type_t>, typename = require_all_eigen_t> inline var trace_gen_quad_form(const Td& D, const Ta& A, const Tb& B) { using Td_scal = value_type_t; using Ta_scal = value_type_t; using Tb_scal = value_type_t; constexpr int Rd = Td::RowsAtCompileTime; constexpr int Cd = Td::ColsAtCompileTime; constexpr int Ra = Ta::RowsAtCompileTime; constexpr int Ca = Ta::ColsAtCompileTime; constexpr int Rb = Tb::RowsAtCompileTime; constexpr int Cb = Tb::ColsAtCompileTime; check_square("trace_gen_quad_form", "A", A); check_square("trace_gen_quad_form", "D", D); check_multiplicable("trace_gen_quad_form", "A", A, "B", B); check_multiplicable("trace_gen_quad_form", "B", B, "D", D); auto* baseVari = new internal::trace_gen_quad_form_vari_alloc( D, A, B); return var( new internal::trace_gen_quad_form_vari(baseVari)); } /** * Return the trace of D times the quadratic form of B and A. 
* That is, `trace_gen_quad_form(D, A, B) = trace(D * B' * A * B).` * * This overload requires one of D, A, or B to be a `var_value` * where `T` inherits from EigenBase * * @tparam TD type of first matrix argument * @tparam TA type of second matrix argument * @tparam TB type of third matrix argument * * @param D multiplier * @param A outside term in quadratic form * @param B inner term in quadratic form * @return trace(D * B' * A * B) * @throw std::domain_error if A or D is not square * @throw std::domain_error if A cannot be multiplied by B or B cannot * be multiplied by D. */ template * = nullptr, require_all_matrix_t* = nullptr> inline var trace_gen_quad_form(const Td& D, const Ta& A, const Tb& B) { check_square("trace_gen_quad_form", "A", A); check_square("trace_gen_quad_form", "D", D); check_multiplicable("trace_gen_quad_form", "A", A, "B", B); check_multiplicable("trace_gen_quad_form", "B", B, "D", D); if (!is_constant::value && !is_constant::value && !is_constant::value) { arena_t> arena_D = D; arena_t> arena_A = A; arena_t> arena_B = B; auto arena_BDT = to_arena(arena_B.val_op() * arena_D.val_op().transpose()); auto arena_AB = to_arena(arena_A.val_op() * arena_B.val_op()); var res = (arena_BDT.transpose() * arena_AB).trace(); reverse_pass_callback( [arena_A, arena_B, arena_D, arena_BDT, arena_AB, res]() mutable { double C_adj = res.adj(); arena_A.adj() += C_adj * arena_BDT * arena_B.val_op().transpose(); arena_B.adj() += C_adj * (arena_AB * arena_D.val_op() + arena_A.val_op().transpose() * arena_BDT); arena_D.adj() += C_adj * (arena_AB.transpose() * arena_B.val_op()); }); return res; } else if (!is_constant::value && !is_constant::value && is_constant::value) { arena_t> arena_D = value_of(D); arena_t> arena_A = A; arena_t> arena_B = B; auto arena_BDT = to_arena(arena_B.val_op() * arena_D.transpose()); auto arena_AB = to_arena(arena_A.val_op() * arena_B.val_op()); var res = (arena_BDT.transpose() * arena_AB).trace(); reverse_pass_callback([arena_A, arena_B, 
arena_D, arena_BDT, arena_AB, res]() mutable { double C_adj = res.adj(); arena_A.adj() += C_adj * arena_BDT * arena_B.val_op().transpose(); arena_B.adj() += C_adj * (arena_AB * arena_D + arena_A.val_op().transpose() * arena_BDT); }); return res; } else if (!is_constant::value && is_constant::value && !is_constant::value) { arena_t> arena_D = D; arena_t> arena_A = A; arena_t> arena_B = value_of(B); auto arena_BDT = to_arena(arena_B.val_op() * arena_D.val_op().transpose()); auto arena_AB = to_arena(arena_A.val_op() * arena_B.val_op()); var res = (arena_BDT.transpose() * arena_A.val_op() * arena_B).trace(); reverse_pass_callback( [arena_A, arena_B, arena_D, arena_BDT, arena_AB, res]() mutable { double C_adj = res.adj(); arena_A.adj() += C_adj * arena_BDT * arena_B.transpose(); arena_D.adj() += C_adj * arena_AB.transpose() * arena_B; }); return res; } else if (!is_constant::value && is_constant::value && is_constant::value) { arena_t> arena_D = value_of(D); arena_t> arena_A = A; arena_t> arena_B = value_of(B); auto arena_BDT = to_arena(arena_B * arena_D); var res = (arena_BDT.transpose() * arena_A.val_op() * arena_B).trace(); reverse_pass_callback([arena_A, arena_B, arena_BDT, res]() mutable { arena_A.adj() += res.adj() * arena_BDT * arena_B.val_op().transpose(); }); return res; } else if (is_constant::value && !is_constant::value && !is_constant::value) { arena_t> arena_D = D; arena_t> arena_A = value_of(A); arena_t> arena_B = B; auto arena_AB = to_arena(arena_A * arena_B.val_op()); auto arena_BDT = to_arena(arena_B.val_op() * arena_D.val_op()); var res = (arena_BDT.transpose() * arena_AB).trace(); reverse_pass_callback([arena_A, arena_B, arena_D, arena_AB, arena_BDT, res]() mutable { double C_adj = res.adj(); arena_B.adj() += C_adj * (arena_AB * arena_D.val_op() + arena_A.transpose() * arena_BDT); arena_D.adj() += C_adj * (arena_AB.transpose() * arena_B.val_op()); }); return res; } else if (is_constant::value && !is_constant::value && is_constant::value) { arena_t> 
arena_D = value_of(D); arena_t> arena_A = value_of(A); arena_t> arena_B = B; auto arena_AB = to_arena(arena_A * arena_B.val_op()); auto arena_BDT = to_arena(arena_B.val_op() * arena_D.val_op()); var res = (arena_BDT.transpose() * arena_AB).trace(); reverse_pass_callback( [arena_A, arena_B, arena_D, arena_AB, arena_BDT, res]() mutable { arena_B.adj() += res.adj() * (arena_AB * arena_D.val_op() + arena_A.val_op().transpose() * arena_BDT); }); return res; } else if (is_constant::value && is_constant::value && !is_constant::value) { arena_t> arena_D = D; arena_t> arena_A = value_of(A); arena_t> arena_B = value_of(B); auto arena_AB = to_arena(arena_A * arena_B); var res = (arena_D.val_op() * arena_B.transpose() * arena_AB).trace(); reverse_pass_callback([arena_AB, arena_B, arena_D, res]() mutable { arena_D.adj() += res.adj() * (arena_AB.transpose() * arena_B); }); return res; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/bessel_first_kind.hpp0000644000176200001440000000260414645137106024550 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_BESSEL_FIRST_KIND_HPP #define STAN_MATH_REV_FUN_BESSEL_FIRST_KIND_HPP #include #include #include namespace stan { namespace math { inline var bessel_first_kind(int v, const var& a) { auto ret_val = bessel_first_kind(v, a.val()); auto precomp_bessel = v * ret_val / a.val() - bessel_first_kind(v + 1, a.val()); return make_callback_var(ret_val, [precomp_bessel, a](const auto& vi) mutable { a.adj() += vi.adj_ * precomp_bessel; }); } /** * Overload with `var_value` for `int`, `std::vector`, and * `std::vector>` */ template * = nullptr, require_eigen_t* = nullptr> inline auto bessel_first_kind(const T1& v, const var_value& a) { auto ret_val = bessel_first_kind(v, a.val()).array().eval(); auto v_map = as_array_or_scalar(v); auto precomp_bessel = to_arena(v_map * ret_val / a.val().array() - bessel_first_kind(v_map + 1, a.val().array())); return make_callback_var( ret_val.matrix(), [precomp_bessel, 
a](const auto& vi) mutable { a.adj().array() += vi.adj().array() * precomp_bessel; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cov_matrix_constrain_lkj.hpp0000644000176200001440000000610514645137106026152 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_COV_MATRIX_CONSTRAIN_LKJ_HPP #define STAN_MATH_REV_FUN_COV_MATRIX_CONSTRAIN_LKJ_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the covariance matrix of the specified dimensionality * derived from constraining the specified vector of unconstrained * values. The input vector must be of length \f$k \choose 2 + * k\f$. The first \f$k \choose 2\f$ values in the input * represent unconstrained (partial) correlations and the last * \f$k\f$ are unconstrained standard deviations of the dimensions. * *

The transform scales the correlation matrix transform defined * in corr_matrix_constrain(Matrix, size_t) * with the constrained deviations. * * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x Input vector of unconstrained partial correlations and * standard deviations. * @param k Dimensionality of returned covariance matrix. * @return Covariance matrix derived from the unconstrained partial * correlations and deviations. */ template * = nullptr> var_value cov_matrix_constrain_lkj(const T& x, size_t k) { size_t k_choose_2 = (k * (k - 1)) / 2; return read_cov_matrix(corr_constrain(x.head(k_choose_2)), positive_constrain(x.tail(k))); } /** * Return the covariance matrix of the specified dimensionality * derived from constraining the specified vector of unconstrained * values and increment the specified log probability reference * with the log absolute Jacobian determinant. * *

The transform is defined as for * cov_matrix_constrain(Matrix, size_t). * *

The log absolute Jacobian determinant is derived by * composing the log absolute Jacobian determinant for the * underlying correlation matrix as defined in * cov_matrix_constrain(Matrix, size_t, T&) with * the Jacobian of the transform of the correlation matrix * into a covariance matrix by scaling by standard deviations. * * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x Input vector of unconstrained partial correlations and * standard deviations. * @param k Dimensionality of returned covariance matrix. * @param lp Log probability reference to increment. * @return Covariance matrix derived from the unconstrained partial * correlations and deviations. */ template * = nullptr> var_value cov_matrix_constrain_lkj(const T& x, size_t k, scalar_type_t& lp) { size_t k_choose_2 = (k * (k - 1)) / 2; return read_cov_matrix(corr_constrain(x.head(k_choose_2)), positive_constrain(x.tail(k)), lp); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/sd.hpp0000644000176200001440000000555114645137106021471 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SD_HPP #define STAN_MATH_REV_FUN_SD_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the sample standard deviation of a variable which * inherits from EigenBase. * * @tparam T Input type * @param[in] x input * @return sample standard deviation * @throw domain error size is not greater than zero. 
*/ template * = nullptr> var sd(const T& x) { using std::sqrt; using T_vi = promote_scalar_t; using T_d = promote_scalar_t; check_nonzero_size("sd", "x", x); if (x.size() == 1) { return 0.0; } vari** varis = ChainableStack::instance_->memalloc_.alloc_array(x.size()); double* partials = ChainableStack::instance_->memalloc_.alloc_array(x.size()); Eigen::Map varis_map(varis, x.rows(), x.cols()); Eigen::Map partials_map(partials, x.rows(), x.cols()); varis_map = x.vi(); T_d dtrs_val = x.val(); double mean = dtrs_val.mean(); T_d diff = dtrs_val.array() - mean; double sum_of_squares = diff.squaredNorm(); double size_m1 = x.size() - 1; double sd = sqrt(sum_of_squares / size_m1); if (sum_of_squares < 1e-20) { partials_map.fill(inv_sqrt(static_cast(x.size()))); } else { partials_map = diff.array() / (sd * size_m1); } return var(new stored_gradient_vari(sd, x.size(), varis, partials)); } /** * Return the sample standard deviation of the var_value matrix * * @tparam T Input type * @param[in] x input matrix * @return sample standard deviation of specified matrix * @throw domain error size is not greater than zero. */ template * = nullptr> var sd(const T& x) { check_nonzero_size("sd", "x", x); if (x.size() == 1) { return 0.0; } auto arena_diff = to_arena((x.val().array() - x.val().mean()).matrix()); double sum_of_squares = arena_diff.squaredNorm(); double sd = std::sqrt(sum_of_squares / (x.size() - 1)); return make_callback_vari(sd, [x, arena_diff](const auto& res) mutable { x.adj() += (res.adj() / (res.val() * (x.size() - 1))) * arena_diff; }); } /** * Return the sample standard deviation of the specified std vector, column * vector, row vector, matrix, or std vector of any of these types. * * @tparam T Input type * @param[in] m input matrix * @return sample standard deviation * @throw domain error size is not greater than zero. 
*/ template * = nullptr> auto sd(const T& m) { return apply_vector_unary::reduce(m, [](const auto& x) { return sd(x); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/beta.hpp0000644000176200001440000002211014645137106021764 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_BETA_HPP #define STAN_MATH_REV_FUN_BETA_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the beta function and gradients for two var inputs. * \f[ \mathrm{beta}(a,b) = \left(B\left(a,b\right)\right) \f] \f[ \frac{\partial }{\partial a} = \left(\psi^{\left(0\right)}\left(a\right) - \psi^{\left(0\right)} \left(a + b\right)\right) * \mathrm{beta}(a,b) \f] \f[ \frac{\partial }{\partial b} = \left(\psi^{\left(0\right)}\left(b\right) - \psi^{\left(0\right)} \left(a + b\right)\right) * \mathrm{beta}(a,b) \f] * * @param a var Argument * @param b var Argument * @return Result of beta function */ inline var beta(const var& a, const var& b) { double digamma_ab = digamma(a.val() + b.val()); double digamma_a = digamma(a.val()) - digamma_ab; double digamma_b = digamma(b.val()) - digamma_ab; return make_callback_var(beta(a.val(), b.val()), [a, b, digamma_a, digamma_b](auto& vi) mutable { const double adj_val = vi.adj() * vi.val(); a.adj() += adj_val * digamma_a; b.adj() += adj_val * digamma_b; }); } /** * Returns the beta function and gradient for first var input. 
* \f[ \mathrm{beta}(a,b) = \left(B\left(a,b\right)\right) \f] \f[ \frac{\partial }{\partial a} = \left(\psi^{\left(0\right)}\left(a\right) - \psi^{\left(0\right)} \left(a + b\right)\right) * \mathrm{beta}(a,b) \f] * * @param a var Argument * @param b double Argument * @return Result of beta function */ inline var beta(const var& a, double b) { auto digamma_ab = digamma(a.val()) - digamma(a.val() + b); return make_callback_var(beta(a.val(), b), [a, digamma_ab](auto& vi) mutable { a.adj() += vi.adj() * digamma_ab * vi.val(); }); } /** * Returns the beta function and gradient for second var input. * \f[ \mathrm{beta}(a,b) = \left(B\left(a,b\right)\right) \f] \f[ \frac{\partial }{\partial b} = \left(\psi^{\left(0\right)}\left(b\right) - \psi^{\left(0\right)} \left(a + b\right)\right) * \mathrm{beta}(a,b) \f] * * @param a double Argument * @param b var Argument * @return Result of beta function */ inline var beta(double a, const var& b) { auto beta_val = beta(a, b.val()); auto digamma_ab = (digamma(b.val()) - digamma(a + b.val())) * beta_val; return make_callback_var(beta_val, [b, digamma_ab](auto& vi) mutable { b.adj() += vi.adj() * digamma_ab; }); } template * = nullptr, require_all_matrix_t* = nullptr> inline auto beta(const Mat1& a, const Mat2& b) { if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = b; auto beta_val = beta(arena_a.val(), arena_b.val()); auto digamma_ab = to_arena(digamma(arena_a.val().array() + arena_b.val().array())); return make_callback_var( beta(arena_a.val(), arena_b.val()), [arena_a, arena_b, digamma_ab](auto& vi) mutable { const auto adj_val = (vi.adj().array() * vi.val().array()).eval(); arena_a.adj().array() += adj_val * (digamma(arena_a.val().array()) - digamma_ab); arena_b.adj().array() += adj_val * (digamma(arena_b.val().array()) - digamma_ab); }); } else if (!is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = value_of(b); auto digamma_ab = to_arena(digamma(arena_a.val()).array() - 
digamma(arena_a.val().array() + arena_b.array())); return make_callback_var(beta(arena_a.val(), arena_b), [arena_a, arena_b, digamma_ab](auto& vi) mutable { arena_a.adj().array() += vi.adj().array() * digamma_ab * vi.val().array(); }); } else if (!is_constant::value) { arena_t> arena_a = value_of(a); arena_t> arena_b = b; auto beta_val = beta(arena_a, arena_b.val()); auto digamma_ab = to_arena((digamma(arena_b.val()).array() - digamma(arena_a.array() + arena_b.val().array())) * beta_val.array()); return make_callback_var( beta_val, [arena_a, arena_b, digamma_ab](auto& vi) mutable { arena_b.adj().array() += vi.adj().array() * digamma_ab.array(); }); } } template * = nullptr, require_stan_scalar_t* = nullptr> inline auto beta(const Scalar& a, const VarMat& b) { if (!is_constant::value && !is_constant::value) { var arena_a = a; arena_t> arena_b = b; auto beta_val = beta(arena_a.val(), arena_b.val()); auto digamma_ab = to_arena(digamma(arena_a.val() + arena_b.val().array())); return make_callback_var( beta(arena_a.val(), arena_b.val()), [arena_a, arena_b, digamma_ab](auto& vi) mutable { const auto adj_val = (vi.adj().array() * vi.val().array()).eval(); arena_a.adj() += (adj_val * (digamma(arena_a.val()) - digamma_ab)).sum(); arena_b.adj().array() += adj_val * (digamma(arena_b.val().array()) - digamma_ab); }); } else if (!is_constant::value) { var arena_a = a; arena_t> arena_b = value_of(b); auto digamma_ab = to_arena(digamma(arena_a.val()) - digamma(arena_a.val() + arena_b.array())); return make_callback_var( beta(arena_a.val(), arena_b), [arena_a, arena_b, digamma_ab](auto& vi) mutable { arena_a.adj() += (vi.adj().array() * digamma_ab * vi.val().array()).sum(); }); } else if (!is_constant::value) { double arena_a = value_of(a); arena_t> arena_b = b; auto beta_val = beta(arena_a, arena_b.val()); auto digamma_ab = to_arena((digamma(arena_b.val()).array() - digamma(arena_a + arena_b.val().array())) * beta_val.array()); return make_callback_var(beta_val, [arena_b, 
digamma_ab](auto& vi) mutable { arena_b.adj().array() += vi.adj().array() * digamma_ab.array(); }); } } template * = nullptr, require_stan_scalar_t* = nullptr> inline auto beta(const VarMat& a, const Scalar& b) { if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; var arena_b = b; auto beta_val = beta(arena_a.val(), arena_b.val()); auto digamma_ab = to_arena(digamma(arena_a.val().array() + arena_b.val())); return make_callback_var( beta(arena_a.val(), arena_b.val()), [arena_a, arena_b, digamma_ab](auto& vi) mutable { const auto adj_val = (vi.adj().array() * vi.val().array()).eval(); arena_a.adj().array() += adj_val * (digamma(arena_a.val().array()) - digamma_ab); arena_b.adj() += (adj_val * (digamma(arena_b.val()) - digamma_ab)).sum(); }); } else if (!is_constant::value) { arena_t> arena_a = a; double arena_b = value_of(b); auto digamma_ab = to_arena(digamma(arena_a.val()).array() - digamma(arena_a.val().array() + arena_b)); return make_callback_var( beta(arena_a.val(), arena_b), [arena_a, digamma_ab](auto& vi) mutable { arena_a.adj().array() += vi.adj().array() * digamma_ab * vi.val().array(); }); } else if (!is_constant::value) { arena_t> arena_a = value_of(a); var arena_b = b; auto beta_val = beta(arena_a, arena_b.val()); auto digamma_ab = to_arena( (digamma(arena_b.val()) - digamma(arena_a.array() + arena_b.val())) * beta_val.array()); return make_callback_var( beta_val, [arena_a, arena_b, digamma_ab](auto& vi) mutable { arena_b.adj() += (vi.adj().array() * digamma_ab.array()).sum(); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/owens_t.hpp0000644000176200001440000001074614645137106022543 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_OWENS_T_HPP #define STAN_MATH_REV_FUN_OWENS_T_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * The Owen's T function of h and a. 
* * Used to compute the cumulative density function for the skew normal * distribution. * * @tparam Var1 A scalar or Eigen type whose `scalar_type` is an var. * @tparam Var2 A scalar or Eigen type whose `scalar_type` is an var. * @param h var parameter. * @param a var parameter. * @return The Owen's T function. */ template * = nullptr, require_all_not_std_vector_t* = nullptr> inline auto owens_t(const Var1& h, const Var2& a) { auto h_arena = to_arena(h); auto a_arena = to_arena(a); using return_type = return_var_matrix_t; arena_t ret = owens_t(h_arena.val(), a_arena.val()); reverse_pass_callback([h_arena, a_arena, ret]() mutable { const auto& h_val = as_value_array_or_scalar(h_arena); const auto& a_val = as_value_array_or_scalar(a_arena); const auto neg_h_sq_div_2 = stan::math::eval(-square(h_val) * 0.5); const auto one_p_a_sq = stan::math::eval(1.0 + square(a_val)); as_array_or_scalar(h_arena).adj() += possibly_sum>( as_array_or_scalar(ret.adj()) * erf(a_val * h_val * INV_SQRT_TWO) * exp(neg_h_sq_div_2) * INV_SQRT_TWO_PI * -0.5); as_array_or_scalar(a_arena).adj() += possibly_sum>( as_array_or_scalar(ret.adj()) * exp(neg_h_sq_div_2 * one_p_a_sq) / (one_p_a_sq * TWO_PI)); }); return return_type(ret); } /** * The Owen's T function of h and a. * * Used to compute the cumulative density function for the skew normal * distribution. * * @tparam Var A scalar or Eigen type whose `scalar_type` is an var. * @tparam Arith A scalar or Eigen type with an inner arirthmetic scalar value. * @param h var parameter. * @param a double parameter. * @return The Owen's T function. 
*/ template * = nullptr, require_all_not_std_vector_t* = nullptr, require_st_var* = nullptr> inline auto owens_t(const Var& h, const Arith& a) { auto h_arena = to_arena(h); auto a_arena = to_arena(a); using return_type = return_var_matrix_t; arena_t ret = owens_t(h_arena.val(), a_arena); reverse_pass_callback([h_arena, a_arena, ret]() mutable { const auto& h_val = as_value_array_or_scalar(h_arena); as_array_or_scalar(h_arena).adj() += possibly_sum>( as_array_or_scalar(ret.adj()) * erf(as_array_or_scalar(a_arena) * h_val * INV_SQRT_TWO) * exp(-square(h_val) * 0.5) * INV_SQRT_TWO_PI * -0.5); }); return return_type(ret); } /** * The Owen's T function of h and a. * * Used to compute the cumulative density function for the skew normal * distribution. * * @tparam Var A scalar or Eigen type whose `scalar_type` is an var. * @tparam Arith A scalar or Eigen type with an inner arithmetic scalar value. * @param h double parameter. * @param a var parameter. * @return The Owen's T function. */ template * = nullptr, require_all_not_std_vector_t* = nullptr, require_st_var* = nullptr> inline auto owens_t(const Arith& h, const Var& a) { auto h_arena = to_arena(h); auto a_arena = to_arena(a); using return_type = return_var_matrix_t; arena_t ret = owens_t(h_arena, a_arena.val()); reverse_pass_callback([h_arena, a_arena, ret]() mutable { const auto one_p_a_sq = eval(1.0 + square(as_value_array_or_scalar(a_arena))); as_array_or_scalar(a_arena).adj() += possibly_sum>( as_array_or_scalar(ret.adj()) * exp(-0.5 * square(as_array_or_scalar(h_arena)) * one_p_a_sq) / (one_p_a_sq * TWO_PI)); }); return return_type(ret); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/mdivide_left_tri.hpp0000644000176200001440000003421314645137106024371 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MDIVIDE_LEFT_TRI_HPP #define STAN_MATH_REV_FUN_MDIVIDE_LEFT_TRI_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal 
{ template class mdivide_left_tri_vv_vari : public vari { public: int M_; // A.rows() = A.cols() = B.rows() int N_; // B.cols() double *A_; double *C_; vari **variRefA_; vari **variRefB_; vari **variRefC_; mdivide_left_tri_vv_vari(const Eigen::Matrix &A, const Eigen::Matrix &B) : vari(0.0), M_(A.rows()), N_(B.cols()), A_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(double) * A.rows() * A.cols()))), C_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(double) * B.rows() * B.cols()))), variRefA_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * A.rows() * (A.rows() + 1) / 2))), variRefB_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))), variRefC_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))) { using Eigen::Map; size_t pos = 0; if (TriView == Eigen::Lower) { for (size_type j = 0; j < M_; j++) { for (size_type i = j; i < M_; i++) { variRefA_[pos++] = A(i, j).vi_; } } } else if (TriView == Eigen::Upper) { for (size_type j = 0; j < M_; j++) { for (size_type i = 0; i < j + 1; i++) { variRefA_[pos++] = A(i, j).vi_; } } } Map c_map(C_, M_, N_); Map a_map(A_, M_, M_); a_map = A.val(); c_map = B.val(); Map(variRefB_, M_, N_) = B.vi(); c_map = a_map.template triangularView().solve(c_map); Map(variRefC_, M_, N_) = c_map.unaryExpr([](double x) { return new vari(x, false); }); } virtual void chain() { using Eigen::Map; matrix_d adjA; matrix_d adjB; adjB = Map(A_, M_, M_) .template triangularView() .transpose() .solve(Map(variRefC_, M_, N_).adj()); adjA = -adjB * Map(C_, M_, N_).transpose(); size_t pos = 0; if (TriView == Eigen::Lower) { for (size_type j = 0; j < adjA.cols(); j++) { for (size_type i = j; i < adjA.rows(); i++) { variRefA_[pos++]->adj_ += adjA(i, j); } } } else if (TriView == Eigen::Upper) { for (size_type j = 0; j < adjA.cols(); j++) { for (size_type i = 0; i < j + 1; i++) { 
variRefA_[pos++]->adj_ += adjA(i, j); } } } Map(variRefB_, M_, N_).adj() += adjB; } }; template class mdivide_left_tri_dv_vari : public vari { public: int M_; // A.rows() = A.cols() = B.rows() int N_; // B.cols() double *A_; double *C_; vari **variRefB_; vari **variRefC_; mdivide_left_tri_dv_vari(const Eigen::Matrix &A, const Eigen::Matrix &B) : vari(0.0), M_(A.rows()), N_(B.cols()), A_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(double) * A.rows() * A.cols()))), C_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(double) * B.rows() * B.cols()))), variRefB_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))), variRefC_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))) { using Eigen::Map; Map(A_, M_, M_) = A; Map(variRefB_, M_, N_) = B.vi(); Map c_map(C_, M_, N_); c_map = B.val(); c_map = Map(A_, M_, M_) .template triangularView() .solve(c_map); Map(variRefC_, M_, N_) = c_map.unaryExpr([](double x) { return new vari(x, false); }); } virtual void chain() { using Eigen::Map; Map(variRefB_, M_, N_).adj() += Map(A_, M_, M_) .template triangularView() .transpose() .solve(Map(variRefC_, M_, N_).adj()); } }; template class mdivide_left_tri_vd_vari : public vari { public: int M_; // A.rows() = A.cols() = B.rows() int N_; // B.cols() double *A_; double *C_; vari **variRefA_; vari **variRefC_; mdivide_left_tri_vd_vari(const Eigen::Matrix &A, const Eigen::Matrix &B) : vari(0.0), M_(A.rows()), N_(B.cols()), A_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(double) * A.rows() * A.cols()))), C_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(double) * B.rows() * B.cols()))), variRefA_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * A.rows() * (A.rows() + 1) / 2))), variRefC_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * 
B.cols()))) { using Eigen::Map; using Eigen::Matrix; size_t pos = 0; if (TriView == Eigen::Lower) { for (size_type j = 0; j < M_; j++) { for (size_type i = j; i < M_; i++) { variRefA_[pos++] = A(i, j).vi_; } } } else if (TriView == Eigen::Upper) { for (size_type j = 0; j < M_; j++) { for (size_type i = 0; i < j + 1; i++) { variRefA_[pos++] = A(i, j).vi_; } } } Map Ad(A_, M_, M_); Map Cd(C_, M_, N_); Ad = A.val(); Cd = Ad.template triangularView().solve(B); Map(variRefC_, M_, N_) = Cd.unaryExpr([](double x) { return new vari(x, false); }); } virtual void chain() { using Eigen::Map; using Eigen::Matrix; Matrix adjA(M_, M_); Matrix adjC(M_, N_); adjC = Map(variRefC_, M_, N_).adj(); adjA.noalias() = -Map>(A_, M_, M_) .template triangularView() .transpose() .solve(adjC * Map>(C_, M_, N_).transpose()); size_t pos = 0; if (TriView == Eigen::Lower) { for (size_type j = 0; j < adjA.cols(); j++) { for (size_type i = j; i < adjA.rows(); i++) { variRefA_[pos++]->adj_ += adjA(i, j); } } } else if (TriView == Eigen::Upper) { for (size_type j = 0; j < adjA.cols(); j++) { for (size_type i = 0; i < j + 1; i++) { variRefA_[pos++]->adj_ += adjA(i, j); } } } } }; } // namespace internal template * = nullptr> inline Eigen::Matrix mdivide_left_tri(const T1 &A, const T2 &b) { check_square("mdivide_left_tri", "A", A); check_multiplicable("mdivide_left_tri", "A", A, "b", b); if (A.rows() == 0) { return {0, b.cols()}; } // NOTE: this is not a memory leak, this vari is used in the // expression graph to evaluate the adjoint, but is not needed // for the returned matrix. Memory will be cleaned up with the // arena allocator. 
auto *baseVari = new internal::mdivide_left_tri_vv_vari< TriView, T1::RowsAtCompileTime, T1::ColsAtCompileTime, T2::RowsAtCompileTime, T2::ColsAtCompileTime>(A, b); Eigen::Matrix res( b.rows(), b.cols()); res.vi() = Eigen::Map(&(baseVari->variRefC_[0]), b.rows(), b.cols()); return res; } template * = nullptr, require_eigen_vt * = nullptr> inline Eigen::Matrix mdivide_left_tri(const T1 &A, const T2 &b) { check_square("mdivide_left_tri", "A", A); check_multiplicable("mdivide_left_tri", "A", A, "b", b); if (A.rows() == 0) { return {0, b.cols()}; } // NOTE: this is not a memory leak, this vari is used in the // expression graph to evaluate the adjoint, but is not needed // for the returned matrix. Memory will be cleaned up with the // arena allocator. auto *baseVari = new internal::mdivide_left_tri_dv_vari< TriView, T1::RowsAtCompileTime, T1::ColsAtCompileTime, T2::RowsAtCompileTime, T2::ColsAtCompileTime>(A, b); Eigen::Matrix res( b.rows(), b.cols()); res.vi() = Eigen::Map(&(baseVari->variRefC_[0]), b.rows(), b.cols()); return res; } template * = nullptr, require_eigen_vt * = nullptr> inline Eigen::Matrix mdivide_left_tri(const T1 &A, const T2 &b) { check_square("mdivide_left_tri", "A", A); check_multiplicable("mdivide_left_tri", "A", A, "b", b); if (A.rows() == 0) { return {0, b.cols()}; } // NOTE: this is not a memory leak, this vari is used in the // expression graph to evaluate the adjoint, but is not needed // for the returned matrix. Memory will be cleaned up with the // arena allocator. auto *baseVari = new internal::mdivide_left_tri_vd_vari< TriView, T1::RowsAtCompileTime, T1::ColsAtCompileTime, T2::RowsAtCompileTime, T2::ColsAtCompileTime>(A, b); Eigen::Matrix res( b.rows(), b.cols()); res.vi() = Eigen::Map(&(baseVari->variRefC_[0]), b.rows(), b.cols()); return res; } /** * Returns the solution of the system Ax=B when A is triangular. * * This overload handles arguments where one of T1 or T2 are * `var_value` where `T` is an Eigen type. 
The other type can * also be a `var_value` or it can be a matrix type that inherits * from EigenBase * * @tparam TriView Specifies whether A is upper (Eigen::Upper) * or lower triangular (Eigen::Lower). * @tparam T1 type of the triangular matrix * @tparam T2 type of the right-hand side matrix or vector * * @param A Triangular matrix. * @param B Right hand side matrix or vector. * @return x = A^-1 B, solution of the linear system. * @throws std::domain_error if A is not square or B does not have * as many rows as A has columns. */ template * = nullptr, require_any_var_matrix_t * = nullptr> inline auto mdivide_left_tri(const T1 &A, const T2 &B) { using ret_val_type = plain_type_t; using ret_type = var_value; if (A.size() == 0) { return ret_type(ret_val_type(0, B.cols())); } check_square("mdivide_left_tri", "A", A); check_multiplicable("mdivide_left_tri", "A", A, "B", B); if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; auto arena_A_val = to_arena(arena_A.val()); arena_t res = arena_A_val.template triangularView().solve(arena_B.val()); reverse_pass_callback([arena_A, arena_B, arena_A_val, res]() mutable { promote_scalar_t adjB = arena_A_val.template triangularView().transpose().solve( res.adj()); arena_B.adj() += adjB; arena_A.adj() -= (adjB * res.val().transpose().eval()) .template triangularView(); }); return ret_type(res); } else if (!is_constant::value) { arena_t> arena_A = A; auto arena_A_val = to_arena(arena_A.val()); arena_t res = arena_A_val.template triangularView().solve(value_of(B)); reverse_pass_callback([arena_A, arena_A_val, res]() mutable { promote_scalar_t adjB = arena_A_val.template triangularView().transpose().solve( res.adj()); arena_A.adj() -= (adjB * res.val().transpose().eval()) .template triangularView(); }); return ret_type(res); } else { arena_t> arena_A = value_of(A); arena_t> arena_B = B; arena_t res = arena_A.template triangularView().solve(arena_B.val()); reverse_pass_callback([arena_A, arena_B, 
res]() mutable { promote_scalar_t adjB = arena_A.template triangularView().transpose().solve( res.adj()); arena_B.adj() += adjB; }); return ret_type(res); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/columns_dot_product.hpp0000644000176200001440000001044714645137106025151 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_COLUMNS_DOT_PRODUCT_HPP #define STAN_MATH_REV_FUN_COLUMNS_DOT_PRODUCT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the dot product of columns of the specified matrices. * * @tparam Mat1 type of the first matrix (must be derived from \c * Eigen::MatrixBase) * @tparam Mat2 type of the second matrix (must be derived from \c * Eigen::MatrixBase) * * @param v1 Matrix of first vectors. * @param v2 Matrix of second vectors. * @return Dot product of the vectors. * @throw std::domain_error If the vectors are not the same * size or if they are both not vector dimensioned. */ template * = nullptr, require_any_eigen_vt* = nullptr> inline Eigen::Matrix, 1, Mat1::ColsAtCompileTime> columns_dot_product(const Mat1& v1, const Mat2& v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); Eigen::Matrix ret(1, v1.cols()); for (size_type j = 0; j < v1.cols(); ++j) { ret.coeffRef(j) = dot_product(v1.col(j), v2.col(j)); } return ret; } /** * Returns the dot product of columns of the specified matrices. * * This overload is used when at least one of Mat1 and Mat2 is * a `var_value` where `T` inherits from `EigenBase`. The other * argument can be another `var_value` or a type that inherits from * `EigenBase`. * * @tparam Mat1 type of the first matrix * @tparam Mat2 type of the second matrix * * @param v1 Matrix of first vectors. * @param v2 Matrix of second vectors. * @return Dot product of the vectors. * @throw std::domain_error If the vectors are not the same * size or if they are both not vector dimensioned. 
*/ template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto columns_dot_product(const Mat1& v1, const Mat2& v2) { check_matching_sizes("columns_dot_product", "v1", v1, "v2", v2); using inner_return_t = decltype( (value_of(v1).array() * value_of(v2).array()).colwise().sum().matrix()); using return_t = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_v1 = v1; arena_t> arena_v2 = v2; return_t res = (arena_v1.val().array() * arena_v2.val().array()).colwise().sum(); reverse_pass_callback([arena_v1, arena_v2, res]() mutable { if (is_var_matrix::value) { arena_v1.adj().noalias() += arena_v2.val() * res.adj().asDiagonal(); } else { arena_v1.adj() += arena_v2.val() * res.adj().asDiagonal(); } if (is_var_matrix::value) { arena_v2.adj().noalias() += arena_v1.val() * res.adj().asDiagonal(); } else { arena_v2.adj() += arena_v1.val() * res.adj().asDiagonal(); } }); return res; } else if (!is_constant::value) { arena_t> arena_v1 = value_of(v1); arena_t> arena_v2 = v2; return_t res = (arena_v1.array() * arena_v2.val().array()).colwise().sum(); reverse_pass_callback([arena_v1, arena_v2, res]() mutable { if (is_var_matrix::value) { arena_v2.adj().noalias() += arena_v1 * res.adj().asDiagonal(); } else { arena_v2.adj() += arena_v1 * res.adj().asDiagonal(); } }); return res; } else { arena_t> arena_v1 = v1; arena_t> arena_v2 = value_of(v2); return_t res = (arena_v1.val().array() * arena_v2.array()).colwise().sum(); reverse_pass_callback([arena_v1, arena_v2, res]() mutable { if (is_var_matrix::value) { arena_v1.adj().noalias() += arena_v2 * res.adj().asDiagonal(); } else { arena_v1.adj() += arena_v2 * res.adj().asDiagonal(); } }); return res; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/svd.hpp0000644000176200001440000000733114645137106021655 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SVD_HPP #define STAN_MATH_REV_FUN_SVD_HPP #include #include #include #include #include #include 
namespace stan { namespace math { /** * Given input matrix m, return the singular value decomposition (U,D,V) * such that `m = U*diag(D)*V^{T}` * * Adjoint update equation comes from Equation (4) in Differentiable Programming * Tensor Networks(H. Liao, J. Liu, et al., arXiv:1903.09650). * * @tparam EigMat type of input matrix * @param m MxN input matrix * @return a tuple (U,D,V) where U is an orthogonal matrix, D a vector of * singular values (in decreasing order), and V an orthogonal matrix */ template * = nullptr> inline auto svd(const EigMat& m) { using mat_ret_type = return_var_matrix_t; using vec_ret_type = return_var_matrix_t; if (unlikely(m.size() == 0)) { return std::make_tuple(mat_ret_type(Eigen::MatrixXd(0, 0)), vec_ret_type(Eigen::VectorXd(0, 1)), mat_ret_type(Eigen::MatrixXd(0, 0))); } const int M = std::min(m.rows(), m.cols()); auto arena_m = to_arena(m); Eigen::JacobiSVD svd( arena_m.val(), Eigen::ComputeThinU | Eigen::ComputeThinV); auto singular_values_d = svd.singularValues(); arena_t arena_Fp(M, M); arena_t arena_Fm(M, M); for (int i = 0; i < M; i++) { for (int j = 0; j < M; j++) { double a = 1.0 / (singular_values_d[j] - singular_values_d[i]); double b = 1.0 / (singular_values_d[i] + singular_values_d[j]); arena_Fp(i, j) = a + b; arena_Fm(i, j) = a - b; } } arena_Fp.diagonal().setZero(); arena_Fm.diagonal().setZero(); arena_t singular_values = singular_values_d; arena_t arena_U = svd.matrixU(); arena_t arena_V = svd.matrixV(); reverse_pass_callback([arena_m, arena_U, singular_values, arena_V, arena_Fp, arena_Fm]() mutable { // SVD-U reverse mode Eigen::MatrixXd UUadjT = arena_U.val_op().transpose() * arena_U.adj_op(); auto u_adj = .5 * arena_U.val_op() * (arena_Fp.array() * (UUadjT - UUadjT.transpose()).array()) .matrix() * arena_V.val_op().transpose() + (Eigen::MatrixXd::Identity(arena_m.rows(), arena_m.rows()) - arena_U.val_op() * arena_U.val_op().transpose()) * arena_U.adj_op() * singular_values.val_op().asDiagonal().inverse() * 
arena_V.val_op().transpose(); // Singular values reverse mode auto d_adj = arena_U.val_op() * singular_values.adj().asDiagonal() * arena_V.val_op().transpose(); // SVD-V reverse mode Eigen::MatrixXd VTVadj = arena_V.val_op().transpose() * arena_V.adj_op(); auto v_adj = 0.5 * arena_U.val_op() * (arena_Fm.array() * (VTVadj - VTVadj.transpose()).array()) .matrix() * arena_V.val_op().transpose() + arena_U.val_op() * singular_values.val_op().asDiagonal().inverse() * arena_V.adj_op().transpose() * (Eigen::MatrixXd::Identity(arena_m.cols(), arena_m.cols()) - arena_V.val_op() * arena_V.val_op().transpose()); arena_m.adj() += u_adj + d_adj + v_adj; }); return std::make_tuple(mat_ret_type(arena_U), vec_ret_type(singular_values), mat_ret_type(arena_V)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_rising_factorial.hpp0000644000176200001440000000332214645137106025235 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_RISING_FACTORIAL_HPP #define STAN_MATH_REV_FUN_LOG_RISING_FACTORIAL_HPP #include #include #include #include #include namespace stan { namespace math { namespace internal { class log_rising_factorial_vv_vari : public op_vv_vari { public: log_rising_factorial_vv_vari(vari* avi, vari* bvi) : op_vv_vari(log_rising_factorial(avi->val_, bvi->val_), avi, bvi) {} void chain() { avi_->adj_ += adj_ * (digamma(avi_->val_ + bvi_->val_) - digamma(avi_->val_)); bvi_->adj_ += adj_ * digamma(avi_->val_ + bvi_->val_); } }; class log_rising_factorial_vd_vari : public op_vd_vari { public: log_rising_factorial_vd_vari(vari* avi, double b) : op_vd_vari(log_rising_factorial(avi->val_, b), avi, b) {} void chain() { avi_->adj_ += adj_ * (digamma(avi_->val_ + bd_) - digamma(avi_->val_)); } }; class log_rising_factorial_dv_vari : public op_dv_vari { public: log_rising_factorial_dv_vari(double a, vari* bvi) : op_dv_vari(log_rising_factorial(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ += adj_ * digamma(bvi_->val_ + ad_); } }; } // 
namespace internal inline var log_rising_factorial(const var& a, double b) { return var(new internal::log_rising_factorial_vd_vari(a.vi_, b)); } inline var log_rising_factorial(const var& a, const var& b) { return var(new internal::log_rising_factorial_vv_vari(a.vi_, b.vi_)); } inline var log_rising_factorial(double a, const var& b) { return var(new internal::log_rising_factorial_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/to_var.hpp0000644000176200001440000001110314645137106022343 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TO_VAR_HPP #define STAN_MATH_REV_FUN_TO_VAR_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Converts argument to an automatic differentiation variable. * * Returns a var variable with the input value. * * @param[in] x A scalar value * @return An automatic differentiation variable with the input value. */ inline var to_var(double x) { return var(x); } /** * Specialization of to_var for non-const var input * * @param[in,out] x An automatic differentiation variable. * @return The input automatic differentiation variable. */ inline var& to_var(var& x) { return x; } /** * Specialization of to_var for const var input * * @param[in,out] x An automatic differentiation variable. * @return The input automatic differentiation variable. */ inline const var& to_var(const var& x) { return x; } /** * Converts argument to an automatic differentiation variable. * * Returns a var variable with the input value. 
* * @param[in] v A std::vector * @return A std::vector with the values set */ inline std::vector to_var(const std::vector& v) { std::vector var_vector(v.size()); for (size_t n = 0; n < v.size(); n++) { var_vector[n] = v[n]; } return var_vector; } /** * Specialization of to_var to for const input vector of var * * Returns a var variable from the input * * @param[in] v A std::vector * @return The input std::vector */ inline const std::vector& to_var(const std::vector& v) { return v; } /** * Specialization of to_var to for non-const input vector of var * * Returns a var variable from the input * * @param[in] v A std::vector * @return The input std::vector */ inline std::vector& to_var(std::vector& v) { return v; } /** * Converts argument to an automatic differentiation variable. * * Returns a var variable with the input value. * * @param[in] m A Matrix with scalars * @return A Matrix with automatic differentiation variables */ inline matrix_v to_var(const matrix_d& m) { matrix_v m_v = m; return m_v; } /** * Specialization of to_var for non-const matrices of vars * * @param[in,out] m A matrix of automatic differentation variables. * @return The input matrix of automatic differentiation variables. */ inline matrix_v& to_var(matrix_v& m) { return m; } /** * Specialization of to_var for const matrices of vars * * @param[in,out] m A matrix of automatic differentation variables. * @return The input matrix of automatic differentiation variables. */ inline const matrix_v& to_var(const matrix_v& m) { return m; } /** * Converts argument to an automatic differentiation variable. * * Returns a var variable with the input value. * * @param[in] v A Vector of scalars * @return A Vector of automatic differentiation variables with * values of v */ inline vector_v to_var(const vector_d& v) { vector_v v_v = v; return v_v; } /** * Specialization of to_var for const column vector of vars * * @param[in,out] v A column vector of automatic differentation variables. 
* @return The input column vector of automatic differentiation variables. */ inline const vector_v& to_var(const vector_v& v) { return v; } /** * Specialization of to_var for non-const column vector of vars * * @param[in,out] v A column vector of automatic differentation variables. * @return The input column vector of automatic differentiation variables. */ inline vector_v& to_var(vector_v& v) { return v; } /** * Converts argument to an automatic differentiation variable. * * Returns a var variable with the input value. * * @param[in] rv A row vector of scalars * @return A row vector of automatic differentation variables with * values of rv. */ inline row_vector_v to_var(const row_vector_d& rv) { row_vector_v rv_v = rv; return rv_v; } /** * Specialization of to_var for const row vector of vars * * @param[in,out] rv A column vector of automatic differentation variables. * @return The input row vector of automatic differentiation variables. */ inline const row_vector_v& to_var(const row_vector_v& rv) { return rv; } /** * Specialization of to_var for non-const row vector of vars * * @param[in,out] rv A column vector of automatic differentation variables. * @return The input row vector of automatic differentiation variables. */ inline row_vector_v& to_var(row_vector_v& rv) { return rv; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/grad.hpp0000644000176200001440000000156614645137106022002 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_GRAD_HPP #define STAN_MATH_REV_FUN_GRAD_HPP #include #include #include namespace stan { namespace math { /** * Propagate chain rule to calculate gradients starting from * the specified variable. Resizes the input vector to be the * correct size. * * The grad() function does not itself recover any memory. use * recover_memory() or * recover_memory_nested() to recover memory. 
* * @param[in] v Value of function being differentiated * @param[in] x Variables being differentiated with respect to * @param[out] g Gradient, d/dx v, evaluated at x. */ inline void grad(var& v, Eigen::Matrix& x, Eigen::VectorXd& g) { grad(v.vi_); g = x.adj(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/accumulator.hpp0000644000176200001440000000716314645137106023403 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ACCUMULATOR_HPP #define STAN_MATH_REV_FUN_ACCUMULATOR_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Class to accumulate values and eventually return their sum. If * no values are ever added, the return value is 0. * * This class is useful for speeding up autodiff of long sums * because it uses the sum() operation (either from * stan::math or one defined by argument-dependent lookup. * * @tparam T Type of scalar added */ template class accumulator> { private: static const int max_size_ = 128; std::vector> buf_; /** * Checks if the internal buffer is full and if so reduces it to 1 element. */ void check_size() { if (buf_.size() == max_size_) { var tmp = stan::math::sum(buf_); buf_.resize(1); buf_[0] = tmp; } } public: /** * Constructor. Reserves space. */ accumulator() : buf_() { buf_.reserve(max_size_); } /** * Add the specified arithmetic type value to the buffer after * static casting it to the class type T. * *

See the std library doc for std::is_arithmetic * for information on what counts as an arithmetic type. * * @tparam S Type of argument * @param x Value to add */ template > inline void add(S x) { check_size(); buf_.push_back(x); } /** * Add each entry in the specified matrix, vector, or row vector * of values to the buffer. * * @tparam S type of the matrix * @param m Matrix of values to add */ template * = nullptr> inline void add(const S& m) { check_size(); buf_.push_back(stan::math::sum(m)); } /** * Add each entry in the specified std vector of values to the buffer. * * @tparam S type of the matrix * @param xs std vector of values to add */ template * = nullptr> inline void add(const S& xs) { check_size(); this->add(stan::math::sum(xs)); } /** * Recursively add each entry in the specified standard vector containint * containers to the buffer. * * @tparam S Type of value to recursively add. * @param xs Vector of entries to add */ template * = nullptr> inline void add(const S& xs) { check_size(); for (const auto& i : xs) { this->add(i); } } #ifdef STAN_OPENCL /** * Sum each entry and then push to the buffer. * @tparam S A Type inheriting from `matrix_cl_base` * @param xs An OpenCL matrix */ template * = nullptr> inline void add(const var_value& xs) { check_size(); buf_.push_back(stan::math::sum(xs)); } /** * Sum each entry and then push to the buffer. * @tparam S A Type inheriting from `matrix_cl_base` * @param xs An OpenCL matrix */ template * = nullptr> inline void add(const S& xs) { check_size(); buf_.push_back(stan::math::sum(xs)); } #endif /** * Return the sum of the accumulated values. * * @return Sum of accumulated values. 
*/ inline var sum() const { return stan::math::sum(buf_); } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cholesky_decompose.hpp0000644000176200001440000001515014645137106024736 0ustar liggesusers#ifndef STAN_MATH_REV_MAT_FUN_CHOLESKY_DECOMPOSE_HPP #define STAN_MATH_REV_MAT_FUN_CHOLESKY_DECOMPOSE_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template inline void initialize_return(LMat& L, const LAMat& L_A, vari*& dummy) { for (Eigen::Index j = 0; j < L_A.rows(); ++j) { for (Eigen::Index i = 0; i < L_A.rows(); ++i) { if (j > i) { L.coeffRef(i, j) = dummy; } else { L.coeffRef(i, j) = new vari(L_A.coeffRef(i, j), false); } } } } /** * Reverse mode differentiation algorithm reference: * * Mike Giles. An extended collection of matrix derivative results for * forward and reverse mode AD. Jan. 2008. * * Note algorithm as laid out in Giles is row-major, so Eigen::Matrices * are explicitly storage order RowMajor, whereas Eigen defaults to * ColumnMajor. Also note algorithm starts by calculating the adjoint for * A(M_ - 1, M_ - 1), hence pos on line 94 is decremented to start at pos * = M_ * (M_ + 1) / 2. 
*/ template inline auto unblocked_cholesky_lambda(T1& L_A, T2& L, T3& A) { return [L_A, L, A]() mutable { const size_t N = A.rows(); // Algorithm is in rowmajor so we make the adjoint copy rowmajor Eigen::Matrix adjL(L.rows(), L.cols()); Eigen::MatrixXd adjA = Eigen::MatrixXd::Zero(L.rows(), L.cols()); adjL.template triangularView() = L.adj(); for (int i = N - 1; i >= 0; --i) { for (int j = i; j >= 0; --j) { if (i == j) { adjA.coeffRef(i, j) = 0.5 * adjL.coeff(i, j) / L_A.coeff(i, j); } else { adjA.coeffRef(i, j) = adjL.coeff(i, j) / L_A.coeff(j, j); adjL.coeffRef(j, j) -= adjL.coeff(i, j) * L_A.coeff(i, j) / L_A.coeff(j, j); } for (int k = j - 1; k >= 0; --k) { adjL.coeffRef(i, k) -= adjA.coeff(i, j) * L_A.coeff(j, k); adjL.coeffRef(j, k) -= adjA.coeff(i, j) * L_A.coeff(i, k); } } } A.adj() += adjA; }; } /** * Reverse mode differentiation algorithm reference: * * Iain Murray: Differentiation of the Cholesky decomposition, 2016. * */ template inline auto cholesky_lambda(T1& L_A, T2& L, T3& A) { return [L_A, L, A]() mutable { using Eigen::Lower; using Eigen::StrictlyUpper; using Eigen::Upper; Eigen::MatrixXd L_adj = Eigen::MatrixXd::Zero(L.rows(), L.cols()); L_adj.template triangularView() = L.adj(); const int M_ = L_A.rows(); int block_size_ = std::max(M_ / 8, 8); block_size_ = std::min(block_size_, 128); for (int k = M_; k > 0; k -= block_size_) { int j = std::max(0, k - block_size_); auto R = L_A.block(j, 0, k - j, j); auto D = L_A.block(j, j, k - j, k - j).eval(); auto B = L_A.block(k, 0, M_ - k, j); auto C = L_A.block(k, j, M_ - k, k - j); auto R_adj = L_adj.block(j, 0, k - j, j); auto D_adj = L_adj.block(j, j, k - j, k - j); auto B_adj = L_adj.block(k, 0, M_ - k, j); auto C_adj = L_adj.block(k, j, M_ - k, k - j); D.transposeInPlace(); if (C_adj.size() > 0) { C_adj = D.template triangularView() .solve(C_adj.transpose()) .transpose(); B_adj.noalias() -= C_adj * R; D_adj.noalias() -= C_adj.transpose() * C; } D_adj = (D * D_adj.template triangularView()).eval(); 
D_adj.template triangularView() = D_adj.adjoint().template triangularView(); D.template triangularView().solveInPlace(D_adj); D.template triangularView().solveInPlace(D_adj.transpose()); R_adj.noalias() -= C_adj.transpose() * B; R_adj.noalias() -= D_adj.template selfadjointView() * R; D_adj.diagonal() *= 0.5; } A.adj().template triangularView() += L_adj; }; } } // namespace internal /** * Reverse mode specialization of cholesky decomposition * * Internally calls Eigen::LLT rather than using * stan::math::cholesky_decompose in order to use an inplace decomposition. * * Note chainable stack varis are created below in Matrix * * @param A Matrix * @return L cholesky factor of A */ template * = nullptr> inline auto cholesky_decompose(const EigMat& A) { check_square("cholesky_decompose", "A", A); arena_t arena_A = A; arena_t> L_A(arena_A.val()); check_symmetric("cholesky_decompose", "A", A); Eigen::LLT, Eigen::Lower> L_factor(L_A); check_pos_definite("cholesky_decompose", "m", L_factor); L_A.template triangularView().setZero(); // looping gradient calcs faster for small matrices compared to // cholesky_block vari* dummy = new vari(0.0, false); arena_t L(L_A.rows(), L_A.cols()); if (L_A.rows() <= 35) { internal::initialize_return(L, L_A, dummy); reverse_pass_callback(internal::unblocked_cholesky_lambda(L_A, L, arena_A)); } else { internal::initialize_return(L, L_A, dummy); reverse_pass_callback(internal::cholesky_lambda(L_A, L, arena_A)); } return plain_type_t(L); } /** * Reverse mode specialization of Cholesky decomposition * * Internally calls Eigen::LLT rather than using * stan::math::cholesky_decompose in order to use an inplace decomposition. * * Note chainable stack varis are created below in Matrix * @tparam T A `var_value` holding an inner eigen type. * @param A A square positive definite matrix with no nan values. 
* @return L Cholesky factor of A */ template * = nullptr> inline auto cholesky_decompose(const T& A) { check_symmetric("cholesky_decompose", "A", A.val()); plain_type_t L = cholesky_decompose(A.val()); if (A.rows() <= 35) { reverse_pass_callback(internal::unblocked_cholesky_lambda(L.val(), L, A)); } else { reverse_pass_callback(internal::cholesky_lambda(L.val(), L, A)); } return L; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/exp.hpp0000644000176200001440000000353314645137106021655 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_EXP_HPP #define STAN_MATH_REV_FUN_EXP_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the exponentiation of the specified variable (cmath). * \f[ \mbox{exp}(x) = \begin{cases} e^x & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{exp}(x)}{\partial x} = \begin{cases} e^x & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable to exponentiate. * @return Exponentiated variable. */ inline var exp(const var& a) { return make_callback_var(std::exp(a.val()), [a](auto& vi) mutable { a.adj() += vi.adj() * vi.val(); }); } /** * Return the exponentiation (base e) of the specified complex number. 
* @param z argument * @return exponentiation of argument */ inline std::complex exp(const std::complex& z) { return internal::complex_exp(z); } /** * Return the exponentiation of the elements of x * * @tparam T type of x * @param x argument * @return elementwise exponentiation of x */ template * = nullptr> inline auto exp(const T& x) { return make_callback_var( x.val().array().exp().matrix(), [x](const auto& vi) mutable { x.adj() += (vi.val().array() * vi.adj().array()).matrix(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/digamma.hpp0000644000176200001440000000241714645137106022460 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DIGAMMA_HPP #define STAN_MATH_REV_FUN_DIGAMMA_HPP #include #include #include #include namespace stan { namespace math { /** * Return the derivative of the log gamma function * at the specified value. * * @param[in] a argument * @return derivative of log gamma function at argument */ inline var digamma(const var& a) { return make_callback_var(digamma(a.val()), [a](auto& vi) { a.adj() += vi.adj() * trigamma(a.val()); }); } /** * Return the elementwise derivative of the log gamma function * at the given input vector * * @tparam T a `var_value` with inner Eigen type * @param[in] a vector * @return elementwise derivative of log gamma function */ template * = nullptr> inline auto digamma(const T& a) { return make_callback_var( a.val() .array() .unaryExpr([](auto& x) { return digamma(x); }) .matrix() .eval(), [a](auto& vi) mutable { a.adj().array() += vi.adj().array() * a.val().array().unaryExpr([](auto& x) { return trigamma(x); }); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/softmax.hpp0000644000176200001440000000267114645137106022544 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SOFTMAX_HPP #define STAN_MATH_REV_FUN_SOFTMAX_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { 
namespace math { /** * Return the softmax of the specified Eigen vector. Softmax is * guaranteed to return a simplex. * * @param alpha Unconstrained input vector. * @return Softmax of the input. * @throw std::domain_error If the input vector is size 0. */ template * = nullptr> inline auto softmax(const Mat& alpha) { using mat_plain = plain_type_t; using ret_type = return_var_matrix_t; if (alpha.size() == 0) { return ret_type(alpha); } arena_t alpha_arena = alpha; arena_t res_val = softmax(value_of(alpha_arena)); arena_t res = res_val; reverse_pass_callback([res_val, res, alpha_arena]() mutable { const auto& res_adj = to_ref(res.adj()); alpha_arena.adj() += -res_val * res_adj.dot(res_val) + res_val.cwiseProduct(res_adj); }); return ret_type(res); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/stan_print.hpp0000644000176200001440000000051114645137106023233 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_STAN_PRINT_HPP #define STAN_MATH_REV_FUN_STAN_PRINT_HPP #include #include #include namespace stan { namespace math { inline void stan_print(std::ostream* o, const var& x) { *o << x.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/to_arena.hpp0000644000176200001440000001162114645137106022646 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TO_ARENA_HPP #define STAN_MATH_REV_FUN_TO_ARENA_HPP #include #include #include #include #include namespace stan { namespace math { /** * Converts given argument into a type that either has any dynamic allocation on * AD stack or schedules its destructor to be called when AD stack memory is * recovered. * * This overload is used for kernel generator expressions. It also handles any * other types that do not have a special overload for them. 
* * @tparam T type of scalar * @param a argument * @return argument */ template >* = nullptr, require_not_container_t* = nullptr, require_not_matrix_cl_t* = nullptr> inline arena_t to_arena(T&& a) { return std::forward(a); } /** * Converts given argument into a type that either has any dynamic allocation on * AD stack or schedules its destructor to be called when AD stack memory is * recovered. * * For types that already have this property (including scalars and * `var_value`s) this is a no-op. * * Passing in a lvalue reference to objects not using AD stack, such as a * `matrix_cl` is inefficient as they need to be copied in this case. * @tparam T type of scalar * @param a argument * @return argument */ template >* = nullptr, require_not_matrix_cl_t* = nullptr, require_not_std_vector_t* = nullptr> inline std::remove_reference_t to_arena(T&& a) { // intentionally never returning a reference. If an object is just // referenced it will likely go out of scope before it is used. return std::forward(a); } /** * Converts given argument into a type that either has any dynamic allocation on * AD stack or schedules its destructor to be called when AD stack memory is * recovered. * * Converts eigen types to `arena_matrix`. * @tparam T type of argument * @param a argument * @return argument copied/evaluated on AD stack */ template * = nullptr, require_not_same_t>* = nullptr> inline arena_t to_arena(const T& a) { return arena_t(a); } /** * Converts given argument into a type that either has any dynamic allocation on * AD stack or schedules its destructor to be called when AD stack memory is * recovered. * * For std vectors that have data already on AD stack this is a shallow copy. * @tparam T type of scalar * @param a argument * @return argument */ template inline std::vector> to_arena( const std::vector>& a) { // What we want to do here is the same as moving input into output, except // that we want input to be left unchanged. 
With any normal allocator that // lead to deallocating memory twice (probably segfaulting). However, // dealocation with `arena_allocator` is a no-op, so we can do that. std::vector> res; std::memcpy(static_cast(&res), static_cast(&a), sizeof(std::vector>)); return res; } /** * Converts given argument into a type that has any dynamic allocation on AD * stack. * * Std vectors are copied into another std vector with custom allocator that * uses AD stack. * * This overload works on vectors with simple scalars that don't need to be * converthed themselves. * * @tparam T type of argument * @param a argument * @return argument copied on AD stack */ template >* = nullptr> inline arena_t> to_arena(const std::vector& a) { return {a.begin(), a.end()}; } /** * Converts given argument into a type that has any dynamic allocation on AD * stack. * * Std vectors are copied into another std vector with custom allocator that * uses AD stack. * * This overload works on vectors with scalars that also need conversion. * * @tparam T type of argument * @param a argument * @return argument copied on AD stack */ template >* = nullptr> inline arena_t> to_arena(const std::vector& a) { arena_t> res; res.reserve(a.size()); for (const T& i : a) { res.push_back(to_arena(i)); } return res; } /** * If the condition is true, converts given argument into a type that has any * dynamic allocation on AD stack. 
Otherwise returns the argument * * @tparam T type of argument * @param a argument * @return argument copied/evaluated on AD stack */ template * = nullptr> inline T to_arena_if(T&& a) { return std::forward(a); } template * = nullptr> inline arena_t to_arena_if(const T& a) { return to_arena(a); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/elt_divide.hpp0000644000176200001440000000645314645137106023175 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ELT_DIVIDE_HPP #define STAN_MATH_REV_FUN_ELT_DIVIDE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the elementwise division of the specified * matrices. * * @tparam Mat1 type of the first matrix * @tparam Mat2 type of the second matrix * * @param m1 First matrix * @param m2 Second matrix * @return Elementwise division of matrices. */ template * = nullptr, require_any_rev_matrix_t* = nullptr> auto elt_divide(const Mat1& m1, const Mat2& m2) { check_matching_dims("elt_divide", "m1", m1, "m2", m2); using inner_ret_type = decltype((value_of(m1).array() / value_of(m2).array()).matrix()); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = m2; arena_t ret(arena_m1.val().array() / arena_m2.val().array()); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { for (Eigen::Index j = 0; j < arena_m2.cols(); ++j) { for (Eigen::Index i = 0; i < arena_m2.rows(); ++i) { const auto ret_div = ret.adj().coeff(i, j) / arena_m2.val().coeff(i, j); arena_m1.adj().coeffRef(i, j) += ret_div; arena_m2.adj().coeffRef(i, j) -= ret.val().coeff(i, j) * ret_div; } } }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = value_of(m2); arena_t ret(arena_m1.val().array() / arena_m2.array()); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m1.adj().array() += ret.adj().array() / arena_m2.array(); }); return 
ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = value_of(m1); arena_t> arena_m2 = m2; arena_t ret(arena_m1.array() / arena_m2.val().array()); reverse_pass_callback([ret, arena_m2, arena_m1]() mutable { arena_m2.adj().array() -= ret.val().array() * ret.adj().array() / arena_m2.val().array(); }); return ret_type(ret); } } /** * Return the elementwise division of the specified scalar * by the specified matrix. * * @tparam Scal type of the scalar * @tparam Mat type of the matrix * * @param s scalar * @param m matrix or expression * @return Elementwise division of a scalar by matrix. */ template * = nullptr, require_var_matrix_t* = nullptr> auto elt_divide(Scal s, const Mat& m) { plain_type_t res = value_of(s) / m.val().array(); reverse_pass_callback([m, s, res]() mutable { m.adj().array() -= res.val().array() * res.adj().array() / m.val().array(); if (!is_constant::value) forward_as(s).adj() += (res.adj().array() / m.val().array()).sum(); }); return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/columns_dot_self.hpp0000644000176200001440000000261414645137106024417 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_COLUMNS_DOT_SELF_HPP #define STAN_MATH_REV_FUN_COLUMNS_DOT_SELF_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the dot product of each column of a matrix with itself. * * @tparam Mat An Eigen matrix with a `var` scalar type. * @param x Matrix. */ template * = nullptr> inline Eigen::Matrix columns_dot_self( const Mat& x) { Eigen::Matrix ret(1, x.cols()); for (size_type i = 0; i < x.cols(); i++) { ret(i) = dot_self(x.col(i)); } return ret; } /** * Returns the dot product of each column of a matrix with itself. * * @tparam Mat A `var_value<>` with an inner matrix type. * @param x Matrix. 
*/ template * = nullptr> inline auto columns_dot_self(const Mat& x) { using ret_type = return_var_matrix_t; arena_t res = x.val().colwise().squaredNorm(); if (x.size() >= 0) { reverse_pass_callback([res, x]() mutable { x.adj() += x.val() * (2 * res.adj()).asDiagonal(); }); } return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/hypergeometric_2F1.hpp0000644000176200001440000000425714645137106024523 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_HYPERGEOMETRIC_2F1_HPP #define STAN_MATH_REV_FUN_HYPERGEOMETRIC_2F1_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the Gauss hypergeometric function applied to the * input arguments: * \f$_2F_1(a_1,a_2;b;z)\f$ * * See 'grad_2F1.hpp' for the derivatives wrt each parameter * * @tparam Ta1 Type of scalar first 'a' argument * @tparam Ta2 Type of scalar second 'a' argument * @tparam Tb Type of scalar 'b' argument * @tparam Tz Type of scalar 'z' argument * @param[in] a1 First of 'a' arguments to function * @param[in] a2 Second of 'a' arguments to function * @param[in] b 'b' argument to function * @param[in] z Scalar z argument * @return Gauss hypergeometric function */ template * = nullptr, require_any_var_t* = nullptr> inline return_type_t hypergeometric_2F1(const Ta1& a1, const Ta2& a2, const Tb& b, const Tz& z) { double a1_dbl = value_of(a1); double a2_dbl = value_of(a2); double b_dbl = value_of(b); double z_dbl = value_of(z); return make_callback_var( hypergeometric_2F1(a1_dbl, a2_dbl, b_dbl, z_dbl), [a1, a2, b, z](auto& vi) mutable { auto grad_tuple = grad_2F1(a1, a2, b, z); if (!is_constant::value) { forward_as(a1).adj() += vi.adj() * std::get<0>(grad_tuple); } if (!is_constant::value) { forward_as(a2).adj() += vi.adj() * std::get<1>(grad_tuple); } if (!is_constant::value) { forward_as(b).adj() += vi.adj() * std::get<2>(grad_tuple); } if (!is_constant::value) { forward_as(z).adj() += vi.adj() * std::get<3>(grad_tuple); } }); } } // 
namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log.hpp0000644000176200001440000000355314645137106021644 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_HPP #define STAN_MATH_REV_FUN_LOG_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the natural log of the specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \log x = \frac{1}{x}\f$. * \f[ \mbox{log}(x) = \begin{cases} \textrm{NaN} & \mbox{if } x < 0\\ \ln(x) & \mbox{if } x \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log}(x)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < 0\\ \frac{1}{x} & \mbox{if } x\geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a Variable whose log is taken. * @return Natural log of variable. */ template * = nullptr> inline auto log(const var_value& a) { return make_callback_var(log(a.val()), [a](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) / as_array_or_scalar(a.val()); }); } /** * Return the natural logarithm (base e) of the specified complex argument. 
* * @param z complex argument * @return natural logarithm of argument */ inline std::complex log(const std::complex& z) { return internal::complex_log(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/lmgamma.hpp0000644000176200001440000000141514645137106022471 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LMGAMMA_HPP #define STAN_MATH_REV_FUN_LMGAMMA_HPP #include #include #include #include namespace stan { namespace math { namespace internal { class lmgamma_dv_vari : public op_dv_vari { public: lmgamma_dv_vari(int a, vari* bvi) : op_dv_vari(lmgamma(a, bvi->val_), a, bvi) {} void chain() { double deriv = 0; for (int i = 1; i < ad_ + 1; i++) { deriv += digamma(bvi_->val_ + (1.0 - i) / 2.0); } bvi_->adj_ += adj_ * deriv; } }; } // namespace internal inline var lmgamma(int a, const var& b) { return var(new internal::lmgamma_dv_vari(a, b.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/multiply_log.hpp0000644000176200001440000001751014645137106023601 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MULTIPLY_LOG_HPP #define STAN_MATH_REV_FUN_MULTIPLY_LOG_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class multiply_log_vv_vari : public op_vv_vari { public: multiply_log_vv_vari(vari* avi, vari* bvi) : op_vv_vari(multiply_log(avi->val_, bvi->val_), avi, bvi) {} void chain() { using std::log; avi_->adj_ += adj_ * log(bvi_->val_); bvi_->adj_ += adj_ * avi_->val_ / bvi_->val_; } }; class multiply_log_vd_vari : public op_vd_vari { public: multiply_log_vd_vari(vari* avi, double b) : op_vd_vari(multiply_log(avi->val_, b), avi, b) {} void chain() { using std::log; avi_->adj_ += adj_ * log(bd_); } }; class multiply_log_dv_vari : public op_dv_vari { public: multiply_log_dv_vari(double a, vari* bvi) : op_dv_vari(multiply_log(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ += adj_ * ad_ / bvi_->val_; 
} }; } // namespace internal /** * Return the value of a*log(b). * * When both a and b are 0, the value returned is 0. * The partial derivative with respect to a is log(b). * The partial derivative with respect to b is a/b. * * @param a First variable. * @param b Second variable. * @return Value of a*log(b) */ inline var multiply_log(const var& a, const var& b) { return var(new internal::multiply_log_vv_vari(a.vi_, b.vi_)); } /** * Return the value of a*log(b). * * When both a and b are 0, the value returned is 0. * The partial derivative with respect to a is log(b). * * @param a First variable. * @param b Second scalar. * @return Value of a*log(b) */ inline var multiply_log(const var& a, double b) { return var(new internal::multiply_log_vd_vari(a.vi_, b)); } /** * Return the value of a*log(b). * * When both a and b are 0, the value returned is 0. * The partial derivative with respect to b is a/b. * * @param a First scalar. * @param b Second variable. * @return Value of a*log(b) */ inline var multiply_log(double a, const var& b) { if (a == 1.0) { return log(b); } return var(new internal::multiply_log_dv_vari(a, b.vi_)); } /** * Return the elementwise product `a * log(b)`. 
* * Both `T1` and `T2` are matrices, and one of `T1` or `T2` must be a * `var_value` * * @tparam T1 Type of first argument * @tparam T2 Type of second argument * @param a First argument * @param b Second argument * @return elementwise product of `a` and `log(b)` */ template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto multiply_log(const T1& a, const T2& b) { check_matching_dims("multiply_log", "a", a, "b", b); if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = b; return make_callback_var( multiply_log(arena_a.val(), arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * arena_b.val().array().log(); arena_b.adj().array() += res.adj().array() * arena_a.val().array() / arena_b.val().array(); }); } else if (!is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = value_of(b); return make_callback_var(multiply_log(arena_a.val(), arena_b), [arena_a, arena_b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * arena_b.val().array().log(); }); } else { arena_t> arena_a = value_of(a); arena_t> arena_b = b; return make_callback_var(multiply_log(arena_a, arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_b.adj().array() += res.adj().array() * arena_a.val().array() / arena_b.val().array(); }); } } /** * Return the product `a * log(b)`. 
* * @tparam T1 Type of matrix argument * @tparam T2 Type of scalar argument * @param a Matrix argument * @param b Scalar argument * @return Product of `a` and `log(b)` */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto multiply_log(const T1& a, const T2& b) { using std::log; if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; var arena_b = b; return make_callback_var( multiply_log(arena_a.val(), arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * log(arena_b.val()); arena_b.adj() += (res.adj().array() * arena_a.val().array()).sum() / arena_b.val(); }); } else if (!is_constant::value) { arena_t> arena_a = a; return make_callback_var(multiply_log(arena_a.val(), value_of(b)), [arena_a, b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * log(value_of(b)); }); } else { arena_t> arena_a = value_of(a); var arena_b = b; return make_callback_var( multiply_log(arena_a, arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_b.adj() += (res.adj().array() * arena_a.array()).sum() / arena_b.val(); }); } } /** * Return the product `a * log(b)`. 
* * @tparam T1 Type of scalar argument * @tparam T2 Type of matrix argument * @param a Scalar argument * @param b Matrix argument * @return Product of `a` and `log(b)` */ template * = nullptr, require_var_matrix_t* = nullptr> inline auto multiply_log(const T1& a, const T2& b) { if (!is_constant::value && !is_constant::value) { var arena_a = a; arena_t> arena_b = b; return make_callback_var( multiply_log(arena_a.val(), arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_a.adj() += (res.adj().array() * arena_b.val().array().log()).sum(); arena_b.adj().array() += arena_a.val() * res.adj().array() / arena_b.val().array(); }); } else if (!is_constant::value) { var arena_a = a; arena_t> arena_b = value_of(b); return make_callback_var( multiply_log(arena_a.val(), arena_b), [arena_a, arena_b](const auto& res) mutable { arena_a.adj() += (res.adj().array() * arena_b.val().array().log()).sum(); }); } else { arena_t> arena_b = b; return make_callback_var(multiply_log(value_of(a), arena_b.val()), [a, arena_b](const auto& res) mutable { arena_b.adj().array() += value_of(a) * res.adj().array() / arena_b.val().array(); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/divide.hpp0000644000176200001440000000027214645137106022322 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DIVIDE_HPP #define STAN_MATH_REV_FUN_DIVIDE_HPP #include #include #include #endif StanHeaders/inst/include/stan/math/rev/fun/modified_bessel_first_kind.hpp0000644000176200001440000000162114645137106026406 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MODIFIED_BESSEL_FIRST_KIND_HPP #define STAN_MATH_REV_FUN_MODIFIED_BESSEL_FIRST_KIND_HPP #include #include #include namespace stan { namespace math { namespace internal { class modified_bessel_first_kind_dv_vari : public op_dv_vari { public: modified_bessel_first_kind_dv_vari(int a, vari* bvi) : op_dv_vari(modified_bessel_first_kind(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ += adj_ * (-ad_ * 
modified_bessel_first_kind(ad_, bvi_->val_) / bvi_->val_ + modified_bessel_first_kind(ad_ - 1, bvi_->val_)); } }; } // namespace internal inline var modified_bessel_first_kind(int v, const var& a) { return var(new internal::modified_bessel_first_kind_dv_vari(v, a.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/positive_ordered_constrain.hpp0000644000176200001440000000315314645137106026505 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_POSITIVE_ORDERED_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_POSITIVE_ORDERED_CONSTRAIN_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return an increasing positive ordered vector derived from the specified * free vector. The returned constrained vector will have the * same dimensionality as the specified free vector. * * @param x Free vector of scalars * @return Positive, increasing ordered vector */ template * = nullptr> inline auto positive_ordered_constrain(const T& x) { using ret_type = plain_type_t; size_t N = x.size(); if (unlikely(N == 0)) { return ret_type(x); } arena_t arena_x = x; Eigen::VectorXd y_val(N); arena_t exp_x(N); exp_x(0) = std::exp(value_of(arena_x)(0)); y_val(0) = exp_x.coeff(0); for (int n = 1; n < N; ++n) { exp_x(n) = std::exp(value_of(arena_x)(n)); y_val(n) = y_val.coeff(n - 1) + exp_x.coeff(n); } arena_t y = y_val; reverse_pass_callback([arena_x, exp_x, y]() mutable { const size_t N = arena_x.size(); double rolling_adjoint_sum = 0.0; for (int n = N; --n >= 0;) { rolling_adjoint_sum += y.adj().coeff(n); arena_x.adj().coeffRef(n) += exp_x.coeff(n) * rolling_adjoint_sum; } }); return ret_type(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/quad_form.hpp0000644000176200001440000002666514645137106023051 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_QUAD_FORM_HPP #define STAN_MATH_REV_FUN_QUAD_FORM_HPP #include #include #include #include #include #include 
#include #include #include namespace stan { namespace math { namespace internal { template class quad_form_vari_alloc : public chainable_alloc { private: inline void compute(const Eigen::Matrix& A, const Eigen::Matrix& B) { matrix_d Cd = B.transpose() * A * B; if (sym_) { matrix_d M = 0.5 * (Cd + Cd.transpose()); Cd = M; } for (int j = 0; j < C_.cols(); j++) { for (int i = 0; i < C_.rows(); i++) { C_(i, j) = var(new vari(Cd(i, j), false)); } } } public: quad_form_vari_alloc(const Eigen::Matrix& A, const Eigen::Matrix& B, bool symmetric = false) : A_(A), B_(B), C_(B_.cols(), B_.cols()), sym_(symmetric) { compute(value_of(A), value_of(B)); } Eigen::Matrix A_; Eigen::Matrix B_; Eigen::Matrix C_; bool sym_; }; template class quad_form_vari : public vari { protected: inline void chainA(Eigen::Matrix& A, const Eigen::Matrix& Bd, const Eigen::Matrix& adjC) {} inline void chainB(Eigen::Matrix& B, const Eigen::Matrix& Ad, const Eigen::Matrix& Bd, const Eigen::Matrix& adjC) {} inline void chainA(Eigen::Matrix& A, const Eigen::Matrix& Bd, const Eigen::Matrix& adjC) { A.adj() += Bd * adjC * Bd.transpose(); } inline void chainB(Eigen::Matrix& B, const Eigen::Matrix& Ad, const Eigen::Matrix& Bd, const Eigen::Matrix& adjC) { B.adj() += Ad * Bd * adjC.transpose() + Ad.transpose() * Bd * adjC; } inline void chainAB(Eigen::Matrix& A, Eigen::Matrix& B, const Eigen::Matrix& Ad, const Eigen::Matrix& Bd, const Eigen::Matrix& adjC) { chainA(A, Bd, adjC); chainB(B, Ad, Bd, adjC); } public: quad_form_vari(const Eigen::Matrix& A, const Eigen::Matrix& B, bool symmetric = false) : vari(0.0) { impl_ = new quad_form_vari_alloc(A, B, symmetric); } virtual void chain() { matrix_d adjC = impl_->C_.adj(); chainAB(impl_->A_, impl_->B_, value_of(impl_->A_), value_of(impl_->B_), adjC); } quad_form_vari_alloc* impl_; }; /** * Return the quadratic form \f$ B^T A B \f$. * * Symmetry of the resulting matrix is not guaranteed due to numerical * precision. 
* * @tparam Mat1 type of the first (square) matrix * @tparam Mat2 type of the second matrix * * @param A square matrix * @param B second matrix * @param symmetric indicates whether the output should be made symmetric * @return The quadratic form * @throws std::invalid_argument if A is not square, or if A cannot be * multiplied by B */ template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto quad_form_impl(const Mat1& A, const Mat2& B, bool symmetric) { check_square("quad_form", "A", A); check_multiplicable("quad_form", "A", A, "B", B); using return_t = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; check_not_nan("multiply", "A", value_of(arena_A)); check_not_nan("multiply", "B", value_of(arena_B)); auto arena_res = to_arena(value_of(arena_B).transpose() * value_of(arena_A) * value_of(arena_B)); if (symmetric) { arena_res += arena_res.transpose().eval(); } return_t res = arena_res; reverse_pass_callback([arena_A, arena_B, res]() mutable { auto C_adj_B_t = (res.adj() * value_of(arena_B).transpose()).eval(); if (is_var_matrix::value) { arena_A.adj().noalias() += value_of(arena_B) * C_adj_B_t; } else { arena_A.adj() += value_of(arena_B) * C_adj_B_t; } if (is_var_matrix::value) { arena_B.adj().noalias() += value_of(arena_A) * C_adj_B_t.transpose() + value_of(arena_A).transpose() * value_of(arena_B) * res.adj(); } else { arena_B.adj() += value_of(arena_A) * C_adj_B_t.transpose() + value_of(arena_A).transpose() * value_of(arena_B) * res.adj(); } }); return res; } else if (!is_constant::value) { arena_t> arena_A = value_of(A); arena_t> arena_B = B; check_not_nan("multiply", "A", arena_A); check_not_nan("multiply", "B", arena_B.val()); auto arena_res = to_arena(value_of(arena_B).transpose() * arena_A * value_of(arena_B)); if (symmetric) { arena_res += arena_res.transpose().eval(); } return_t res = arena_res; reverse_pass_callback([arena_A, arena_B, res]() mutable { auto C_adj_B_t = (res.adj() 
* value_of(arena_B).transpose()); if (is_var_matrix::value) { arena_B.adj().noalias() += arena_A * C_adj_B_t.transpose() + arena_A.transpose() * value_of(arena_B) * res.adj(); } else { arena_B.adj() += arena_A * C_adj_B_t.transpose() + arena_A.transpose() * value_of(arena_B) * res.adj(); } }); return res; } else { arena_t> arena_A = A; arena_t> arena_B = value_of(B); check_not_nan("multiply", "A", value_of(arena_A)); check_not_nan("multiply", "B", arena_B); auto arena_res = to_arena(arena_B.transpose() * value_of(arena_A) * arena_B); if (symmetric) { arena_res += arena_res.transpose().eval(); } return_t res = arena_res; reverse_pass_callback([arena_A, arena_B, res]() mutable { auto C_adj_B_t = (res.adj() * arena_B.transpose()); if (is_var_matrix::value) { arena_A.adj().noalias() += arena_B * C_adj_B_t; } else { arena_A.adj() += arena_B * C_adj_B_t; } }); return res; } } } // namespace internal /** * Return the quadratic form \f$ B^T A B \f$. * * Symmetry of the resulting matrix is not guaranteed due to numerical * precision. * * @tparam EigMat1 type of the first (square) matrix * @tparam EigMat2 type of the second matrix * * @param A square matrix * @param B second matrix * @param symmetric indicates whether the output should be made symmetric * @return The quadratic form, which is a symmetric matrix. * @throws std::invalid_argument if A is not square, or if A cannot be * multiplied by B */ template * = nullptr, require_not_eigen_col_vector_t* = nullptr, require_any_vt_var* = nullptr> inline promote_scalar_t quad_form(const EigMat1& A, const EigMat2& B, bool symmetric = false) { check_square("quad_form", "A", A); check_multiplicable("quad_form", "A", A, "B", B); auto* baseVari = new internal::quad_form_vari< value_type_t, EigMat1::RowsAtCompileTime, EigMat1::ColsAtCompileTime, value_type_t, EigMat2::RowsAtCompileTime, EigMat2::ColsAtCompileTime>(A, B, symmetric); return baseVari->impl_->C_; } /** * Return the quadratic form \f$ B^T A B \f$. 
* * @tparam EigMat type of the matrix * @tparam ColVec type of the vector * * @param A square matrix * @param B vector * @param symmetric indicates whether the output should be made symmetric * @return The quadratic form (a scalar). * @throws std::invalid_argument if A is not square, or if A cannot be * multiplied by B */ template * = nullptr, require_eigen_col_vector_t* = nullptr, require_any_vt_var* = nullptr> inline var quad_form(const EigMat& A, const ColVec& B, bool symmetric = false) { check_square("quad_form", "A", A); check_multiplicable("quad_form", "A", A, "B", B); auto* baseVari = new internal::quad_form_vari< value_type_t, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime, value_type_t, ColVec::RowsAtCompileTime, 1>(A, B, symmetric); return baseVari->impl_->C_(0, 0); } /** * Return the quadratic form \f$ B^T A B \f$. * * Symmetry of the resulting matrix is not guaranteed due to numerical * precision. * * This overload handles arguments where one of Mat1 or Mat2 are * `var_value` where `T` is an Eigen type. The other type can * also be a `var_value` or it can be a matrix type that inherits * from EigenBase * * @tparam Mat1 type of the first (square) matrix * @tparam Mat2 type of the second matrix * * @param A square matrix * @param B second matrix * @param symmetric indicates whether the output should be made symmetric * @return The quadratic form, which is a symmetric matrix. * @throws std::invalid_argument if A is not square, or if A cannot be * multiplied by B */ template * = nullptr, require_not_col_vector_t* = nullptr, require_any_var_matrix_t* = nullptr> inline auto quad_form(const Mat1& A, const Mat2& B, bool symmetric = false) { return internal::quad_form_impl(A, B, symmetric); } /** * Return the quadratic form \f$ B^T A B \f$. * * This overload handles arguments where one of Mat or Vec are * `var_value` where `T` is an Eigen type. 
The other type can * also be a `var_value`, or it can be a type that inherits from * EigenBase * * @tparam Mat type of the matrix * @tparam Vec type of the vector * * @param A square matrix * @param B vector * @param symmetric indicates whether the output should be made symmetric * @return The quadratic form (a scalar). * @throws std::invalid_argument if A is not square, or if A cannot be * multiplied by B */ template * = nullptr, require_col_vector_t* = nullptr, require_any_var_matrix_t* = nullptr> inline var quad_form(const Mat& A, const Vec& B, bool symmetric = false) { return internal::quad_form_impl(A, B, symmetric)(0, 0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/simplex_constrain.hpp0000644000176200001440000001054414645137106024622 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SIMPLEX_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_SIMPLEX_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the simplex corresponding to the specified free vector. * A simplex is a vector containing values greater than or equal * to 0 that sum to 1. A vector with (K-1) unconstrained values * will produce a simplex of size K. * * The transform is based on a centered stick-breaking process. 
* * @tparam T Type of vector to constrain * @param y Free vector input of dimensionality K - 1 * @return Simplex of dimensionality K */ template * = nullptr> inline auto simplex_constrain(const T& y) { using ret_type = plain_type_t; size_t N = y.size(); arena_t arena_y = y; arena_t arena_z(N); Eigen::VectorXd x_val(N + 1); double stick_len(1.0); for (Eigen::Index k = 0; k < N; ++k) { double log_N_minus_k = std::log(N - k); arena_z.coeffRef(k) = inv_logit(arena_y.val().coeff(k) - log_N_minus_k); x_val.coeffRef(k) = stick_len * arena_z.coeff(k); stick_len -= x_val(k); } x_val.coeffRef(N) = stick_len; arena_t arena_x = x_val; if (unlikely(N == 0)) { return ret_type(arena_x); } reverse_pass_callback([arena_y, arena_x, arena_z]() mutable { int N = arena_y.size(); double stick_len_val = arena_x.val().coeff(N); double stick_len_adj = arena_x.adj().coeff(N); for (Eigen::Index k = N; k-- > 0;) { arena_x.adj().coeffRef(k) -= stick_len_adj; stick_len_val += arena_x.val().coeff(k); stick_len_adj += arena_x.adj().coeff(k) * arena_z.coeff(k); double arena_z_adj = arena_x.adj().coeff(k) * stick_len_val; arena_y.adj().coeffRef(k) += arena_z_adj * arena_z.coeff(k) * (1.0 - arena_z.coeff(k)); } }); return ret_type(arena_x); } /** * Return the simplex corresponding to the specified free vector * and increment the specified log probability reference with * the log absolute Jacobian determinant of the transform. * * The simplex transform is defined through a centered * stick-breaking process. * * @tparam T type of the vector to constrain * @param y Free vector input of dimensionality N. * @param lp Log probability reference to increment. * @return Simplex of dimensionality N + 1. 
*/ template * = nullptr> auto simplex_constrain(const T& y, scalar_type_t& lp) { using ret_type = plain_type_t; size_t N = y.size(); arena_t arena_y = y; arena_t arena_z(N); Eigen::VectorXd x_val(N + 1); double stick_len(1.0); for (Eigen::Index k = 0; k < N; ++k) { double log_N_minus_k = std::log(N - k); double adj_y_k = arena_y.val().coeff(k) - log_N_minus_k; arena_z.coeffRef(k) = inv_logit(adj_y_k); x_val.coeffRef(k) = stick_len * arena_z.coeff(k); lp += log(stick_len); lp -= log1p_exp(-adj_y_k); lp -= log1p_exp(adj_y_k); stick_len -= x_val(k); } x_val.coeffRef(N) = stick_len; arena_t arena_x = x_val; if (unlikely(N == 0)) { return ret_type(arena_x); } reverse_pass_callback([arena_y, arena_x, arena_z, lp]() mutable { int N = arena_y.size(); double stick_len_val = arena_x.val().coeff(N); double stick_len_adj = arena_x.adj().coeff(N); for (Eigen::Index k = N; k-- > 0;) { arena_x.adj().coeffRef(k) -= stick_len_adj; stick_len_val += arena_x.val().coeff(k); double log_N_minus_k = std::log(N - k); double adj_y_k = arena_y.val().coeff(k) - log_N_minus_k; arena_y.adj().coeffRef(k) -= lp.adj() * inv_logit(adj_y_k); arena_y.adj().coeffRef(k) += lp.adj() * inv_logit(-adj_y_k); stick_len_adj += lp.adj() / stick_len_val; stick_len_adj += arena_x.adj().coeff(k) * arena_z.coeff(k); double arena_z_adj = arena_x.adj().coeff(k) * stick_len_val; arena_y.adj().coeffRef(k) += arena_z_adj * arena_z.coeff(k) * (1.0 - arena_z.coeff(k)); } }); return ret_type(arena_x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/modified_bessel_second_kind.hpp0000644000176200001440000000163214645137106026534 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MODIFIED_BESSEL_SECOND_KIND_HPP #define STAN_MATH_REV_FUN_MODIFIED_BESSEL_SECOND_KIND_HPP #include #include #include namespace stan { namespace math { namespace internal { class modified_bessel_second_kind_dv_vari : public op_dv_vari { public: modified_bessel_second_kind_dv_vari(int a, vari* bvi) : 
op_dv_vari(modified_bessel_second_kind(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ -= adj_ * (ad_ * modified_bessel_second_kind(ad_, bvi_->val_) / bvi_->val_ + modified_bessel_second_kind(ad_ - 1, bvi_->val_)); } }; } // namespace internal inline var modified_bessel_second_kind(int v, const var& a) { return var(new internal::modified_bessel_second_kind_dv_vari(v, a.vi_)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/LDLT_factor.hpp0000644000176200001440000000363214645137106023156 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LDLT_FACTOR_HPP #define STAN_MATH_REV_FUN_LDLT_FACTOR_HPP #include #include #include #include namespace stan { namespace math { /** * An LDLT_factor of an `Eigen::Matrix` * with `alloc_in_arena = True` holds a copy of the input matrix and the LDLT * of its values, with all member variable allocations are done in the arena. */ template class LDLT_factor> { private: arena_t> matrix_; Eigen::LDLT ldlt_; public: template , plain_type_t>* = nullptr> explicit LDLT_factor(const S& matrix) : matrix_(matrix), ldlt_(matrix.val().ldlt()) {} /** * Return a const reference to the underlying matrix */ const auto& matrix() const noexcept { return matrix_; } /** * Return a const reference to the LDLT factor of the matrix values */ const auto& ldlt() const noexcept { return ldlt_; } }; /** * An LDLT_factor of a `var_value` * holds a copy of the input `var_value` and the LDLT of its values. 
*/ template class LDLT_factor> { private: std::decay_t matrix_; Eigen::LDLT ldlt_; public: template , plain_type_t>* = nullptr> explicit LDLT_factor(const S& matrix) : matrix_(matrix), ldlt_(matrix.val().ldlt()) {} /** * Return a const reference the underlying `var_value` */ const auto& matrix() const noexcept { return matrix_; } /** * Return a const reference to the LDLT factor of the matrix values */ const auto& ldlt() const noexcept { return ldlt_; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/gp_periodic_cov.hpp0000644000176200001440000001130514645137106024210 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_GP_PERIODIC_COV_HPP #define STAN_MATH_REV_FUN_GP_PERIODIC_COV_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a periodic covariance matrix \f$ \mathbf{K} \f$ using the input \f$ * \mathbf{X} \f$. The elements of \f$ \mathbf{K} \f$ are defined as \f$ * \mathbf{K}_{ij} = k(\mathbf{X}_i,\mathbf{X}_j), \f$ where \f$ \mathbf{X}_i * \f$ is the \f$i\f$-th row of \f$ \mathbf{X} \f$ and \n \f$ * k(\mathbf{x},\mathbf{x}^\prime) = \sigma^2 \exp\left(-\frac{2\sin^2(\pi * |\mathbf{x}-\mathbf{x}^\prime|/p)}{\ell^2}\right), \f$ \n where \f$ \sigma^2 * \f$, \f$ \ell \f$ and \f$ p \f$ are the signal variance, length-scale and * period. * * @tparam T_x type of elements in the std::vector * @param x std::vector of input elements. * Assumes that all elements of x have the same size. 
* @param sigma standard deviation of the signal * @param l length-scale * @param p period * @return periodic covariance matrix * @throw std::domain_error if sigma <= 0, l <= 0, p <= 0, or * x is nan or infinite */ template * = nullptr, require_stan_scalar_t* = nullptr> inline Eigen::Matrix gp_periodic_cov( const std::vector& x, const T_sigma sigma, const var l, const var p) { using std::exp; using std::sin; const char* fun = "gp_periodic_cov"; check_positive(fun, "signal standard deviation", sigma); check_positive(fun, "length-scale", l); check_positive(fun, "period", p); size_t x_size = x.size(); for (size_t i = 0; i < x_size; ++i) { check_not_nan(fun, "element of x", x[i]); } Eigen::Matrix cov(x_size, x_size); if (x_size == 0) { return cov; } size_t l_tri_size = x_size * (x_size - 1) / 2; arena_matrix dists_lin(l_tri_size); arena_matrix sin_dists_sq_lin(l_tri_size); arena_matrix> cov_l_tri_lin(l_tri_size); arena_matrix> cov_diag( is_constant::value ? 0 : x_size); double sigma_sq = square(value_of(sigma)); double pi_div_p = pi() / value_of(p); double neg_two_inv_l_sq = -2.0 / square(value_of(l)); size_t block_size = 10; size_t pos = 0; for (size_t jb = 0; jb < x_size; jb += block_size) { size_t j_end = std::min(x_size, jb + block_size); size_t j_size = j_end - jb; cov.diagonal().segment(jb, j_size) = Eigen::VectorXd::Constant(j_size, sigma_sq); if (!is_constant::value) { cov_diag.segment(jb, j_size) = cov.diagonal().segment(jb, j_size); } for (size_t ib = jb; ib < x_size; ib += block_size) { size_t i_end = std::min(x_size, ib + block_size); for (size_t j = jb; j < j_end; ++j) { for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { double dist = distance(x[i], x[j]); double sin_dist = sin(pi_div_p * dist); double sin_dist_sq = square(sin_dist); dists_lin.coeffRef(pos) = dist; sin_dists_sq_lin.coeffRef(pos) = sin_dist_sq; cov_l_tri_lin.coeffRef(pos) = cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * exp(sin_dist_sq * neg_two_inv_l_sq); ++pos; } } } } 
reverse_pass_callback([cov_l_tri_lin, cov_diag, dists_lin, sin_dists_sq_lin, sigma, l, p, x_size]() { size_t l_tri_size = x_size * (x_size - 1) / 2; double l_val = value_of(l); double p_val = value_of(p); double two_pi_div_p = TWO_PI / p_val; double adjl = 0; double adjsigma = 0; double adjp = 0; for (size_t pos = 0; pos < l_tri_size; ++pos) { double prod_add = cov_l_tri_lin.coeff(pos).val() * cov_l_tri_lin.coeff(pos).adj(); adjl += prod_add * sin_dists_sq_lin.coeff(pos); adjsigma += prod_add; double dist = dists_lin.coeff(pos); adjp += prod_add * sin(two_pi_div_p * dist) * dist; } if (!is_constant::value) { adjsigma += (cov_diag.val().array() * cov_diag.adj().array()).sum(); adjoint_of(sigma) += adjsigma * 2 / value_of(sigma); } double l_sq = square(l_val); l.adj() += adjl * 4 / (l_sq * l_val); p.adj() += adjp * TWO_PI / (l_sq * square(p_val)); }); return cov; } // namespace math } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/trace.hpp0000644000176200001440000000170214645137106022153 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TRACE_HPP #define STAN_MATH_REV_FUN_TRACE_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the trace of the specified matrix. The trace * is defined as the sum of the elements on the diagonal. * The matrix is not required to be square. Returns 0 if * matrix is empty. * * @tparam T type of the elements in the matrix * @param[in] m Specified matrix. * @return Trace of the matrix. 
*/ template * = nullptr> inline auto trace(const T& m) { arena_t arena_m = m; return make_callback_var(arena_m.val_op().trace(), [arena_m](const auto& vi) mutable { arena_m.adj().diagonal().array() += vi.adj(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/tgamma.hpp0000644000176200001440000000362014645137106022324 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TGAMMA_HPP #define STAN_MATH_REV_FUN_TGAMMA_HPP #include #include #include #include namespace stan { namespace math { /** * Return the Gamma function applied to the specified variable (C99). * * The derivative with respect to the argument is * * \f$\frac{d}{dx} \Gamma(x) = \Gamma(x) \Psi^{(0)}(x)\f$ * * where \f$\Psi^{(0)}(x)\f$ is the digamma function. * \f[ \mbox{tgamma}(x) = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \Gamma(x) & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{tgamma}(x)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \frac{\partial\, \Gamma(x)}{\partial x} & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \Gamma(x)=\int_0^{\infty} u^{x - 1} \exp(-u) \, du \f] \f[ \frac{\partial \, \Gamma(x)}{\partial x} = \Gamma(x)\Psi(x) \f] * * @param a Argument to function. * @return The Gamma function applied to the specified argument. 
*/ inline var tgamma(const var& a) { return make_callback_var(tgamma(a.val()), [a](auto& vi) mutable { a.adj() += vi.adj() * vi.val() * digamma(a.val()); }); } /** * Return elementwise gamma function * * @tparam T a `var_value` with inner Eigen type * @param a input * @return elementwise gamma */ template * = nullptr> inline auto tgamma(const T& a) { return make_callback_var(tgamma(a.val()), [a](auto& vi) mutable { a.adj().array() += vi.adj().array() * vi.val().array() * digamma(a.val()).array(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/rep_matrix.hpp0000644000176200001440000000411514645137106023230 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_REP_MATRIX_HPP #define STAN_MATH_REV_FUN_REP_MATRIX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Impl of rep_matrix returning an `var_value` with a var scalar * type. * @tparam Ret A `var_value` with inner Eigen type. * @tparam T A Scalar type. * @param x A Scalar whose values are propogated to all values in the return * matrix. * @param m Number or rows. * @param n Number of columns. */ template * = nullptr, require_var_t* = nullptr> inline auto rep_matrix(const T& x, int m, int n) { check_nonnegative("rep_matrix", "rows", m); check_nonnegative("rep_matrix", "cols", n); return make_callback_var( value_type_t::Constant(m, n, x.val()), [x](auto& rep) mutable { x.adj() += rep.adj().sum(); }); } /** * Impl of rep_matrix returning a `var_value` from a `var_value` * with an inner Eigen vector type. * @tparam Ret A `var_value` with inner Eigen dynamic matrix type. * @tparam Vec A `var_value` with an inner Eigen vector type. * @param x A `var_value` with inner Eigen vector type. For Row vectors the * values are replacated rowwise and for column vectors the values are * repliacated colwise. * @param n Number of rows or columns. 
*/ template * = nullptr, require_var_matrix_t* = nullptr> inline auto rep_matrix(const Vec& x, int n) { if (is_row_vector::value) { check_nonnegative("rep_matrix", "rows", n); return make_callback_var(x.val().replicate(n, 1), [x](auto& rep) mutable { x.adj() += rep.adj().colwise().sum(); }); } else { check_nonnegative("rep_matrix", "cols", n); return make_callback_var(x.val().replicate(1, n), [x](auto& rep) mutable { x.adj() += rep.adj().rowwise().sum(); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/mdivide_left.hpp0000644000176200001440000000612614645137106023515 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MDIVIDE_LEFT_HPP #define STAN_MATH_REV_FUN_MDIVIDE_LEFT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the solution `X` of `AX = B`. * * A must be a square matrix, but B can be a matrix or a vector * * @tparam T1 type of first matrix * @tparam T2 type of second matrix * * @param[in] A square matrix * @param[in] B right hand side * @return solution of AX = B */ template * = nullptr, require_any_st_var* = nullptr> inline auto mdivide_left(const T1& A, const T2& B) { using ret_val_type = plain_type_t; using ret_type = promote_var_matrix_t; check_square("mdivide_left", "A", A); check_multiplicable("mdivide_left", "A", A, "B", B); if (A.size() == 0) { return ret_type(ret_val_type(0, B.cols())); } if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; auto hqr_A_ptr = make_chainable_ptr(arena_A.val().householderQr()); arena_t res = hqr_A_ptr->solve(arena_B.val()); reverse_pass_callback([arena_A, arena_B, hqr_A_ptr, res]() mutable { promote_scalar_t adjB = hqr_A_ptr->householderQ() * hqr_A_ptr->matrixQR() .template triangularView() .transpose() .solve(res.adj()); arena_A.adj() -= adjB * res.val_op().transpose(); arena_B.adj() += adjB; }); return ret_type(res); } else if (!is_constant::value) { arena_t> 
arena_B = B; auto hqr_A_ptr = make_chainable_ptr(value_of(A).householderQr()); arena_t res = hqr_A_ptr->solve(arena_B.val()); reverse_pass_callback([arena_B, hqr_A_ptr, res]() mutable { arena_B.adj() += hqr_A_ptr->householderQ() * hqr_A_ptr->matrixQR() .template triangularView() .transpose() .solve(res.adj()); }); return ret_type(res); } else { arena_t> arena_A = A; auto hqr_A_ptr = make_chainable_ptr(arena_A.val().householderQr()); arena_t res = hqr_A_ptr->solve(value_of(B)); reverse_pass_callback([arena_A, hqr_A_ptr, res]() mutable { arena_A.adj() -= hqr_A_ptr->householderQ() * hqr_A_ptr->matrixQR() .template triangularView() .transpose() .solve(res.adj()) * res.val_op().transpose(); }); return ret_type(res); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/trace_gen_inv_quad_form_ldlt.hpp0000644000176200001440000003015314645137106026736 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TRACE_GEN_INV_QUAD_FORM_LDLT_HPP #define STAN_MATH_REV_FUN_TRACE_GEN_INV_QUAD_FORM_LDLT_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Compute the trace of an inverse quadratic form premultiplied by a * square matrix. This computes * trace(D B^T A^-1 B) * where D is a square matrix and the LDLT_factor of A is provided. * * @tparam Td type of the first matrix * @tparam Ta type of matrix in the LDLT_factor * @tparam Tb type of the second matrix * * @param D a square matrix * @param A an LDLT_factor * @param B a matrix * @return The trace of the inverse quadratic form. 
*/ template * = nullptr, require_all_matrix_t* = nullptr, require_any_st_var* = nullptr> inline var trace_gen_inv_quad_form_ldlt(const Td& D, LDLT_factor& A, const Tb& B) { check_square("trace_gen_inv_quad_form_ldlt", "D", D); check_multiplicable("trace_gen_inv_quad_form_ldlt", "A", A.matrix(), "B", B); check_multiplicable("trace_gen_inv_quad_form_ldlt", "B", B, "D", D); if (D.size() == 0 || A.matrix().size() == 0) { return 0; } if (!is_constant::value && !is_constant::value && !is_constant::value) { arena_t> arena_A = A.matrix(); arena_t> arena_B = B; arena_t> arena_D = D; auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); auto BTAsolveB = to_arena(arena_B.val_op().transpose() * AsolveB); var res = (arena_D.val() * BTAsolveB).trace(); reverse_pass_callback( [arena_A, BTAsolveB, AsolveB, arena_B, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.val_op().transpose() * AsolveB.transpose(); arena_B.adj() += C_adj * AsolveB * (arena_D.val_op() + arena_D.val_op().transpose()); arena_D.adj() += C_adj * BTAsolveB; }); return res; } else if (!is_constant::value && !is_constant::value && is_constant::value) { arena_t> arena_A = A.matrix(); arena_t> arena_B = B; arena_t> arena_D = value_of(D); auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); var res = (arena_D * arena_B.val_op().transpose() * AsolveB).trace(); reverse_pass_callback([arena_A, AsolveB, arena_B, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.transpose() * AsolveB.transpose(); arena_B.adj() += C_adj * AsolveB * (arena_D + arena_D.transpose()); }); return res; } else if (!is_constant::value && is_constant::value && !is_constant::value) { arena_t> arena_A = A.matrix(); const auto& B_ref = to_ref(B); arena_t> arena_D = D; auto AsolveB = to_arena(A.ldlt().solve(value_of(B_ref))); auto BTAsolveB = to_arena(value_of(B_ref).transpose() * AsolveB); var res = (arena_D.val() * BTAsolveB).trace(); 
reverse_pass_callback( [arena_A, BTAsolveB, AsolveB, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.val_op().transpose() * AsolveB.transpose(); arena_D.adj() += C_adj * BTAsolveB; }); return res; } else if (!is_constant::value && is_constant::value && is_constant::value) { arena_t> arena_A = A.matrix(); const auto& B_ref = to_ref(B); arena_t> arena_D = value_of(D); auto AsolveB = to_arena(A.ldlt().solve(value_of(B_ref))); var res = (arena_D * value_of(B_ref).transpose() * AsolveB).trace(); reverse_pass_callback([arena_A, AsolveB, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.val_op().transpose() * AsolveB.transpose(); }); return res; } else if (is_constant::value && !is_constant::value && !is_constant::value) { arena_t> arena_B = B; arena_t> arena_D = D; auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); auto BTAsolveB = to_arena(arena_B.val_op().transpose() * AsolveB); var res = (arena_D.val() * BTAsolveB).trace(); reverse_pass_callback( [BTAsolveB, AsolveB, arena_B, arena_D, res]() mutable { double C_adj = res.adj(); arena_B.adj() += C_adj * AsolveB * (arena_D.val_op() + arena_D.val_op().transpose()); arena_D.adj() += C_adj * BTAsolveB; }); return res; } else if (is_constant::value && !is_constant::value && is_constant::value) { arena_t> arena_B = B; arena_t> arena_D = value_of(D); auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); var res = (arena_D * arena_B.val_op().transpose() * AsolveB).trace(); reverse_pass_callback([AsolveB, arena_B, arena_D, res]() mutable { arena_B.adj() += res.adj() * AsolveB * (arena_D + arena_D.transpose()); }); return res; } else if (is_constant::value && is_constant::value && !is_constant::value) { const auto& B_ref = to_ref(B); arena_t> arena_D = D; auto BTAsolveB = to_arena(value_of(B_ref).transpose() * A.ldlt().solve(value_of(B_ref))); var res = (arena_D.val() * BTAsolveB).trace(); reverse_pass_callback([BTAsolveB, arena_D, 
res]() mutable { arena_D.adj() += res.adj() * BTAsolveB; }); return res; } } /** * Compute the trace of an inverse quadratic form. I.E., this computes * `trace(diag(D) B^T A^-1 B)` * where D is the diagonal of a diagonal matrix (`diag(D)` is the diagonal * matrix itself) and the LDLT_factor of A is provided. * * @tparam Td type of the diagonal of first matrix * @tparam Ta type of matrix in the LDLT_factor * @tparam Tb type of the B matrix * * @param D diagonal of multiplier * @param A LDLT_factor * @param B inner term in quadratic form * @return trace(diag(D) * B^T * A^-1 * B) * @throw std::domain_error if A cannot be multiplied by B or B cannot * be multiplied by diag(D). */ template * = nullptr, require_all_matrix_t* = nullptr, require_any_st_var* = nullptr> inline var trace_gen_inv_quad_form_ldlt(const Td& D, const LDLT_factor& A, const Tb& B) { check_multiplicable("trace_gen_inv_quad_form_ldlt", "A", A.matrix(), "B", B); check_multiplicable("trace_gen_inv_quad_form_ldlt", "B", B, "D", D); if (D.size() == 0 || A.matrix().size() == 0) { return 0; } if (!is_constant::value && !is_constant::value && !is_constant::value) { arena_t> arena_A = A.matrix(); arena_t> arena_B = B; arena_t> arena_D = D; auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); auto BTAsolveB = to_arena(arena_B.val_op().transpose() * AsolveB); var res = (arena_D.val().asDiagonal() * BTAsolveB).trace(); reverse_pass_callback( [arena_A, BTAsolveB, AsolveB, arena_B, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.val_op().asDiagonal() * AsolveB.transpose(); arena_B.adj() += C_adj * AsolveB * 2 * arena_D.val_op().asDiagonal(); arena_D.adj() += C_adj * BTAsolveB.diagonal(); }); return res; } else if (!is_constant::value && !is_constant::value && is_constant::value) { arena_t> arena_A = A.matrix(); arena_t> arena_B = B; arena_t> arena_D = value_of(D); auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); var res = (arena_D.asDiagonal() * 
arena_B.val_op().transpose() * AsolveB) .trace(); reverse_pass_callback([arena_A, AsolveB, arena_B, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.asDiagonal() * AsolveB.transpose(); arena_B.adj() += C_adj * AsolveB * 2 * arena_D.asDiagonal(); }); return res; } else if (!is_constant::value && is_constant::value && !is_constant::value) { arena_t> arena_A = A.matrix(); const auto& B_ref = to_ref(B); arena_t> arena_D = D; auto AsolveB = to_arena(A.ldlt().solve(value_of(B_ref))); auto BTAsolveB = to_arena(value_of(B_ref).transpose() * AsolveB); var res = (arena_D.val().asDiagonal() * BTAsolveB).trace(); reverse_pass_callback( [arena_A, BTAsolveB, AsolveB, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.val_op().asDiagonal() * AsolveB.transpose(); arena_D.adj() += C_adj * BTAsolveB.diagonal(); }); return res; } else if (!is_constant::value && is_constant::value && is_constant::value) { arena_t> arena_A = A.matrix(); const auto& B_ref = to_ref(B); arena_t> arena_D = value_of(D); auto AsolveB = to_arena(A.ldlt().solve(value_of(B_ref))); var res = (arena_D.asDiagonal() * value_of(B_ref).transpose() * AsolveB) .trace(); reverse_pass_callback([arena_A, AsolveB, arena_D, res]() mutable { double C_adj = res.adj(); arena_A.adj() -= C_adj * AsolveB * arena_D.val_op().asDiagonal() * AsolveB.transpose(); }); return res; } else if (is_constant::value && !is_constant::value && !is_constant::value) { arena_t> arena_B = B; arena_t> arena_D = D; auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); auto BTAsolveB = to_arena(arena_B.val_op().transpose() * AsolveB); var res = (arena_D.val().asDiagonal() * BTAsolveB).trace(); reverse_pass_callback( [BTAsolveB, AsolveB, arena_B, arena_D, res]() mutable { double C_adj = res.adj(); arena_B.adj() += C_adj * AsolveB * 2 * arena_D.val_op().asDiagonal(); arena_D.adj() += C_adj * BTAsolveB.diagonal(); }); return res; } else if (is_constant::value && 
!is_constant::value && is_constant::value) { arena_t> arena_B = B; arena_t> arena_D = value_of(D); auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); var res = (arena_D.asDiagonal() * arena_B.val_op().transpose() * AsolveB) .trace(); reverse_pass_callback([AsolveB, arena_B, arena_D, res]() mutable { arena_B.adj() += res.adj() * AsolveB * 2 * arena_D.asDiagonal(); }); return res; } else if (is_constant::value && is_constant::value && !is_constant::value) { const auto& B_ref = to_ref(B); arena_t> arena_D = D; auto BTAsolveB = to_arena(value_of(B_ref).transpose() * A.ldlt().solve(value_of(B_ref))); var res = (arena_D.val().asDiagonal() * BTAsolveB).trace(); reverse_pass_callback([BTAsolveB, arena_D, res]() mutable { arena_D.adj() += res.adj() * BTAsolveB.diagonal(); }); return res; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv_square.hpp0000644000176200001440000000161614645137106023235 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_SQUARE_HPP #define STAN_MATH_REV_FUN_INV_SQUARE_HPP #include #include #include namespace stan { namespace math { /** * \f[ \mbox{inv\_square}(x) = \begin{cases} \frac{1}{x^2} & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{inv\_square}(x)}{\partial x} = \begin{cases} -\frac{2}{x^3} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * */ inline var inv_square(const var& a) { auto a_cube = a.val() * a.val() * a.val(); return make_callback_var(inv_square(a.val()), [a, a_cube](auto& vi) mutable { a.adj() -= 2 * vi.adj() / a_cube; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/step.hpp0000644000176200001440000000142414645137106022031 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_STEP_HPP #define STAN_MATH_REV_FUN_STEP_HPP #include #include namespace stan { namespace math { /** * Return the step, or 
heaviside, function applied to the * specified variable (stan). * * See step() for the double-based version. * * The derivative of the step function is zero everywhere * but at 0, so for convenience, it is taken to be everywhere * zero, * * \f$\mbox{step}(x) = 0\f$. * * @param a Variable argument. * @return The constant variable with value 1.0 if the argument's * value is greater than or equal to 0.0, and value 0.0 otherwise. */ inline var step(const var& a) { return var(new vari(a.vi_->val_ < 0.0 ? 0.0 : 1.0)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/sum.hpp0000644000176200001440000000301714645137106021662 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SUM_HPP #define STAN_MATH_REV_FUN_SUM_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the sum of the entries of the specified vector. * * @param m Vector. * @return Sum of vector entries. */ template inline var sum(const std::vector& m) { if (unlikely(m.empty())) { return 0.0; } else { auto arena_m = to_arena(as_array_or_scalar(m)); return make_callback_var(arena_m.val().sum(), [arena_m](auto& vi) mutable { arena_m.adj() += vi.adj(); }); } } /** * Returns the sum of the coefficients of the specified * matrix. * * @tparam T type of the matrix of vector. Can be either a var matrix or * matrix of vars. * @param x Specified var_value containing a matrix or vector. * @return Sum of coefficients of matrix. 
*/ template * = nullptr> inline var sum(const T& x) { arena_t x_arena = x; return make_callback_var(sum(x_arena.val()), [x_arena](auto& vi) mutable { x_arena.adj().array() += vi.adj(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv_logit.hpp0000644000176200001440000000205214645137106023046 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_LOGIT_HPP #define STAN_MATH_REV_FUN_INV_LOGIT_HPP #include #include #include namespace stan { namespace math { /** * The inverse logit function for variables (stan). * * See inv_logit() for the double-based version. * * The derivative of inverse logit is * * \f$\frac{d}{dx} \mbox{logit}^{-1}(x) = \mbox{logit}^{-1}(x) (1 - * \mbox{logit}^{-1}(x))\f$. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a Argument variable. * @return Inverse logit of argument. */ template * = nullptr> inline auto inv_logit(const var_value& a) { return make_callback_var(inv_logit(a.val()), [a](auto& vi) mutable { as_array_or_scalar(a).adj() += as_array_or_scalar(vi.adj()) * as_array_or_scalar(vi.val()) * (1.0 - as_array_or_scalar(vi.val())); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/rows_dot_product.hpp0000644000176200001440000001034614645137106024461 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ROWS_DOT_PRODUCT_HPP #define STAN_MATH_REV_FUN_ROWS_DOT_PRODUCT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the dot product of rows of the specified matrices. * * @tparam Mat1 type of the first matrix (must be derived from \c * Eigen::MatrixBase) * @tparam Mat2 type of the second matrix (must be derived from \c * Eigen::MatrixBase) * * @param v1 Matrix of first vectors. * @param v2 Matrix of second vectors. * @return Dot product of the vectors. * @throw std::domain_error If the vectors are not the same * size or if they are both not vector dimensioned. 
*/ template * = nullptr, require_any_eigen_vt* = nullptr> inline Eigen::Matrix rows_dot_product( const Mat1& v1, const Mat2& v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); Eigen::Matrix ret(v1.rows(), 1); for (size_type j = 0; j < v1.rows(); ++j) { ret.coeffRef(j) = dot_product(v1.row(j), v2.row(j)); } return ret; } /** * Returns the dot product of rows of the specified matrices. * * This overload is used when at least one of Mat1 and Mat2 is * a `var_value` where `T` inherits from `EigenBase`. The other * argument can be another `var_value` or a type that inherits from * `EigenBase`. * * @tparam Mat1 type of the first matrix * @tparam Mat2 type of the second matrix * * @param v1 Matrix of first vectors. * @param v2 Matrix of second vectors. * @return Dot product of the vectors. * @throw std::domain_error If the vectors are not the same * size or if they are both not vector dimensioned. */ template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto rows_dot_product(const Mat1& v1, const Mat2& v2) { check_matching_sizes("rows_dot_product", "v1", v1, "v2", v2); using return_t = return_var_matrix_t< decltype((v1.val().array() * v2.val().array()).rowwise().sum().matrix()), Mat1, Mat2>; if (!is_constant::value && !is_constant::value) { arena_t> arena_v1 = v1; arena_t> arena_v2 = v2; return_t res = (arena_v1.val().array() * arena_v2.val().array()).rowwise().sum(); reverse_pass_callback([arena_v1, arena_v2, res]() mutable { if (is_var_matrix::value) { arena_v1.adj().noalias() += res.adj().asDiagonal() * arena_v2.val(); } else { arena_v1.adj() += res.adj().asDiagonal() * arena_v2.val(); } if (is_var_matrix::value) { arena_v2.adj().noalias() += res.adj().asDiagonal() * arena_v1.val(); } else { arena_v2.adj() += res.adj().asDiagonal() * arena_v1.val(); } }); return res; } else if (!is_constant::value) { arena_t> arena_v1 = value_of(v1); arena_t> arena_v2 = v2; return_t res = (arena_v1.array() * arena_v2.val().array()).rowwise().sum(); 
reverse_pass_callback([arena_v1, arena_v2, res]() mutable { if (is_var_matrix::value) { arena_v2.adj().noalias() += res.adj().asDiagonal() * arena_v1; } else { arena_v2.adj() += res.adj().asDiagonal() * arena_v1; } }); return res; } else { arena_t> arena_v1 = v1; arena_t> arena_v2 = value_of(v2); return_t res = (arena_v1.val().array() * arena_v2.array()).rowwise().sum(); reverse_pass_callback([arena_v1, arena_v2, res]() mutable { if (is_var_matrix::value) { arena_v1.adj().noalias() += res.adj().asDiagonal() * arena_v2; } else { arena_v1.adj() += res.adj().asDiagonal() * arena_v2; } }); return res; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv_Phi.hpp0000644000176200001440000000234314645137106022453 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_PHI_HPP #define STAN_MATH_REV_FUN_INV_PHI_HPP #include #include #include #include #include namespace stan { namespace math { /** * The inverse of unit normal cumulative density function. * * The derivative is the reciprocal of unit normal density function, * * @param p Probability * @return The unit normal inverse cdf evaluated at p */ inline var inv_Phi(const var& p) { return make_callback_var(inv_Phi(p.val()), [p](auto& vi) mutable { p.adj() += vi.adj() * SQRT_TWO_PI / std::exp(-0.5 * vi.val() * vi.val()); }); } /** * Return the elementwise inverse of unit normal cumulative density function. 
* * @tparam T a `var_value` with inner Eigen type * @param p Probability vector * @return Elementwise unit normal inverse cdf */ template * = nullptr> inline auto inv_Phi(const T& p) { return make_callback_var(inv_Phi(p.val()), [p](auto& vi) mutable { p.adj().array() += vi.adj().array() * SQRT_TWO_PI / (-0.5 * vi.val().array().square()).exp(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/conj.hpp0000644000176200001440000000074614645137106022015 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CONJ_HPP #define STAN_MATH_REV_FUN_CONJ_HPP #include #include #include namespace stan { namespace math { /** * Return the complex conjugate of the complex argument. * * @param[in] z argument * @return complex conjugate of the argument */ inline std::complex conj(const std::complex& z) { return internal::complex_conj(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/determinant.hpp0000644000176200001440000000150414645137106023367 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DETERMINANT_HPP #define STAN_MATH_REV_FUN_DETERMINANT_HPP #include #include #include #include namespace stan { namespace math { template * = nullptr> inline var determinant(const T& m) { check_square("determinant", "m", m); if (m.size() == 0) { return var(1.0); } arena_t arena_m = m; auto m_lu = arena_m.val().partialPivLu(); auto arena_m_inv_transpose = to_arena(m_lu.inverse().transpose()); var det = m_lu.determinant(); reverse_pass_callback([arena_m, det, arena_m_inv_transpose]() mutable { arena_m.adj() += (det.adj() * det.val()) * arena_m_inv_transpose; }); return det; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/to_var_value.hpp0000644000176200001440000000317014645137106023544 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TO_VAR_VALUE_HPP #define STAN_MATH_REV_FUN_TO_VAR_VALUE_HPP #include #include #include #include namespace stan { namespace math { /** * Converts an 
Eigen matrix (or vector or row_vector) or expression of `var`s * into `var_value`. Adjoint is propagated back to argument in the reverse * pass. * * @tparam T type of the input * @param a matrix to convert */ template * = nullptr> inline var_value< Eigen::Matrix> to_var_value(const T& a) { arena_matrix> a_arena = a; var_value> res(a_arena.val()); reverse_pass_callback( [res, a_arena]() mutable { a_arena.adj() += res.adj(); }); return res; } /** * This is a no-op for var_values. * * @tparam T type of the input * @param a matrix to convert */ template * = nullptr> inline T to_var_value(T&& a) { return std::forward(a); } /** * Convert the elements of the `std::vector` input to `var_value` types * if possible * * @tparam T type of elemnts of the input vector * @param a std::vector of elements to convert */ template inline auto to_var_value(const std::vector& a) { std::vector()))> out; out.reserve(a.size()); for (size_t i = 0; i < a.size(); ++i) { out.emplace_back(to_var_value(a[i])); } return out; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/ordered_constrain.hpp0000644000176200001440000000454414645137106024570 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ORDERED_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_ORDERED_CONSTRAIN_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return an increasing ordered vector derived from the specified * free vector. The returned constrained vector will have the * same dimensionality as the specified free vector. 
* * @param x Free vector of scalars * @return Increasing ordered vector */ template * = nullptr> inline auto ordered_constrain(const T& x) { using ret_type = plain_type_t; using std::exp; size_t N = x.size(); if (unlikely(N == 0)) { return ret_type(x); } Eigen::VectorXd y_val(N); arena_t arena_x = x; arena_t exp_x(N - 1); y_val.coeffRef(0) = arena_x.val().coeff(0); for (Eigen::Index n = 1; n < N; ++n) { exp_x.coeffRef(n - 1) = exp(arena_x.val().coeff(n)); y_val.coeffRef(n) = y_val.coeff(n - 1) + exp_x.coeff(n - 1); } arena_t y = y_val; reverse_pass_callback([arena_x, y, exp_x]() mutable { double rolling_adjoint_sum = 0.0; for (int n = arena_x.size() - 1; n > 0; --n) { rolling_adjoint_sum += y.adj().coeff(n); arena_x.adj().coeffRef(n) += exp_x.coeff(n - 1) * rolling_adjoint_sum; } arena_x.adj().coeffRef(0) += rolling_adjoint_sum + y.adj().coeff(0); }); return ret_type(y); } /** * Return a positive valued, increasing ordered vector derived * from the specified free vector and increment the specified log * probability reference with the log absolute Jacobian determinant * of the transform. The returned constrained vector * will have the same dimensionality as the specified free vector. * * @tparam T type of the vector * @param x Free vector of scalars. * @param lp Log probability reference. * @return Positive, increasing ordered vector. */ template * = nullptr> auto ordered_constrain(const VarVec& x, scalar_type_t& lp) { if (x.size() > 1) { lp += sum(x.tail(x.size() - 1)); } return ordered_constrain(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/matrix_power.hpp0000644000176200001440000000420114645137106023572 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MATRIX_POWER_HPP #define STAN_MATH_REV_FUN_MATRIX_POWER_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the nth power of the specific matrix. M^n = M * M * ... * M. 
* * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic * @param[in] M a square matrix * @param[in] n exponent * @return nth power of M * @throw std::domain_error if the matrix contains NaNs or infinities. * @throw std::invalid_argument if the exponent is negative or the matrix is not * square. */ template * = nullptr> inline plain_type_t matrix_power(const T& M, const int n) { check_square("matrix_power", "M", M); check_nonnegative("matrix_power", "n", n); if (M.size() == 0) return M; const auto& M_ref = to_ref(M); check_finite("matrix_power", "M", M_ref); size_t N = M.rows(); if (n == 0) return Eigen::MatrixXd::Identity(N, N); if (n == 1) return M_ref; arena_t> arena_powers(n + 1); arena_t> arena_M = M_ref; arena_powers[0] = Eigen::MatrixXd::Identity(N, N); arena_powers[1] = M_ref.val(); for (size_t i = 2; i <= n; ++i) { arena_powers[i] = arena_powers[1] * arena_powers[i - 1]; } using ret_type = return_var_matrix_t; arena_t res = arena_powers[arena_powers.size() - 1]; reverse_pass_callback([arena_M, n, res, arena_powers]() mutable { const auto& M_val = arena_powers[1]; Eigen::MatrixXd adj_C = res.adj(); Eigen::MatrixXd adj_M = Eigen::MatrixXd::Zero(M_val.rows(), M_val.cols()); for (size_t i = n; i > 1; --i) { adj_M += adj_C * arena_powers[i - 1].transpose(); adj_C = M_val.transpose() * adj_C; } arena_M.adj() += adj_M + adj_C; }); return ret_type(res); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv_inc_beta.hpp0000644000176200001440000000712514645137106023502 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_INC_BETA_HPP #define STAN_MATH_REV_FUN_INV_INC_BETA_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * The inverse of the normalized incomplete beta function of a, b, with * probability p. 
* * Used to compute the inverse cumulative density function for the beta * distribution. * \f[ \frac{\partial }{\partial a} = (1-w)^{1-b}w^{1-a} \left( w^a\Gamma(a)^2 {}_3\tilde{F}_2(a,a,1-b;a+1,a+1;w) - B(a,b)I_w(a,b)\left(\log(w)-\psi(a) + \psi(a+b)\right) \right)/;w=I_z^{-1}(a,b) \f] \f[ \frac{\partial }{\partial b} = (1-w)^{-b}w^{1-a}(w-1) \left( (1-w)^{b}\Gamma(b)^2 {}_3\tilde{F}_2(b,b,1-a;b+1,b+1;1-w) - B_{1-w}(b,a)\left(\log(1-w)-\psi(b) + \psi(a+b)\right) \right)/;w=I_z^{-1}(a,b) \f] \f[ \frac{\partial }{\partial z} = (1-w)^{1-b}w^{1-a}B(a,b)/;w=I_z^{-1}(a,b) \f] * * @param a Shape parameter a >= 0; a and b can't both be 0 * @param b Shape parameter b >= 0 * @param p Random variate. 0 <= p <= 1 * @throws if constraints are violated or if any argument is NaN * @return The inverse of the normalized incomplete beta function. */ template * = nullptr, require_any_var_t* = nullptr> inline var inv_inc_beta(const T1& a, const T2& b, const T3& p) { double a_val = value_of(a); double b_val = value_of(b); double p_val = value_of(p); double w = inv_inc_beta(a_val, b_val, p_val); return make_callback_var(w, [a, b, p, a_val, b_val, w](auto& vi) { double log_w = log(w); double log1m_w = log1m(w); double one_m_a = 1 - a_val; double one_m_b = 1 - b_val; double one_m_w = 1 - w; double ap1 = a_val + 1; double bp1 = b_val + 1; double lbeta_ab = lbeta(a_val, b_val); double digamma_apb = digamma(a_val + b_val); if (!is_constant_all::value) { double da1 = exp(one_m_b * log1m_w + one_m_a * log_w); double da2 = a_val * log_w + 2 * lgamma(a_val) + log(hypergeometric_3F2({a_val, a_val, one_m_b}, {ap1, ap1}, w)) - 2 * lgamma(ap1); double da3 = inc_beta(a_val, b_val, w) * exp(lbeta_ab) * (log_w - digamma(a_val) + digamma_apb); forward_as(a).adj() += vi.adj() * da1 * (exp(da2) - da3); } if (!is_constant_all::value) { double db1 = (w - 1) * exp(-b_val * log1m_w + one_m_a * log_w); double db2 = 2 * lgamma(b_val) + log(hypergeometric_3F2({b_val, b_val, one_m_a}, {bp1, bp1}, one_m_w)) - 2 * 
lgamma(bp1) + b_val * log1m_w; double db3 = inc_beta(b_val, a_val, one_m_w) * exp(lbeta_ab) * (log1m_w - digamma(b_val) + digamma_apb); forward_as(b).adj() += vi.adj() * db1 * (exp(db2) - db3); } if (!is_constant_all::value) { forward_as(p).adj() += vi.adj() * exp(one_m_b * log1m_w + one_m_a * log_w + lbeta_ab); } }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_inv_logit.hpp0000644000176200001440000000241414645137106023711 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_INV_LOGIT_HPP #define STAN_MATH_REV_FUN_LOG_INV_LOGIT_HPP #include #include #include #include namespace stan { namespace math { /** * Return the natural logarithm of the inverse logit of the * specified argument. * * @tparam T An arithmetic type * @param u `var_value` with inner arithmetic type * @return log inverse logit of the argument */ template * = nullptr> inline auto log_inv_logit(const var_value& u) { return make_callback_var(log_inv_logit(u.val()), [u](auto& vi) mutable { u.adj() += vi.adj() * inv_logit(-u.val()); }); } /** * Return the natural logarithm of the inverse logit of the * specified argument. 
* * @tparam T A type derived from `Eigen::EigenBase` * @param u `var_value` with inner Eigen type * @return log inverse logit of the argument */ template * = nullptr> inline auto log_inv_logit(const var_value& u) { return make_callback_var(log_inv_logit(u.val()), [u](auto& vi) mutable { u.adj().array() += vi.adj().array() * inv_logit(-u.val()).array(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/matrix_exp_multiply.hpp0000644000176200001440000000213314645137106025173 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MATRIX_EXP_MULTIPLY_HPP #define STAN_MATH_REV_FUN_MATRIX_EXP_MULTIPLY_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Wrapper of matrix_exp_action function for a more literal name * * @tparam Ta type of the matrix A * @tparam Tb type of the matrix B * * @param[in] A Matrix * @param[in] B Matrix * @return exponential of A multiplies B */ template * = nullptr, require_any_st_autodiff* = nullptr> inline Eigen::Matrix, -1, Tb::ColsAtCompileTime> matrix_exp_multiply(const Ta& A, const Tb& B) { check_square("matrix_exp_multiply", "input matrix", A); check_multiplicable("matrix_exp_multiply", "A", A, "B", B); if (A.size() == 0) { return {0, B.cols()}; } return multiply(matrix_exp(A), B); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fill.hpp0000644000176200001440000000305214645137106022003 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FILL_HPP #define STAN_MATH_REV_FUN_FILL_HPP #include #include #include #include namespace stan { namespace math { /** * Fill the specified container with the specified value. * * The specified matrix is filled by element. * * @tparam VarMat a `var_value` with inner type from `EigenBase` * @tparam S A var. * * @param x Container. * @param y Value. 
*/ template * = nullptr, require_var_t* = nullptr> inline void fill(VarMat& x, const S& y) { arena_t>> prev_vals = x.val().eval(); x.vi_->val_.fill(y.val()); reverse_pass_callback([x, y, prev_vals]() mutable { x.vi_->val_ = prev_vals; y.adj() += x.adj().sum(); x.adj().setZero(); }); } /** * Fill the specified container with the specified value. * * The specified matrix is filled by element. * * @tparam VarMat a `var_value` with inner type from `EigenBase` * @tparam S An arithmetic type. * * @param x Container. * @param y Value. */ template * = nullptr, require_arithmetic_t* = nullptr> inline void fill(VarMat& x, const S& y) { arena_t>> prev_vals = x.val().eval(); x.vi_->val_.fill(y); reverse_pass_callback([x, prev_vals]() mutable { x.vi_->val_ = prev_vals; x.adj().setZero(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/acos.hpp0000644000176200001440000000446614645137106022014 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ACOS_HPP #define STAN_MATH_REV_FUN_ACOS_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the principal value of the arc cosine of a variable, * in radians (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \arccos x = \frac{-1}{\sqrt{1 - x^2}}\f$. * * \f[ \mbox{acos}(x) = \begin{cases} \textrm{NaN} & \mbox{if } x < -1\\ \arccos(x) & \mbox{if } -1\leq x\leq 1 \\ \textrm{NaN} & \mbox{if } x > 1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{acos}(x)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < -1\\ \frac{\partial\, \arccos(x)}{\partial x} & \mbox{if } -1\leq x\leq 1 \\ \textrm{NaN} & \mbox{if } x < -1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial \, \arccos(x)}{\partial x} = -\frac{1}{\sqrt{1-x^2}} \f] * * @param x argument * @return Arc cosine of variable, in radians. 
*/ inline var acos(const var& x) { return make_callback_var(std::acos(x.val()), [x](const auto& vi) mutable { x.adj() -= vi.adj() / std::sqrt(1.0 - (x.val() * x.val())); }); } /** * Return the principal value of the arc cosine of a variable, * in radians (cmath). * * @param x a `var_value` with inner Eigen type * @return Arc cosine of variable, in radians. */ template * = nullptr> inline auto acos(const VarMat& x) { return make_callback_var( x.val().array().acos().matrix(), [x](const auto& vi) mutable { x.adj().array() -= vi.adj().array() / (1.0 - (x.val().array().square())).sqrt(); }); } /** * Return the arc cosine of the complex argument. * * @param x argument * @return arc cosine of the argument */ inline std::complex acos(const std::complex& x) { return stan::math::internal::complex_acos(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/eigenvalues_sym.hpp0000644000176200001440000000262114645137106024255 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_EIGENVALUES_HPP #define STAN_MATH_REV_FUN_EIGENVALUES_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the eigenvalues of the specified symmetric matrix. * @tparam T type of input matrix. * @param m Specified matrix. * @return Eigenvalues of matrix. 
*/ template * = nullptr> inline auto eigenvalues_sym(const T& m) { using return_t = return_var_matrix_t; if (unlikely(m.size() == 0)) { return return_t(Eigen::VectorXd(0)); } check_symmetric("eigenvalues_sym", "m", m); auto arena_m = to_arena(m); Eigen::SelfAdjointEigenSolver solver(arena_m.val()); arena_t eigenvals = solver.eigenvalues(); auto eigenvecs = to_arena(solver.eigenvectors()); reverse_pass_callback([eigenvals, arena_m, eigenvecs]() mutable { arena_m.adj() += eigenvecs * eigenvals.adj().asDiagonal() * eigenvecs.transpose(); }); return return_t(eigenvals); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/to_soa_sparse_matrix.hpp0000644000176200001440000001252614645137106025310 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TO_SOA_SPARSE_MATRIX_HPP #define STAN_MATH_REV_FUN_TO_SOA_SPARSE_MATRIX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Create a sparse matrix from the given SoA matrix and indexes. * @tparam Options Eigen matrix options. * @tparam VarMatrix A @ref var_value with a dense vector inner type * @tparam Vec1 Container type of the column indexes. * @tparam Vec2 Container type of the row indexes. * @param m Number of rows in matrix. * @param n Number of columns in matrix. * @param w Vector of non-zero values in matrix. * @param u Index of where each row starts in w, length equal to * the number of rows plus one. * @param v Column index of each non-zero value, same * length as w. * @return Sparse matrix. 
*/ template * = nullptr, require_eigen_dense_base_t>* = nullptr, require_all_std_vector_vt* = nullptr> inline auto to_soa_sparse_matrix(int m, int n, VarMatrix&& w, Vec1&& u, Vec2&& v) { auto u_arena = to_arena(std::forward(u)); auto v_arena = to_arena(std::forward(v)); using sparse_mat_t = Eigen::SparseMatrix; using sparse_arena_mat_t = arena_t; sparse_arena_mat_t arena_val_x(m, n, w.val().size(), u_arena.data(), v_arena.data(), w.vi_->val_.data()); sparse_arena_mat_t arena_adj_x(m, n, w.adj().size(), u_arena.data(), v_arena.data(), w.vi_->adj_.data()); var_value var_x(arena_val_x, arena_adj_x); return var_x; } /** * Create a sparse matrix from the given AoS matrix of vars and indexes. * @tparam Options Eigen matrix options. * @tparam MatrixVar A type inheriting from `Eigen::DenseBase` with a scalar * type of @ref var_value * @tparam Vec1 Container type of the column indexes. * @tparam Vec2 Container type of the row indexes. * @param m Number of rows in matrix. * @param n Number of columns in matrix. * @param w Vector of non-zero values in matrix. * @param u Index of where each row starts in w, length equal to * the number of rows plus one. * @param v Column index of each non-zero value, same * length as w. * @return Sparse matrix. 
*/ template * = nullptr, require_all_std_vector_vt* = nullptr> inline auto to_soa_sparse_matrix(int m, int n, MatrixVar&& w, Vec1&& u, Vec2&& v) { auto w_arena = to_arena(std::forward(w)); auto u_arena = to_arena(std::forward(u)); auto v_arena = to_arena(std::forward(v)); arena_t> arena_x( m, n, w_arena.size(), u_arena.data(), v_arena.data(), w_arena.data()); var_value> var_x(value_of(arena_x)); // No need to copy adj, but need to backprop reverse_pass_callback([arena_x, var_x]() mutable { using var_sparse_iterator_t = typename arena_t>::InnerIterator; using dbl_sparse_iterator_t = typename arena_t>::InnerIterator; // arena_x.adj() += var_x.adj() once custom adj() for var sparse matrix for (int k = 0; k < arena_x.outerSize(); ++k) { var_sparse_iterator_t it_arena_x(arena_x, k); dbl_sparse_iterator_t it_var_x(var_x.adj(), k); for (; static_cast(it_arena_x) && static_cast(it_var_x); ++it_arena_x, ++it_var_x) { it_arena_x.valueRef().adj() += it_var_x.valueRef(); } } }); return var_x; } /** * Create a sparse matrix from the given matrix of floats and indexes. * @tparam Options Eigen matrix options. * @tparam Mat A type inheriting from `Eigen::DenseBase` with an arithmetic * scalar type * @tparam Vec1 Container type of the column indexes. * @tparam Vec2 Container type of the row indexes. * @param m Number of rows in matrix. * @param n Number of columns in matrix. * @param w Vector of non-zero values in matrix. * @param u Index of where each row starts in w, length equal to * the number of rows plus one. * @param v Column index of each non-zero value, same * length as w. * @return Sparse matrix. 
*/ template * = nullptr, require_all_std_vector_vt* = nullptr> inline auto to_soa_sparse_matrix(int m, int n, Mat&& w, Vec1&& u, Vec2&& v) { auto w_arena = to_arena(std::forward(w)); auto u_arena = to_arena(std::forward(u)); auto v_arena = to_arena(std::forward(v)); arena_t> arena_x( m, n, w_arena.size(), u_arena.data(), v_arena.data(), w_arena.data()); return var_value>(arena_x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_determinant.hpp0000644000176200001440000000161514645137106024233 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_DETERMINANT_HPP #define STAN_MATH_REV_FUN_LOG_DETERMINANT_HPP #include #include #include #include #include namespace stan { namespace math { template * = nullptr> inline var log_determinant(const T& m) { check_square("log_determinant", "m", m); if (m.size() == 0) { return var(0.0); } arena_t arena_m = m; auto m_hh = arena_m.val().colPivHouseholderQr(); auto arena_m_inv_transpose = to_arena(m_hh.inverse().transpose()); var log_det = m_hh.logAbsDeterminant(); reverse_pass_callback([arena_m, log_det, arena_m_inv_transpose]() mutable { arena_m.adj() += log_det.adj() * arena_m_inv_transpose; }); return log_det; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/square.hpp0000644000176200001440000000253114645137106022356 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SQUARE_HPP #define STAN_MATH_REV_FUN_SQUARE_HPP #include #include namespace stan { namespace math { /** * Return the square of the input variable. * *

Using square(x) is more efficient * than using x * x. * \f[ \mbox{square}(x) = \begin{cases} x^2 & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{square}(x)}{\partial x} = \begin{cases} 2x & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param x Variable to square. * @return Square of variable. */ inline var square(const var& x) { return make_callback_var(square(x.val()), [x](auto& vi) mutable { x.adj() += vi.adj() * 2.0 * x.val(); }); } /** * Return the elementwise square of x * * @tparam T type of x * @param x argument * @return elementwise square of x */ template * = nullptr> inline auto square(const T& x) { return make_callback_var( (x.val().array().square()).matrix(), [x](const auto& vi) mutable { x.adj() += (2.0 * x.val().array() * vi.adj().array()).matrix(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/as_column_vector_or_scalar.hpp0000644000176200001440000000206214645137106026444 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_AS_COLUMN_VECTOR_OR_SCALAR_HPP #define STAN_MATH_REV_FUN_AS_COLUMN_VECTOR_OR_SCALAR_HPP #include #include namespace stan { namespace math { /** * Converts input argument to a `var_value<>` with column vector or a scalar. * For column vector inputs this is an identity function. * * @tparam T Type of `var_value`. * @param a Specified vector. * @return Same vector. */ template * = nullptr> inline auto as_column_vector_or_scalar(T&& a) { return std::forward(a); } /** * Converts `var_value` with row vector inner type to a `var_value<>` * with inner column vector type * @tparam T A `var_value<>` with an inner row vector type. * @param a Specified vector. * @return Transposed vector. 
*/ template * = nullptr, require_not_var_col_vector_t* = nullptr> inline auto as_column_vector_or_scalar(T&& a) { return a.transpose(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/dot_product.hpp0000644000176200001440000000564214645137106023412 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DOT_PRODUCT_HPP #define STAN_MATH_REV_FUN_DOT_PRODUCT_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the dot product. * * @tparam T1 type of elements in the first vector * @tparam T2 type of elements in the second vector * * @param[in] v1 First vector. * @param[in] v2 Second vector. * @return Dot product of the vectors. * @throw std::domain_error if sizes of v1 and v2 do not match. */ template * = nullptr, require_not_complex_t>* = nullptr, require_all_not_std_vector_t* = nullptr, require_any_st_var* = nullptr> inline var dot_product(const T1& v1, const T2& v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); if (v1.size() == 0) { return 0.0; } if (!is_constant::value && !is_constant::value) { arena_t> v1_arena = v1; arena_t> v2_arena = v2; return make_callback_var( v1_arena.val().dot(v2_arena.val()), [v1_arena, v2_arena](const auto& vi) mutable { const auto res_adj = vi.adj(); for (Eigen::Index i = 0; i < v1_arena.size(); ++i) { v1_arena.adj().coeffRef(i) += res_adj * v2_arena.val().coeff(i); v2_arena.adj().coeffRef(i) += res_adj * v1_arena.val().coeff(i); } }); } else if (!is_constant::value) { arena_t> v2_arena = v2; arena_t> v1_val_arena = value_of(v1); return make_callback_var(v1_val_arena.dot(v2_arena.val()), [v1_val_arena, v2_arena](const auto& vi) mutable { v2_arena.adj().array() += vi.adj() * v1_val_arena.array(); }); } else { arena_t> v1_arena = v1; arena_t> v2_val_arena = value_of(v2); return make_callback_var(v1_arena.val().dot(v2_val_arena), [v1_arena, v2_val_arena](const 
auto& vi) mutable { v1_arena.adj().array() += vi.adj() * v2_val_arena.array(); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/trace_inv_quad_form_ldlt.hpp0000644000176200001440000000465114645137106026111 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TRACE_INV_QUAD_FORM_LDLT_HPP #define STAN_MATH_REV_FUN_TRACE_INV_QUAD_FORM_LDLT_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Compute the trace of an inverse quadratic form premultiplied by a * square matrix. This computes * trace(B^T A^-1 B) * where the LDLT_factor of A is provided. * * @tparam T1 type of elements in the LDLT_factor * @tparam T2 type of the second matrix * * @param A an LDLT_factor * @param B a matrix * @return The trace of the inverse quadratic form. */ template * = nullptr, require_any_st_var* = nullptr> inline var trace_inv_quad_form_ldlt(LDLT_factor& A, const T2& B) { check_multiplicable("trace_quad_form", "A", A.matrix(), "B", B); if (A.matrix().size() == 0) return 0.0; if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A.matrix(); arena_t> arena_B = B; auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); var res = (arena_B.val_op().transpose() * AsolveB).trace(); reverse_pass_callback([arena_A, AsolveB, arena_B, res]() mutable { arena_A.adj() += -res.adj() * AsolveB * AsolveB.transpose(); arena_B.adj() += 2 * res.adj() * AsolveB; }); return res; } else if (!is_constant::value) { arena_t> arena_A = A.matrix(); const auto& B_ref = to_ref(B); auto AsolveB = to_arena(A.ldlt().solve(value_of(B_ref))); var res = (value_of(B_ref).transpose() * AsolveB).trace(); reverse_pass_callback([arena_A, AsolveB, res]() mutable { arena_A.adj() += -res.adj() * AsolveB * AsolveB.transpose(); }); return res; } else { arena_t> arena_B = B; auto AsolveB = to_arena(A.ldlt().solve(arena_B.val())); var res = (arena_B.val_op().transpose() * AsolveB).trace(); 
reverse_pass_callback([AsolveB, arena_B, res]() mutable { arena_B.adj() += 2 * res.adj() * AsolveB; }); return res; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/quad_form_sym.hpp0000644000176200001440000000246414645137106023730 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_QUAD_FORM_SYM_HPP #define STAN_MATH_REV_FUN_QUAD_FORM_SYM_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the quadratic form \f$ B^T A B \f$ of a symmetric matrix. * * Symmetry of the resulting matrix is guaranteed. * * @tparam EigMat1 type of the first (symmetric) matrix * @tparam EigMat2 type of the second matrix * * @param A symmetric matrix * @param B second matrix * @return The quadratic form, which is a symmetric matrix of size Cb. * If \c B is a column vector returns a scalar. * @throws std::invalid_argument if A is not symmetric, or if A cannot be * multiplied by B */ template * = nullptr, require_any_vt_var* = nullptr> inline auto quad_form_sym(const EigMat1& A, const EigMat2& B) { check_multiplicable("quad_form_sym", "A", A, "B", B); const auto& A_ref = to_ref(A); check_symmetric("quad_form_sym", "A", A_ref); return quad_form(A_ref, B, true); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/rising_factorial.hpp0000644000176200001440000000147514645137106024403 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_RISING_FACTORIAL_HPP #define STAN_MATH_REV_FUN_RISING_FACTORIAL_HPP #include #include #include #include namespace stan { namespace math { namespace internal { class rising_factorial_vd_vari : public op_vd_vari { public: rising_factorial_vd_vari(vari* avi, int b) : op_vd_vari(rising_factorial(avi->val_, b), avi, b) {} void chain() { avi_->adj_ += adj_ * rising_factorial(avi_->val_, bd_) * (digamma(avi_->val_ + bd_) - digamma(avi_->val_)); } }; } // namespace internal inline var rising_factorial(const var& a, int b) { return var(new 
internal::rising_factorial_vd_vari(a.vi_, b)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/dims.hpp0000644000176200001440000000167614645137106022023 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DIMS_HPP #define STAN_MATH_REV_FUN_DIMS_HPP #include #include #include #include namespace stan { namespace math { /** * Pushes dimensions of given argument into given result vector. * * For `var_value` that is the dimensions of inner `vari_value`. * @tparam type in `var_value` * @param x argument * @param result result */ template inline void dims(const var_value& x, std::vector& result) { dims(*x.vi_, result); } /** * Pushes dimensions of given argument into given result vector. * * For `vari_value` containing Eigen type those are the numbers of rows and * columns. * @param x argument * @param result result */ template inline void dims(const vari_value& x, std::vector& result) { dims(x.val_, result); } } // namespace math } // namespace stan #endif // DIMS_HPP StanHeaders/inst/include/stan/math/rev/fun/tanh.hpp0000644000176200001440000000356314645137106022016 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TANH_HPP #define STAN_MATH_REV_FUN_TANH_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the hyperbolic tangent of the specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \tanh x = \frac{1}{\cosh^2 x}\f$. * * \f[ \mbox{tanh}(x) = \begin{cases} \tanh(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{tanh}(x)}{\partial x} = \begin{cases} \mbox{sech}^2(x) & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable. * @return Hyperbolic tangent of variable. 
*/ inline var tanh(const var& a) { return make_callback_var(std::tanh(a.val()), [a](const auto& vi) mutable { const auto a_cosh = std::cosh(a.val()); a.adj() += vi.adj_ / (a_cosh * a_cosh); }); } /** * Return the hyperbolic tangent of elements of a * * @tparam T type of a * @param a argument * @return elementwise hyperbolic tangent of a */ template * = nullptr> inline auto tanh(const VarMat& a) { return make_callback_var( a.val().array().tanh().matrix(), [a](const auto& vi) mutable { a.adj().array() += vi.adj_.array() / (a.val().array().cosh().square()); }); } /** * Return the hyperbolic tangent of the complex argument. * * @param[in] z argument * @return hyperbolic tangent of the argument */ inline std::complex tanh(const std::complex& z) { return stan::math::internal::complex_tanh(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log1m_inv_logit.hpp0000644000176200001440000000175614645137106024157 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG1M_INV_LOGIT_HPP #define STAN_MATH_REV_FUN_LOG1M_INV_LOGIT_HPP #include #include #include #include namespace stan { namespace math { /** * Return the natural logarithm of one minus the inverse logit of * the specified argument. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. 
* @param u argument * @return log of one minus the inverse logit of the argument */ template * = nullptr> inline auto log1m_inv_logit(const var_value& u) { auto precomp_inv_logit = to_arena(as_array_or_scalar(-inv_logit(u.val()))); return make_callback_var( log1m_inv_logit(u.val()), [u, precomp_inv_logit](auto& vi) mutable { as_array_or_scalar(u.adj()) += as_array_or_scalar(vi.adj()) * precomp_inv_logit; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/adjoint_of.hpp0000644000176200001440000000264514645137106023200 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ADJOINT_OF_HPP #define STAN_MATH_REV_FUN_ADJOINT_OF_HPP #include namespace stan { namespace math { namespace internal { struct nonexisting_adjoint { template nonexisting_adjoint operator+(const T&) { return *this; } template nonexisting_adjoint operator+=(T) { throw std::runtime_error( "internal::nonexisting_adjoint::operator+= should never be called! " "Please file a bug report."); } template nonexisting_adjoint operator-=(T) { throw std::runtime_error( "internal::nonexisting_adjoint::operator-= should never be called! " "Please file a bug report."); } }; } // namespace internal /** * Returns a reference to a variable's adjoint. * * @param x a var * @return reference to `x`'s adjoint */ template * = nullptr> auto& adjoint_of(const T& x) { return x.adj(); } /** * Returns a reference to a variable's adjoint. If the input object is not var, * it does not have an adjoint and this returns a dummy object. It defines * operators += and -=, but they should not actually be called. 
* * @param x any non-var object * @return a dummy adjoint */ template * = nullptr> internal::nonexisting_adjoint adjoint_of(const T& x) { return {}; } } // namespace math } // namespace stan #endif // ADJOINT_OF_HPP StanHeaders/inst/include/stan/math/rev/fun/gp_exp_quad_cov.hpp0000644000176200001440000000734614645137106024232 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_GP_EXP_QUAD_COV_HPP #define STAN_MATH_REV_FUN_GP_EXP_QUAD_COV_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a squared exponential kernel. * * @tparam T_x type of elements in the vector * @param x std::vector input that can be used in square distance * Assumes each element of x is the same size * @param sigma standard deviation * @param length_scale length scale * @return squared distance * @throw std::domain_error if sigma <= 0, l <= 0, or * x is nan or infinite */ template * = nullptr, require_stan_scalar_t* = nullptr> inline Eigen::Matrix gp_exp_quad_cov(const std::vector& x, const T_sigma sigma, const var length_scale) { check_positive("gp_exp_quad_cov", "sigma", sigma); check_positive("gp_exp_quad_cov", "length_scale", length_scale); size_t x_size = x.size(); for (size_t i = 0; i < x_size; ++i) { check_not_nan("gp_exp_quad_cov", "x", x[i]); } Eigen::Matrix cov(x_size, x_size); if (x_size == 0) { return cov; } size_t l_tri_size = x_size * (x_size - 1) / 2; arena_matrix sq_dists_lin(l_tri_size); arena_matrix> cov_l_tri_lin(l_tri_size); arena_matrix> cov_diag( is_constant::value ? 
0 : x_size); double l_val = value_of(length_scale); double sigma_sq = square(value_of(sigma)); double neg_half_inv_l_sq = -0.5 / square(l_val); size_t block_size = 10; size_t pos = 0; for (size_t jb = 0; jb < x_size; jb += block_size) { size_t j_end = std::min(x_size, jb + block_size); size_t j_size = j_end - jb; cov.diagonal().segment(jb, j_size) = Eigen::VectorXd::Constant(j_size, sigma_sq); if (!is_constant::value) { cov_diag.segment(jb, j_size) = cov.diagonal().segment(jb, j_size); } for (size_t ib = jb; ib < x_size; ib += block_size) { size_t i_end = std::min(x_size, ib + block_size); for (size_t j = jb; j < j_end; ++j) { for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { sq_dists_lin.coeffRef(pos) = squared_distance(x[i], x[j]); cov_l_tri_lin.coeffRef(pos) = cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * exp(sq_dists_lin.coeff(pos) * neg_half_inv_l_sq); pos++; } } } } reverse_pass_callback( [cov_l_tri_lin, cov_diag, sq_dists_lin, sigma, length_scale, x_size]() { size_t l_tri_size = x_size * (x_size - 1) / 2; double adjl = 0; double adjsigma = 0; for (Eigen::Index pos = 0; pos < l_tri_size; pos++) { double prod_add = cov_l_tri_lin.coeff(pos).val() * cov_l_tri_lin.coeff(pos).adj(); adjl += prod_add * sq_dists_lin.coeff(pos); if (!is_constant::value) { adjsigma += prod_add; } } if (!is_constant::value) { adjsigma += (cov_diag.val().array() * cov_diag.adj().array()).sum(); adjoint_of(sigma) += adjsigma * 2 / value_of(sigma); } double l_val = value_of(length_scale); length_scale.adj() += adjl / (l_val * l_val * l_val); }); return cov; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/multiply_lower_tri_self_transpose.hpp0000644000176200001440000000257414645137106030141 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MULTIPLY_LOWER_TRI_SELF_TRANSPOSE_HPP #define STAN_MATH_REV_FUN_MULTIPLY_LOWER_TRI_SELF_TRANSPOSE_HPP #include #include #include #include #include #include #include #include namespace stan { namespace 
math { template * = nullptr> inline auto multiply_lower_tri_self_transpose(const T& L) { using ret_type = return_var_matrix_t; if (L.size() == 0) { return ret_type(decltype(multiply_lower_tri_self_transpose(value_of(L)))()); } arena_t arena_L = L; arena_t> arena_L_val = arena_L.val().template triangularView(); arena_t res = arena_L_val.template triangularView() * arena_L_val.transpose(); reverse_pass_callback([res, arena_L, arena_L_val]() mutable { arena_L.adj() += ((res.adj().transpose() + res.adj()) * arena_L_val.template triangularView()) .template triangularView(); }); return ret_type(res); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/read_cov_matrix.hpp0000644000176200001440000000442614645137106024231 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_READ_COV_MATRIX_HPP #define STAN_MATH_REV_FUN_READ_COV_MATRIX_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * A generally worse alternative to call prior to evaluating the * density of an elliptical distribution * * @tparam T_CPCs type of CPCs vector (must be a `var_value` where `T` * inherits from EigenBase) * @tparam T_sds type of sds vector (must be a `var_value` where `T` * inherits from EigenBase) * @param CPCs on (-1, 1) * @param sds on (0, inf) * @param log_prob the log probability value to increment with the Jacobian * @return Covariance matrix for specified partial correlations. 
*/ template * = nullptr> var_value read_cov_matrix(const T_CPCs& CPCs, const T_sds& sds, scalar_type_t& log_prob) { return multiply_lower_tri_self_transpose(read_cov_L(CPCs, sds, log_prob)); } /** * Builds a covariance matrix from CPCs and standard deviations * * @tparam T_CPCs type of CPCs vector (must be a `var_value` where `T` * inherits from EigenBase) * @tparam T_sds type of sds vector (must be a `var_value` where `T` * inherits from EigenBase) * @param CPCs in (-1, 1) * @param sds in (0, inf) */ template * = nullptr> inline var_value read_cov_matrix(const T_CPCs& CPCs, const T_sds& sds) { size_t K = sds.rows(); var_value corr_L = read_corr_L(CPCs, K); var_value prod = sds.val().matrix().asDiagonal() * corr_L.val(); reverse_pass_callback([sds, corr_L, prod]() mutable { corr_L.adj() += sds.val().matrix().asDiagonal() * prod.adj(); sds.adj() += (prod.adj().cwiseProduct(corr_L.val())).rowwise().sum(); }); return multiply_lower_tri_self_transpose(prod); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/csr_matrix_times_vector.hpp0000644000176200001440000001402214645137106026012 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CSR_MATRIX_TIMES_VECTOR_HPP #define STAN_MATH_REV_FUN_CSR_MATRIX_TIMES_VECTOR_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template struct csr_adjoint : public vari { std::decay_t res_; std::decay_t w_mat_; std::decay_t b_; template csr_adjoint(T1&& res, T2&& w_mat, T3&& b) : vari(0.0), res_(std::forward(res)), w_mat_(std::forward(w_mat)), b_(std::forward(b)) {} void chain() { chain_internal(res_, w_mat_, b_); } template * = nullptr, require_rev_matrix_t* = nullptr> void chain_internal(Result&& res, WMat&& w_mat, B&& b) { w_mat.adj() += res.adj() * b.val().transpose(); b.adj() += w_mat.val().transpose() * res.adj(); } template * = nullptr, require_not_rev_matrix_t* = nullptr> void chain_internal(Result&& res, WMat&& w_mat, B&& b) { 
w_mat.adj() += res.adj() * b.transpose(); } template * = nullptr, require_rev_matrix_t* = nullptr> void chain_internal(Result&& res, WMat&& w_mat, B&& b) { b.adj() += w_mat.transpose() * res.adj(); } }; template inline vari* make_csr_adjoint(Result_&& res, WMat_&& w_mat, B_&& b) { return new csr_adjoint(std::forward(res), std::forward(w_mat), std::forward(b)); } } // namespace internal /** * \addtogroup csr_format * Return the multiplication of the sparse matrix (specified by * by values and indexing) by the specified dense vector. * * The sparse matrix X of dimension m by n is represented by the * vector w (of values), the integer array v (containing one-based * column index of each value), the integer array u (containing * one-based indexes of where each row starts in w). * * @tparam T1 type of the sparse matrix * @tparam T2 type of the dense vector * @param m Number of rows in matrix. * @param n Number of columns in matrix. * @param w Vector of non-zero values in matrix. * @param v Column index of each non-zero value, same * length as w. * @param u Index of where each row starts in w, length equal to * the number of rows plus one. * @param b Eigen vector which the matrix is multiplied by. * @return Dense vector for the product. * @throw std::domain_error if m and n are not positive or are nan. * @throw std::domain_error if the implied sparse matrix and b are * not multiplicable. * @throw std::invalid_argument if m/n/w/v/u are not internally * consistent, as defined by the indexing scheme. Extractors are * defined in Stan which guarantee a consistent set of m/n/w/v/u * for a given sparse matrix. * @throw std::out_of_range if any of the indexes are out of range. 
*/ template * = nullptr> inline auto csr_matrix_times_vector(int m, int n, const T1& w, const std::vector& v, const std::vector& u, const T2& b) { using sparse_val_mat = Eigen::Map>; using sparse_dense_mul_type = decltype((std::declval() * value_of(b)).eval()); using return_t = return_var_matrix_t; check_positive("csr_matrix_times_vector", "m", m); check_positive("csr_matrix_times_vector", "n", n); check_size_match("csr_matrix_times_vector", "n", n, "b", b.size()); check_size_match("csr_matrix_times_vector", "w", w.size(), "v", v.size()); check_size_match("csr_matrix_times_vector", "m", m, "u", u.size() - 1); check_size_match("csr_matrix_times_vector", "u/z", u[m - 1] + csr_u_to_z(u, m - 1) - 1, "v", v.size()); for (int i : v) { check_range("csr_matrix_times_vector", "v[]", n, i); } std::vector> v_arena(v.size()); std::transform(v.begin(), v.end(), v_arena.begin(), [](auto&& x) { return x - 1; }); std::vector> u_arena(u.size()); std::transform(u.begin(), u.end(), u_arena.begin(), [](auto&& x) { return x - 1; }); using sparse_var_value_t = var_value>; if (!is_constant::value && !is_constant::value) { arena_t> b_arena = b; sparse_var_value_t w_mat_arena = to_soa_sparse_matrix(m, n, w, u_arena, v_arena); arena_t res = w_mat_arena.val() * value_of(b_arena); stan::math::internal::make_csr_adjoint(res, w_mat_arena, b_arena); return return_t(res); } else if (!is_constant::value) { arena_t> b_arena = b; auto w_val_arena = to_arena(value_of(w)); sparse_val_mat w_val_mat(m, n, w_val_arena.size(), u_arena.data(), v_arena.data(), w_val_arena.data()); arena_t res = w_val_mat * value_of(b_arena); stan::math::internal::make_csr_adjoint(res, w_val_mat, b_arena); return return_t(res); } else { sparse_var_value_t w_mat_arena = to_soa_sparse_matrix(m, n, w, u_arena, v_arena); auto b_arena = to_arena(value_of(b)); arena_t res = w_mat_arena.val() * b_arena; stan::math::internal::make_csr_adjoint(res, w_mat_arena, b_arena); return return_t(res); } } } // namespace math } // namespace 
stan #endif StanHeaders/inst/include/stan/math/rev/fun/ldexp.hpp0000644000176200001440000000102714645137106022171 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LDEXP_HPP #define STAN_MATH_REV_FUN_LDEXP_HPP #include #include namespace stan { namespace math { namespace { class ldexp_vari : public op_vd_vari { public: explicit ldexp_vari(vari* avi, int b) : op_vd_vari(ldexp(avi->val_, b), avi, b) {} void chain() { avi_->adj_ += ldexp(adj_, bd_); } }; } // namespace inline var ldexp(const var& a, int b) { return var(new ldexp_vari(a.vi_, b)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/sqrt.hpp0000644000176200001440000000353414645137106022053 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SQRT_HPP #define STAN_MATH_REV_FUN_SQRT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the square root of the specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \sqrt{x} = \frac{1}{2 \sqrt{x}}\f$. * \f[ \mbox{sqrt}(x) = \begin{cases} \textrm{NaN} & x < 0 \\ \sqrt{x} & \mbox{if } x\geq 0\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{sqrt}(x)}{\partial x} = \begin{cases} \textrm{NaN} & x < 0 \\ \frac{1}{2\sqrt{x}} & x\geq 0\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable whose square root is taken. * @return Square root of variable. 
*/ inline var sqrt(const var& a) { return make_callback_var(std::sqrt(a.val()), [a](auto& vi) mutable { a.adj() += vi.adj() / (2.0 * vi.val()); }); } /** * Return elementwise square root of vector * * @tparam T a `var_value` with inner Eigen type * @param a input * @return elementwise square root of vector */ template * = nullptr> inline auto sqrt(const T& a) { return make_callback_var( a.val().array().sqrt().matrix(), [a](auto& vi) mutable { a.adj().array() += vi.adj().array() / (2.0 * vi.val_op().array()); }); } /** * Return the square root of the complex argument. * * @param[in] z argument * @return square root of the argument */ inline std::complex sqrt(const std::complex& z) { return internal::complex_sqrt(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv_sqrt.hpp0000644000176200001440000000230214645137106022717 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_SQRT_HPP #define STAN_MATH_REV_FUN_INV_SQRT_HPP #include #include #include #include #include namespace stan { namespace math { /** * @tparam T Arithmetic or a type inheriting from `EigenBase`. 
* \f[ \mbox{inv\_sqrt}(x) = \begin{cases} \frac{1}{\sqrt{x}} & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{inv\_sqrt}(x)}{\partial x} = \begin{cases} -\frac{1}{2\sqrt{x^3}} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * */ template * = nullptr> inline auto inv_sqrt(const var_value& a) { auto denom = to_arena(as_array_or_scalar(a.val()) * as_array_or_scalar(sqrt(a.val()))); return make_callback_var(inv_sqrt(a.val()), [a, denom](auto& vi) mutable { as_array_or_scalar(a.adj()) -= 0.5 * as_array_or_scalar(vi.adj()) / denom; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/falling_factorial.hpp0000644000176200001440000000232214645137106024514 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FALLING_FACTORIAL_HPP #define STAN_MATH_REV_FUN_FALLING_FACTORIAL_HPP #include #include #include #include namespace stan { namespace math { inline var falling_factorial(const var& a, int b) { auto digamma_ab = digamma(a.val() + 1) - digamma(a.val() - b + 1); return make_callback_var(falling_factorial(a.val(), b), [a, digamma_ab](auto& vi) mutable { a.adj() += vi.adj() * vi.val() * digamma_ab; }); } template * = nullptr, require_st_integral* = nullptr> inline auto falling_factorial(const var_value& a, const T2& b) { auto b_map = as_array_or_scalar(b); auto digamma_ab = to_arena(digamma(a.val().array() + 1) - digamma(a.val().array() - b_map + 1)); return make_callback_var( falling_factorial(a.val(), b), [a, digamma_ab](auto& vi) mutable { a.adj().array() += vi.adj().array() * vi.val().array() * digamma_ab; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/if_else.hpp0000644000176200001440000000275114645137106022470 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_IF_ELSE_HPP #define STAN_MATH_REV_FUN_IF_ELSE_HPP #include #include namespace stan { namespace 
math { /** * If the specified condition is true, return the first * variable, otherwise return the second variable. * * @param c Boolean condition. * @param y_true Variable to return if condition is true. * @param y_false Variable to return if condition is false. */ inline var if_else(bool c, const var& y_true, const var& y_false) { return c ? y_true : y_false; } /** * If the specified condition is true, return a new variable * constructed from the first scalar, otherwise return the second * variable. * * @param c Boolean condition. * @param y_true Value to promote to variable and return if condition is true. * @param y_false Variable to return if condition is false. */ inline var if_else(bool c, double y_true, const var& y_false) { if (c) { return var(y_true); } else { return y_false; } } /** * If the specified condition is true, return the first variable, * otherwise return a new variable constructed from the second * scalar. * * @param c Boolean condition. * @param y_true Variable to return if condition is true. * @param y_false Value to promote to variable and return if condition is false. */ inline var if_else(bool c, const var& y_true, double y_false) { if (c) { return y_true; } else { return var(y_false); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log1m.hpp0000644000176200001440000000154014645137106022074 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG1M_HPP #define STAN_MATH_REV_FUN_LOG1M_HPP #include #include #include namespace stan { namespace math { /** * The log (1 - x) function for variables. * * The derivative is given by * * \f$\frac{d}{dx} \log (1 - x) = -\frac{1}{1 - x}\f$. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a The variable. * @return The variable representing log of 1 minus the variable. 
*/ template * = nullptr> inline auto log1m(const var_value& a) { return make_callback_var(log1m(a.val()), [a](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) / (as_array_or_scalar(a.val()) - 1.0); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fdim.hpp0000644000176200001440000000706714645137106022006 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FDIM_HPP #define STAN_MATH_REV_FUN_FDIM_HPP #include #include #include #include namespace stan { namespace math { namespace internal { class fdim_vv_vari : public op_vv_vari { public: fdim_vv_vari(vari* avi, vari* bvi) : op_vv_vari(avi->val_ - bvi->val_, avi, bvi) {} void chain() { if (unlikely(is_any_nan(avi_->val_, bvi_->val_))) { avi_->adj_ = NOT_A_NUMBER; bvi_->adj_ = NOT_A_NUMBER; } else { avi_->adj_ += adj_; bvi_->adj_ -= adj_; } } }; class fdim_vd_vari : public op_vd_vari { public: fdim_vd_vari(vari* avi, double b) : op_vd_vari(avi->val_ - b, avi, b) {} void chain() { if (unlikely(is_any_nan(avi_->val_, bd_))) { avi_->adj_ = NOT_A_NUMBER; } else { avi_->adj_ += adj_; } } }; class fdim_dv_vari : public op_dv_vari { public: fdim_dv_vari(double a, vari* bvi) : op_dv_vari(a - bvi->val_, a, bvi) {} void chain() { if (unlikely(is_any_nan(bvi_->val_, ad_))) { bvi_->adj_ = NOT_A_NUMBER; } else { bvi_->adj_ -= adj_; } } }; } // namespace internal /** * Return the positive difference between the first variable's the value * and the second's (C99, C++11). 
* * The function values and derivatives are defined by * \f[ \mbox{fdim}(x, y) = \begin{cases} x-y & \mbox{if } x > y \\[6pt] 0 & \mbox{otherwise} \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fdim}(x, y)}{\partial x} = \begin{cases} 1 & \mbox{if } x > y \\[6pt] 0 & \mbox{otherwise} \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fdim}(x, y)}{\partial y} = \begin{cases} -1 & \mbox{if } x > y \\[6pt] 0 & \mbox{otherwise} \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return The positive difference between the first and second * variable. */ inline var fdim(const var& a, const var& b) { // reversed test to get NaN vals automatically in second case return (a.vi_->val_ <= b.vi_->val_) ? var(new vari(0.0)) : var(new internal::fdim_vv_vari(a.vi_, b.vi_)); } /** * Return the positive difference between the first value and the * value of the second variable (C99, C++11). * * See fdim(var, var) for definitions of values and * derivatives. * * @param a First value. * @param b Second variable. * @return The positive difference between the first and second * arguments. */ inline var fdim(double a, const var& b) { // reversed test to get NaN vals automatically in second case return a <= b.vi_->val_ ? var(new vari(0.0)) : var(new internal::fdim_dv_vari(a, b.vi_)); } /** * Return the positive difference between the first variable's value * and the second value (C99, C++11). * * See fdim(var, var) for definitions of values and * derivatives. * * @param a First value. * @param b Second variable. * @return The positive difference between the first and second arguments. */ inline var fdim(const var& a, double b) { // reversed test to get NaN vals automatically in second case return a.vi_->val_ <= b ? 
var(new vari(0.0)) : var(new internal::fdim_vd_vari(a.vi_, b)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/primitive_value.hpp0000644000176200001440000000077014645137106024265 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_PRIMITIVE_VALUE_HPP #define STAN_MATH_REV_FUN_PRIMITIVE_VALUE_HPP #include #include #include namespace stan { namespace math { /** * Return the primitive double value for the specified autodiff * variable. * * @param v input variable. * @return value of input. */ inline double primitive_value(const var& v) { return v.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_mix.hpp0000644000176200001440000001113714645137106022516 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_MIX_HPP #define STAN_MATH_REV_FUN_LOG_MIX_HPP #include #include #include #include #include #include namespace stan { namespace math { /* Computes shared terms in log_mix partial derivative calculations * * @param[in] theta_val value of mixing proportion theta. * @param[in] lambda1_val value of log density multiplied by theta. * @param[in] lambda2_val value of log density multiplied by 1 - theta. * @param[out] one_m_exp_lam2_m_lam1 shared term in deriv calculation. * @param[out] one_m_t_prod_exp_lam2_m_lam1 shared term in deriv calculation. * @param[out] one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1 shared term in deriv * calculation. 
*/ inline void log_mix_partial_helper( double theta_val, double lambda1_val, double lambda2_val, double& one_m_exp_lam2_m_lam1, double& one_m_t_prod_exp_lam2_m_lam1, double& one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1) { using std::exp; double lam2_m_lam1 = lambda2_val - lambda1_val; double exp_lam2_m_lam1 = exp(lam2_m_lam1); one_m_exp_lam2_m_lam1 = 1 - exp_lam2_m_lam1; double one_m_t = 1 - theta_val; one_m_t_prod_exp_lam2_m_lam1 = one_m_t * exp_lam2_m_lam1; one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1 = 1 / (theta_val + one_m_t_prod_exp_lam2_m_lam1); } /** * Return the log mixture density with specified mixing proportion * and log densities and its derivative at each. * * \f[ * \mbox{log\_mix}(\theta, \lambda_1, \lambda_2) * = \log \left( \theta \exp(\lambda_1) + (1 - \theta) \exp(\lambda_2) \right). * \f] * * \f[ * \frac{\partial}{\partial \theta} * \mbox{log\_mix}(\theta, \lambda_1, \lambda_2) * = \dfrac{\exp(\lambda_1) - \exp(\lambda_2)} * {\left( \theta \exp(\lambda_1) + (1 - \theta) \exp(\lambda_2) \right)} * \f] * * \f[ * \frac{\partial}{\partial \lambda_1} * \mbox{log\_mix}(\theta, \lambda_1, \lambda_2) * = \dfrac{\theta \exp(\lambda_1)} * {\left( \theta \exp(\lambda_1) + (1 - \theta) \exp(\lambda_2) \right)} * \f] * * \f[ * \frac{\partial}{\partial \lambda_2} * \mbox{log\_mix}(\theta, \lambda_1, \lambda_2) * = \dfrac{\theta \exp(\lambda_2)} * {\left( \theta \exp(\lambda_1) + (1 - \theta) \exp(\lambda_2) \right)} * \f] * * @tparam T_theta theta scalar type. * @tparam T_lambda1 lambda1 scalar type. * @tparam T_lambda2 lambda2 scalar type. * * @param[in] theta mixing proportion in [0, 1]. * @param[in] lambda1 first log density. * @param[in] lambda2 second log density. 
* @return log mixture of densities in specified proportion */ template * = nullptr> inline return_type_t log_mix( const T_theta& theta, const T_lambda1& lambda1, const T_lambda2& lambda2) { using std::log; auto ops_partials = make_partials_propagator(theta, lambda1, lambda2); double theta_double = value_of(theta); const double lambda1_double = value_of(lambda1); const double lambda2_double = value_of(lambda2); double log_mix_function_value = log_mix(theta_double, lambda1_double, lambda2_double); double one_m_exp_lam2_m_lam1(0.0); double one_m_t_prod_exp_lam2_m_lam1(0.0); double one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1(0.0); if (lambda1 > lambda2) { log_mix_partial_helper(theta_double, lambda1_double, lambda2_double, one_m_exp_lam2_m_lam1, one_m_t_prod_exp_lam2_m_lam1, one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1); } else { log_mix_partial_helper(1.0 - theta_double, lambda2_double, lambda1_double, one_m_exp_lam2_m_lam1, one_m_t_prod_exp_lam2_m_lam1, one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1); one_m_exp_lam2_m_lam1 = -one_m_exp_lam2_m_lam1; theta_double = one_m_t_prod_exp_lam2_m_lam1; one_m_t_prod_exp_lam2_m_lam1 = 1.0 - value_of(theta); } if (!is_constant_all::value) { partials<0>(ops_partials)[0] = one_m_exp_lam2_m_lam1 * one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1; } if (!is_constant_all::value) { partials<1>(ops_partials)[0] = theta_double * one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1; } if (!is_constant_all::value) { partials<2>(ops_partials)[0] = one_m_t_prod_exp_lam2_m_lam1 * one_d_t_plus_one_m_t_prod_exp_lam2_m_lam1; } return ops_partials.build(log_mix_function_value); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/unit_vector_constrain.hpp0000644000176200001440000000547414645137106025510 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_UNIT_VECTOR_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_UNIT_VECTOR_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include #include #include #include 
namespace stan { namespace math { /** * Return the unit length vector corresponding to the free vector y. * See https://en.wikipedia.org/wiki/N-sphere#Generating_random_points * * @tparam EigMat type inheriting from `EigenBase` that has a `var` * scalar type. * @param y vector of K unrestricted variables * @return Unit length vector of dimension K **/ template * = nullptr> inline auto unit_vector_constrain(const T& y) { using ret_type = return_var_matrix_t; check_nonzero_size("unit_vector", "y", y); arena_t arena_y = y; arena_t> arena_y_val = arena_y.val(); const double r = arena_y_val.norm(); arena_t res = arena_y_val / r; reverse_pass_callback([arena_y, res, r, arena_y_val]() mutable { arena_y.adj() += res.adj() / r - arena_y_val * ((arena_y_val.array() * res.adj().array()).sum() / (r * r * r)); }); return ret_type(res); } /** * Return the unit length vector corresponding to the free vector y. * See https://en.wikipedia.org/wiki/N-sphere#Generating_random_points * * @tparam EigMat type inheriting from `EigenBase` that has a `var` * scalar type. * @param y vector of K unrestricted variables * @return Unit length vector of dimension K * @param lp Log probability reference to increment. **/ template * = nullptr> inline auto unit_vector_constrain(const T& y, var& lp) { const auto& y_ref = to_ref(y); auto x = unit_vector_constrain(y_ref); lp -= 0.5 * dot_self(y_ref); return x; } /** * Return the unit length vector corresponding to the free vector y. * See https://en.wikipedia.org/wiki/N-sphere#Generating_random_points * * @tparam EigMat type inheriting from `EigenBase` that has a `var` * scalar type. * @param y vector of K unrestricted variables * @return Unit length vector of dimension K * @param lp Log probability reference to increment. 
**/ template * = nullptr> inline auto unit_vector_constrain(const T& y, var& lp) { auto x = unit_vector_constrain(y); lp -= 0.5 * dot_self(y); return x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/eigenvectors_sym.hpp0000644000176200001440000000335614645137106024451 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_EIGENVECTORS_HPP #define STAN_MATH_REV_FUN_EIGENVECTORS_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the eigenvectors of the specified symmetric matrix. * * @tparam T type of input matrix. * @param m Specified matrix. * @return Eigenvectors of matrix. */ template * = nullptr> inline auto eigenvectors_sym(const T& m) { using return_t = return_var_matrix_t; if (unlikely(m.size() == 0)) { return return_t(Eigen::MatrixXd(0, 0)); } check_symmetric("eigenvectors_sym", "m", m); auto arena_m = to_arena(m); Eigen::SelfAdjointEigenSolver solver(arena_m.val()); arena_t eigenvecs = solver.eigenvectors(); auto eigenvals = to_arena(solver.eigenvalues()); reverse_pass_callback([arena_m, eigenvals, eigenvecs]() mutable { const auto p = arena_m.val().cols(); Eigen::MatrixXd f = (1 / (eigenvals.rowwise().replicate(p).transpose() - eigenvals.rowwise().replicate(p)) .array()); f.diagonal().setZero(); arena_m.adj() += eigenvecs.val_op() * f.cwiseProduct(eigenvecs.val_op().transpose() * eigenvecs.adj_op()) * eigenvecs.val_op().transpose(); }); return return_t(eigenvecs); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log2.hpp0000644000176200001440000000257714645137106021733 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG2_HPP #define STAN_MATH_REV_FUN_LOG2_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the base 2 logarithm of the specified variable (C99). * * See log2() for the double-based version. 
* * The derivative is * * \f$\frac{d}{dx} \log_2 x = \frac{1}{x \log 2}\f$. * \f[ \mbox{log2}(x) = \begin{cases} \textrm{NaN} & \mbox{if } x < 0 \\ \log_2(x) & \mbox{if } x\geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log2}(x)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < 0 \\ \frac{1}{x\ln2} & \mbox{if } x\geq 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a The variable. * @return Base 2 logarithm of the variable. */ template * = nullptr> inline auto log2(const var_value& a) { return make_callback_var(log2(a.val()), [a](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) / (LOG_TWO * as_array_or_scalar(a.val())); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/erfc.hpp0000644000176200001440000000335714645137106022004 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ERFC_HPP #define STAN_MATH_REV_FUN_ERFC_HPP #include #include #include #include #include namespace stan { namespace math { /** * The complementary error function for variables (C99). * * The derivative is * * \f$\frac{d}{dx} \mbox{erfc}(x) = - \frac{2}{\sqrt{\pi}} \exp(-x^2)\f$. * * \f[ \mbox{erfc}(x) = \begin{cases} \operatorname{erfc}(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{erfc}(x)}{\partial x} = \begin{cases} \frac{\partial\, \operatorname{erfc}(x)}{\partial x} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \operatorname{erfc}(x)=\frac{2}{\sqrt{\pi}}\int_x^\infty e^{-t^2}dt \f] \f[ \frac{\partial \, \operatorname{erfc}(x)}{\partial x} = -\frac{2}{\sqrt{\pi}} e^{-x^2} \f] * * @param a The variable. * @return Complementary error function applied to the variable. 
*/ inline var erfc(const var& a) { auto precomp_erfc = TWO_OVER_SQRT_PI * std::exp(-a.val() * a.val()); return make_callback_var(erfc(a.val()), [a, precomp_erfc](auto& vi) mutable { a.adj() -= vi.adj() * precomp_erfc; }); } template * = nullptr> inline auto erfc(const var_value& a) { auto precomp_erf = to_arena(TWO_OVER_SQRT_PI * (-a.val().array().square()).exp()); return make_callback_var(erfc(a.val()), [a, precomp_erf](auto& vi) mutable { a.adj().array() -= vi.adj().array() * precomp_erf; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/fmax.hpp0000644000176200001440000000742214645137106022015 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_FMAX_HPP #define STAN_MATH_REV_FUN_FMAX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the maximum of the two variable arguments (C99). * * No new variable implementations are created, with this function * defined as if by * * fmax(a, b) = a if a's value is greater than b's, and . * * fmax(a, b) = b if b's value is greater than or equal to a's. 
* \f[ \mbox{fmax}(x, y) = \begin{cases} x & \mbox{if } x \geq y \\ y & \mbox{if } x < y \\[6pt] x & \mbox{if } -\infty\leq x\leq \infty, y = \textrm{NaN}\\ y & \mbox{if } -\infty\leq y\leq \infty, x = \textrm{NaN}\\ \textrm{NaN} & \mbox{if } x, y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fmax}(x, y)}{\partial x} = \begin{cases} 1 & \mbox{if } x \geq y \\ 0 & \mbox{if } x < y \\[6pt] 1 & \mbox{if } -\infty\leq x\leq \infty, y = \textrm{NaN}\\ 0 & \mbox{if } -\infty\leq y\leq \infty, x = \textrm{NaN}\\ \textrm{NaN} & \mbox{if } x, y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{fmax}(x, y)}{\partial y} = \begin{cases} 0 & \mbox{if } x \geq y \\ 1 & \mbox{if } x < y \\[6pt] 0 & \mbox{if } -\infty\leq x\leq \infty, y = \textrm{NaN}\\ 1 & \mbox{if } -\infty\leq y\leq \infty, x = \textrm{NaN}\\ \textrm{NaN} & \mbox{if } x, y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return If the first variable's value is larger than the * second's, the first variable, otherwise the second variable. */ inline var fmax(const var& a, const var& b) { if (unlikely(is_nan(a))) { if (unlikely(is_nan(b))) { return make_callback_var(NOT_A_NUMBER, [a, b](auto& vi) mutable { a.adj() = NOT_A_NUMBER; b.adj() = NOT_A_NUMBER; }); } return b; } if (unlikely(is_nan(b))) { return a; } return a > b ? a : b; } /** * Returns the maximum of the variable and scalar, promoting the * scalar to a variable if it is larger (C99). * * For fmax(a, b), if a's value is greater than b, * then a is returned, otherwise a fresh variable implementation * wrapping the value b is returned. * * @param a First variable. * @param b Second value * @return If the first variable's value is larger than or equal * to the second value, the first variable, otherwise the second * value promoted to a fresh variable. 
*/ inline var fmax(const var& a, double b) { if (unlikely(is_nan(a))) { if (unlikely(is_nan(b))) { return make_callback_var( NOT_A_NUMBER, [a](auto& vi) mutable { a.adj() = NOT_A_NUMBER; }); } return var(b); } if (unlikely(is_nan(b))) { return a; } return a >= b ? a : var(b); } /** * Returns the maximum of a scalar and variable, promoting the scalar to * a variable if it is larger (C99). * * For fmax(a, b), if a is greater than b's value, * then a fresh variable implementation wrapping a is returned, otherwise * b is returned. * * @param a First value. * @param b Second variable. * @return If the first value is larger than the second variable's value, * return the first value promoted to a variable, otherwise return the * second variable. */ inline var fmax(double a, const var& b) { if (unlikely(is_nan(b))) { if (unlikely(is_nan(a))) { return make_callback_var( NOT_A_NUMBER, [b](auto& vi) mutable { b.adj() = NOT_A_NUMBER; }); } return var(a); } if (unlikely(is_nan(a))) { return b; } return a > b ? var(a) : b; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/diag_post_multiply.hpp0000644000176200001440000000453614645137106024775 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DIAG_POST_MULTIPLY_HPP #define STAN_MATH_REV_FUN_DIAG_POST_MULTIPLY_HPP #include #include #include namespace stan { namespace math { /** * Return the product of the matrix and a diagonal matrix formed from the vector * or row_vector. * * @tparam T1 type of the matrix * @tparam T2 type of the vector/row_vector * @param m1 input matrix * @param m2 input vector/row_vector * * @return product of the matrix and the diagonal matrix formed from the * vector or row_vector. 
*/ template * = nullptr, require_vector_t* = nullptr, require_any_st_var* = nullptr> auto diag_post_multiply(const T1& m1, const T2& m2) { check_size_match("diag_post_multiply", "m2.size()", m2.size(), "m1.cols()", m1.cols()); using inner_ret_type = decltype(value_of(m1) * value_of(m2).asDiagonal()); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = m2; arena_t ret(arena_m1.val() * arena_m2.val().asDiagonal()); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m2.adj() += arena_m1.val().cwiseProduct(ret.adj()).colwise().sum(); arena_m1.adj() += ret.adj() * arena_m2.val().asDiagonal(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = value_of(m2); arena_t ret(arena_m1.val() * arena_m2.asDiagonal()); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m1.adj() += ret.adj() * arena_m2.val().asDiagonal(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = value_of(m1); arena_t> arena_m2 = m2; arena_t ret(arena_m1 * arena_m2.val().asDiagonal()); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m2.adj() += arena_m1.val().cwiseProduct(ret.adj()).colwise().sum(); }); return ret_type(ret); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/polar.hpp0000644000176200001440000000305314645137106022173 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_POLAR_HPP #define STAN_MATH_REV_FUN_POLAR_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns complex number with specified magnitude and phase angle. 
* * @param[in] r magnitude * @param[in] theta phase angle * @return complex number with magnitude and phase angle */ inline std::complex polar(const var& r, const var& theta) { return internal::complex_polar(r, theta); } /** * Returns complex number with specified magnitude and phase angle. * * @tparam T arithmetic type of magnitude * @param[in] r magnitude * @param[in] theta phase angle * @return complex number with magnitude and phase angle */ template inline std::complex polar(T r, const var& theta) { return internal::complex_polar(r, theta); } /** * Returns complex number with specified magnitude and phase angle. * * @tparam T arithmetic type of phase angle * @param[in] r magnitude * @param[in] theta phase angle * @return complex number with magnitude and phase angle */ template inline std::complex polar(const var& r, T theta) { return internal::complex_polar(r, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/value_of_rec.hpp0000644000176200001440000000070014645137106023503 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_VALUE_OF_REC_HPP #define STAN_MATH_REV_FUN_VALUE_OF_REC_HPP #include #include namespace stan { namespace math { /** * Return the value of the specified variable. * * @param v Variable. * @return Value of variable. 
*/ template inline auto& value_of_rec(const var_value& v) { return v.vi_->val_; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_softmax.hpp0000644000176200001440000000710014645137106023375 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_SOFTMAX_HPP #define STAN_MATH_REV_FUN_LOG_SOFTMAX_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class log_softmax_elt_vari : public vari { private: vari** alpha_; const double* softmax_alpha_; const int size_; // array sizes const int idx_; // in in softmax output public: log_softmax_elt_vari(double val, vari** alpha, const double* softmax_alpha, int size, int idx) : vari(val), alpha_(alpha), softmax_alpha_(softmax_alpha), size_(size), idx_(idx) {} void chain() { for (int m = 0; m < size_; ++m) { if (m == idx_) { alpha_[m]->adj_ += adj_ * (1 - softmax_alpha_[m]); } else { alpha_[m]->adj_ -= adj_ * softmax_alpha_[m]; } } } }; } // namespace internal /** * Return the log softmax of the specified vector * * @tparam T type of input * @param x input * @return softmax of the input * @throw std::domain_error if the input size is 0 */ template * = nullptr> auto log_softmax(const T& x) { const int a_size = x.size(); check_nonzero_size("log_softmax", "x", x); const auto& x_ref = to_ref(x); vari** x_vi_array = ChainableStack::instance_->memalloc_.alloc_array(a_size); Eigen::Map(x_vi_array, a_size) = x_ref.vi(); vector_d x_d = x_ref.val(); // fold logic of math::softmax() and math::log_softmax() // to save computations vector_d diff = (x_d.array() - x_d.maxCoeff()); vector_d softmax_x_d = diff.array().exp(); double sum = softmax_x_d.sum(); vector_d log_softmax_x_d = diff.array() - std::log(sum); // end fold double* softmax_x_d_array = ChainableStack::instance_->memalloc_.alloc_array(a_size); Eigen::Map(softmax_x_d_array, a_size) = softmax_x_d.array() / sum; plain_type_t 
log_softmax_x(a_size); for (int k = 0; k < a_size; ++k) { log_softmax_x(k) = var(new internal::log_softmax_elt_vari( log_softmax_x_d[k], x_vi_array, softmax_x_d_array, a_size, k)); } return log_softmax_x; } /** * Return the log softmax of the specified vector * * @tparam T type of input * @param x input * @return softmax of the input * @throw std::domain_error if the input size is 0 */ template * = nullptr> inline auto log_softmax(const T& x) { check_nonzero_size("log_softmax", "x", x); const auto& theta = (x.val().array() - x.val().maxCoeff()).eval(); return make_callback_var( (theta.array() - log(theta.exp().sum())).matrix(), [x](const auto& res) mutable { x.adj().noalias() += res.adj() - (res.adj().sum() * res.val().array().exp()).matrix(); }); } /** * Return the log softmax of the specified `std::vector` or * `std::vector` of containers. * * @tparam T type of input * @param x input * @return softmax of the input * @throw std::domain_error if the input size is 0 */ template * = nullptr> inline auto log_softmax(const T& x) { return apply_vector_unary::apply( x, [](const auto& alpha) { return log_softmax(alpha); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/elt_multiply.hpp0000644000176200001440000000511714645137106023604 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ELT_MULTIPLY_HPP #define STAN_MATH_REV_FUN_ELT_MULTIPLY_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the elementwise multiplication of the specified * matrices. * * @tparam Mat1 type of the first matrix or expression * @tparam Mat2 type of the second matrix or expression * * @param m1 First matrix or expression * @param m2 Second matrix or expression * @return Elementwise product of matrices. 
*/ template * = nullptr, require_any_rev_matrix_t* = nullptr> auto elt_multiply(const Mat1& m1, const Mat2& m2) { check_matching_dims("elt_multiply", "m1", m1, "m2", m2); using inner_ret_type = decltype(value_of(m1).cwiseProduct(value_of(m2))); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = m2; arena_t ret(arena_m1.val().cwiseProduct(arena_m2.val())); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { for (Eigen::Index j = 0; j < arena_m2.cols(); ++j) { for (Eigen::Index i = 0; i < arena_m2.rows(); ++i) { const auto ret_adj = ret.adj().coeffRef(i, j); arena_m1.adj().coeffRef(i, j) += arena_m2.val().coeff(i, j) * ret_adj; arena_m2.adj().coeffRef(i, j) += arena_m1.val().coeff(i, j) * ret_adj; } } }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = value_of(m2); arena_t ret(arena_m1.val().cwiseProduct(arena_m2)); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m1.adj().array() += arena_m2.array() * ret.adj().array(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = value_of(m1); arena_t> arena_m2 = m2; arena_t ret(arena_m1.cwiseProduct(arena_m2.val())); reverse_pass_callback([ret, arena_m2, arena_m1]() mutable { arena_m2.adj().array() += arena_m1.array() * ret.adj().array(); }); return ret_type(ret); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/initialize_variable.hpp0000644000176200001440000000224514645137106025066 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INITIALIZE_VARIABLE_HPP #define STAN_MATH_REV_FUN_INITIALIZE_VARIABLE_HPP #include #include #include #include namespace stan { namespace math { /** * Initialize variable to value. (Function may look pointless, but * it's needed to bottom out recursion.) 
*/ inline void initialize_variable(var& variable, const var& value) { variable = value; } /** * Initialize every cell in the matrix to the specified value. * * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic */ template inline void initialize_variable(Eigen::Matrix& matrix, const var& value) { matrix.fill(value); } /** * Initialize the variables in the standard vector recursively. * * @tparam T type of elements in the vector */ template inline void initialize_variable(std::vector& variables, const var& value) { for (size_t i = 0; i < variables.size(); ++i) { initialize_variable(variables[i], value); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/asinh.hpp0000644000176200001440000000505514645137106022164 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ASINH_HPP #define STAN_MATH_REV_FUN_ASINH_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * The inverse hyperbolic sine function for variables (C99). * * The derivative is defined by * * \f$\frac{d}{dx} \mbox{asinh}(x) = \frac{x}{x^2 + 1}\f$. * * \f[ \mbox{asinh}(x) = \begin{cases} \sinh^{-1}(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{asinh}(x)}{\partial x} = \begin{cases} \frac{\partial\, \sinh^{-1}(x)}{\partial x} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \sinh^{-1}(x)=\ln\left(x+\sqrt{x^2+1}\right) \f] \f[ \frac{\partial \, \sinh^{-1}(x)}{\partial x} = \frac{1}{\sqrt{x^2+1}} \f] * * @param x The variable. * @return Inverse hyperbolic sine of the variable. 
*/ inline var asinh(const var& x) { return make_callback_var(std::asinh(x.val()), [x](const auto& vi) mutable { x.adj() += vi.adj() / std::sqrt(x.val() * x.val() + 1.0); }); } /** * The inverse hyperbolic sine function for variables (C99). * * @tparam Varmat a `var_value` with inner Eigen type * @param x The variable. * @return Inverse hyperbolic sine of the variable. */ template * = nullptr> inline auto asinh(const VarMat& x) { return make_callback_var( x.val().unaryExpr([](const auto x) { return asinh(x); }), [x](const auto& vi) mutable { x.adj().array() += vi.adj().array() / (x.val().array().square() + 1.0).sqrt(); }); } /** * Return the hyperbolic arcsine of the complex argument. * * @param[in] z argument * @return hyperbolic arcsine of the argument */ inline std::complex asinh(const std::complex& z) { return stan::math::internal::complex_asinh(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/value_of.hpp0000644000176200001440000000143714645137106022662 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_VALUE_OF_HPP #define STAN_MATH_REV_FUN_VALUE_OF_HPP #include #include namespace stan { namespace math { /** * Return the value of the specified variable. * *

This function is used internally by autodiff functions along * with value_of(T x) to extract the * double value of either a scalar or an autodiff * variable. This function will be called when the argument is a * var even if the function is not * referred to by namespace because of argument-dependent lookup. * * @param v Variable. * @return Value of variable. */ template inline auto& value_of(const var_value& v) { return v.vi_->val_; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/norm.hpp0000644000176200001440000000100014645137106022017 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_NORM_HPP #define STAN_MATH_REV_FUN_NORM_HPP #include #include #include #include namespace stan { namespace math { /** * Return the squared magnitude of the complex argument. * * @param[in] z argument * @return squared magnitude of the argument */ inline var norm(const std::complex& z) { return internal::complex_norm(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inv_cloglog.hpp0000644000176200001440000000227314645137106023363 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INV_CLOGLOG_HPP #define STAN_MATH_REV_FUN_INV_CLOGLOG_HPP #include #include #include #include namespace stan { namespace math { /** * Return the inverse complementary log-log function applied * specified variable (stan). * * See inv_cloglog() for the double-based version. * * The derivative is given by * * \f$\frac{d}{dx} \mbox{cloglog}^{-1}(x) = \exp (x - \exp (x))\f$. * * @tparam T Arithmetic or a type inheriting from `EigenBase`. * @param a Variable argument. * @return The inverse complementary log-log of the specified * argument. 
*/ template * = nullptr> inline auto inv_cloglog(const var_value& a) { auto precomp_exp = to_arena(as_array_or_scalar(exp(a.val() - exp(a.val())))); return make_callback_var(inv_cloglog(a.val()), [a, precomp_exp](auto& vi) mutable { as_array_or_scalar(a.adj()) += as_array_or_scalar(vi.adj()) * precomp_exp; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/asin.hpp0000644000176200001440000000451114645137106022010 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ASIN_HPP #define STAN_MATH_REV_FUN_ASIN_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the principal value of the arc sine, in radians, of the * specified variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \arcsin x = \frac{1}{\sqrt{1 - x^2}}\f$. * * \f[ \mbox{asin}(x) = \begin{cases} \textrm{NaN} & \mbox{if } x < -1\\ \arcsin(x) & \mbox{if } -1\leq x\leq 1 \\ \textrm{NaN} & \mbox{if } x > 1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{asin}(x)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < -1\\ \frac{\partial\, \arcsin(x)}{\partial x} & \mbox{if } -1\leq x\leq 1 \\ \textrm{NaN} & \mbox{if } x < -1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial \, \arcsin(x)}{\partial x} = \frac{1}{\sqrt{1-x^2}} \f] * * @param x Variable in range [-1, 1]. * @return Arc sine of variable, in radians. */ inline var asin(const var& x) { return make_callback_var(std::asin(x.val()), [x](const auto& vi) mutable { x.adj() += vi.adj() / std::sqrt(1.0 - (x.val() * x.val())); }); } /** * Return the principal value of the arc sine, in radians, of the * specified variable (cmath). * * @tparam Varmat a `var_value` with inner Eigen type * @param x Variable with cells in range [-1, 1]. * @return Arc sine of variable, in radians. 
*/ template * = nullptr> inline auto asin(const VarMat& x) { return make_callback_var( x.val().array().asin().matrix(), [x](const auto& vi) mutable { x.adj().array() += vi.adj().array() / (1.0 - (x.val().array().square())).sqrt(); }); } /** * Return the arc sine of the complex argument. * * @param[in] z argument * @return arc sine of the argument */ inline std::complex asin(const std::complex& z) { return stan::math::internal::complex_asin(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/sin.hpp0000644000176200001440000000371014645137106021647 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_SIN_HPP #define STAN_MATH_REV_FUN_SIN_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the sine of a radian-scaled variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \sin x = \cos x\f$. * * \f[ \mbox{sin}(x) = \begin{cases} \sin(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{sin}(x)}{\partial x} = \begin{cases} \cos(x) & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable for radians of angle. * @return Sine of variable. */ inline var sin(const var& a) { return make_callback_var(std::sin(a.val()), [a](const auto& vi) mutable { a.adj() += vi.adj() * std::cos(a.val()); }); } /** * Return the sine of a radian-scaled variable (cmath). * * @tparam Varmat a `var_value` with inner Eigen type * @param a Variable for radians of angle. * @return Sine of variable. */ template * = nullptr> inline auto sin(const VarMat& a) { return make_callback_var( a.val().array().sin().matrix(), [a](const auto& vi) mutable { a.adj() += vi.adj().cwiseProduct(a.val().array().cos().matrix()); }); } /** * Return the sine of the complex argument. 
* * @param[in] z argument * @return sine of the argument */ inline std::complex sin(const std::complex& z) { return stan::math::internal::complex_sin(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/eigendecompose_sym.hpp0000644000176200001440000000471514645137106024742 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_EIGENDECOMPOSE_HPP #define STAN_MATH_REV_FUN_EIGENDECOMPOSE_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the decomposition of the specified symmetric matrix. * * @tparam T type of input matrix. * @param m Specified matrix. * @return A tuple V,D where V is a matrix where the columns are the * eigenvectors of m, and D is a column vector of the eigenvalues of m. * The eigenvalues are in ascending order of magnitude, with the eigenvectors * provided in the same order. */ template * = nullptr> inline auto eigendecompose_sym(const T& m) { using eigval_return_t = return_var_matrix_t; using eigvec_return_t = return_var_matrix_t; if (unlikely(m.size() == 0)) { return std::make_tuple(eigvec_return_t(Eigen::MatrixXd(0, 0)), eigval_return_t(Eigen::VectorXd(0))); } check_symmetric("eigendecompose_sym", "m", m); auto arena_m = to_arena(m); Eigen::SelfAdjointEigenSolver solver(arena_m.val()); arena_t eigenvals = solver.eigenvalues(); arena_t eigenvecs = solver.eigenvectors(); reverse_pass_callback([eigenvals, arena_m, eigenvecs]() mutable { // eigenvalue reverse calculation auto value_adj = eigenvecs.val_op() * eigenvals.adj().asDiagonal() * eigenvecs.val_op().transpose(); // eigenvector reverse calculation const auto p = arena_m.val().cols(); Eigen::MatrixXd f = (1 / (eigenvals.val_op().rowwise().replicate(p).transpose() - eigenvals.val_op().rowwise().replicate(p)) .array()); f.diagonal().setZero(); auto vector_adj = eigenvecs.val_op() * f.cwiseProduct(eigenvecs.val_op().transpose() * eigenvecs.adj_op()) * eigenvecs.val_op().transpose(); 
arena_m.adj() += value_adj + vector_adj; }); return std::make_tuple(std::move(eigvec_return_t(eigenvecs)), std::move(eigval_return_t(eigenvals))); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/multiply.hpp0000644000176200001440000002003714645137106022736 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MULTIPLY_HPP #define STAN_MATH_REV_FUN_MULTIPLY_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the product of two matrices. * * This version does not handle row vector times column vector * * @tparam T1 type of first matrix * @tparam T2 type of second matrix * * * @param[in] A first matrix * @param[in] B second matrix * @return A * B */ template * = nullptr, require_return_type_t* = nullptr, require_not_row_and_col_vector_t* = nullptr> inline auto multiply(const T1& A, const T2& B) { check_multiplicable("multiply", "A", A, "B", B); if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; auto arena_A_val = to_arena(arena_A.val()); auto arena_B_val = to_arena(arena_B.val()); using return_t = return_var_matrix_t; arena_t res = arena_A_val * arena_B_val; reverse_pass_callback( [arena_A, arena_B, arena_A_val, arena_B_val, res]() mutable { if (is_var_matrix::value || is_var_matrix::value) { arena_A.adj() += res.adj_op() * arena_B_val.transpose(); arena_B.adj() += arena_A_val.transpose() * res.adj_op(); } else { auto res_adj = res.adj().eval(); arena_A.adj() += res_adj * arena_B_val.transpose(); arena_B.adj() += arena_A_val.transpose() * res_adj; } }); return return_t(res); } else if (!is_constant::value) { arena_t> arena_A = value_of(A); arena_t> arena_B = B; using return_t = return_var_matrix_t; arena_t res = arena_A * arena_B.val_op(); reverse_pass_callback([arena_B, arena_A, res]() mutable { arena_B.adj() += arena_A.transpose() * res.adj_op(); }); return return_t(res); } else { arena_t> arena_A = A; arena_t> arena_B = value_of(B); using 
return_t = return_var_matrix_t; arena_t res = arena_A.val_op() * arena_B; reverse_pass_callback([arena_A, arena_B, res]() mutable { arena_A.adj() += res.adj_op() * arena_B.transpose(); }); return return_t(res); } } /** * Return the product of a row vector times a column vector as a scalar * * @tparam T1 type of row vector * @tparam T2 type of column vector * * @param[in] A row vector * @param[in] B column vector * @return A * B as a scalar */ template * = nullptr, require_return_type_t* = nullptr, require_row_and_col_vector_t* = nullptr> inline var multiply(const T1& A, const T2& B) { check_multiplicable("multiply", "A", A, "B", B); if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; arena_t> arena_A_val = value_of(arena_A); arena_t> arena_B_val = value_of(arena_B); var res = arena_A_val.dot(arena_B_val); reverse_pass_callback( [arena_A, arena_B, arena_A_val, arena_B_val, res]() mutable { auto res_adj = res.adj(); arena_A.adj().array() += res_adj * arena_B_val.transpose().array(); arena_B.adj().array() += arena_A_val.transpose().array() * res_adj; }); return res; } else if (!is_constant::value) { arena_t> arena_B = B; arena_t> arena_A_val = value_of(A); var res = arena_A_val.dot(value_of(arena_B)); reverse_pass_callback([arena_B, arena_A_val, res]() mutable { arena_B.adj().array() += arena_A_val.transpose().array() * res.adj(); }); return res; } else { arena_t> arena_A = A; arena_t> arena_B_val = value_of(B); var res = value_of(arena_A).dot(arena_B_val); reverse_pass_callback([arena_A, arena_B_val, res]() mutable { arena_A.adj().array() += res.adj() * arena_B_val.transpose().array(); }); return res; } } /** * Return specified matrix multiplied by specified scalar where at least one * input has a scalar type of a `var_value`. 
* * @tparam T1 type of the scalar * @tparam T2 type of the matrix or expression * * @param A scalar * @param B matrix * @return product of matrix and scalar */ template * = nullptr, require_matrix_t* = nullptr, require_return_type_t* = nullptr, require_not_row_and_col_vector_t* = nullptr> inline auto multiply(const T1& A, const T2& B) { if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; using return_t = return_var_matrix_t; arena_t res = arena_A.val() * arena_B.val().array(); reverse_pass_callback([arena_A, arena_B, res]() mutable { const auto a_val = arena_A.val(); for (Eigen::Index j = 0; j < res.cols(); ++j) { for (Eigen::Index i = 0; i < res.rows(); ++i) { const auto res_adj = res.adj().coeffRef(i, j); arena_A.adj() += res_adj * arena_B.val().coeff(i, j); arena_B.adj().coeffRef(i, j) += a_val * res_adj; } } }); return return_t(res); } else if (!is_constant::value) { arena_t> arena_A = value_of(A); arena_t> arena_B = B; using return_t = return_var_matrix_t; arena_t res = arena_A * arena_B.val().array(); reverse_pass_callback([arena_A, arena_B, res]() mutable { arena_B.adj().array() += arena_A * res.adj().array(); }); return return_t(res); } else { arena_t> arena_A = A; arena_t> arena_B = value_of(B); using return_t = return_var_matrix_t; arena_t res = arena_A.val() * arena_B.array(); reverse_pass_callback([arena_A, arena_B, res]() mutable { arena_A.adj() += (res.adj().array() * arena_B.array()).sum(); }); return return_t(res); } } /** * Return specified matrix multiplied by specified scalar where at least one * input has a scalar type of a `var_value`. 
* * @tparam T1 type of the matrix or expression * @tparam T2 type of the scalar * * @param A matrix * @param B scalar * @return product of matrix and scalar */ template * = nullptr, require_not_matrix_t* = nullptr, require_any_st_var* = nullptr, require_not_complex_t>* = nullptr, require_not_complex_t>* = nullptr, require_not_row_and_col_vector_t* = nullptr> inline auto multiply(const T1& A, const T2& B) { return multiply(B, A); } /** * Operator overload for multiplying a `var_value`. At least one input * must be a `var_value` type. * @tparam T1 Either a `var_value`, type inheriting from * `Eigen::DenseMatrix`, or a scalar * @tparam T2 Either a `var_value`, type inheriting from * `Eigen::DenseMatrix`, or a scalar * @param a The left hand side of the multiplication * @param b The right hand side of the multiplication */ template * = nullptr> inline auto operator*(const T1& a, const T2& b) { return multiply(a, b); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/acosh.hpp0000644000176200001440000000517414645137106022161 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ACOSH_HPP #define STAN_MATH_REV_FUN_ACOSH_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * The inverse hyperbolic cosine function for variables (C99). * * For non-variable function, see ::acosh(). * * The derivative is defined by * * \f$\frac{d}{dx} \mbox{acosh}(x) = \frac{x}{x^2 - 1}\f$. 
* * \f[ \mbox{acosh}(x) = \begin{cases} \textrm{NaN} & \mbox{if } x < 1 \\ \cosh^{-1}(x) & \mbox{if } x \geq 1 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{acosh}(x)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x < 1 \\ \frac{\partial\, \cosh^{-1}(x)}{\partial x} & \mbox{if } x \geq 1 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \cosh^{-1}(x)=\ln\left(x+\sqrt{x^2-1}\right) \f] \f[ \frac{\partial \, \cosh^{-1}(x)}{\partial x} = \frac{1}{\sqrt{x^2-1}} \f] * * @param x The variable. * @return Inverse hyperbolic cosine of the variable. */ inline var acosh(const var& x) { return make_callback_var(acosh(x.val()), [x](const auto& vi) mutable { x.adj() += vi.adj() / std::sqrt(x.val() * x.val() - 1.0); }); } /** * The inverse hyperbolic cosine function for variables (C99). * * For non-variable function, see ::acosh(). * * @tparam Varmat a `var_value` with inner Eigen type * @param x The variable * @return Inverse hyperbolic cosine of the variable. */ template * = nullptr> inline auto acosh(const VarMat& x) { return make_callback_var( x.val().unaryExpr([](const auto x) { return acosh(x); }), [x](const auto& vi) mutable { x.adj().array() += vi.adj().array() / (x.val().array().square() - 1.0).sqrt(); }); } /** * Return the hyperbolic arc cosine of the complex argument. * * @param[in] z argument * @return hyperbolic arc cosine of the argument */ inline std::complex acosh(const std::complex& z) { return stan::math::internal::complex_acosh(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/Phi_approx.hpp0000644000176200001440000000451114645137106023167 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_PHI_APPROX_HPP #define STAN_MATH_REV_FUN_PHI_APPROX_HPP #include #include #include namespace stan { namespace math { /** * Approximation of the unit normal CDF for variables (stan). 
* * http://www.jiem.org/index.php/jiem/article/download/60/27 * * \f[ \mbox{Phi\_approx}(x) = \begin{cases} \Phi_{\mbox{\footnotesize approx}}(x) & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{Phi\_approx}(x)}{\partial x} = \begin{cases} \frac{\partial\, \Phi_{\mbox{\footnotesize approx}}(x)}{\partial x} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \Phi_{\mbox{\footnotesize approx}}(x) = \mbox{logit}^{-1}(0.07056 \, x^3 + 1.5976 \, x) \f] \f[ \frac{\partial \, \Phi_{\mbox{\footnotesize approx}}(x)}{\partial x} = -\Phi_{\mbox{\footnotesize approx}}^2(x) e^{-0.07056x^3 - 1.5976x}(-0.21168x^2-1.5976) \f] * * @param a Variable argument. * @return The corresponding unit normal cdf approximation. */ inline var Phi_approx(const var& a) { double av_squared = a.val() * a.val(); double f = inv_logit(0.07056 * a.val() * av_squared + 1.5976 * a.val()); double da = f * (1 - f) * (3.0 * 0.07056 * av_squared + 1.5976); return make_callback_var( f, [a, da](auto& vi) mutable { a.adj() += vi.adj() * da; }); } template * = nullptr> inline auto Phi_approx(const T& a) { arena_t> f(a.rows(), a.cols()); arena_t> da(a.rows(), a.cols()); for (Eigen::Index j = 0; j < a.cols(); ++j) { for (Eigen::Index i = 0; i < a.rows(); ++i) { const auto a_val = a.val().coeff(i, j); const auto av_squared = a_val * a_val; f.coeffRef(i, j) = inv_logit(0.07056 * a_val * av_squared + 1.5976 * a.val().coeff(i, j)); da.coeffRef(i, j) = f.coeff(i, j) * (1 - f.coeff(i, j)) * (3.0 * 0.07056 * av_squared + 1.5976); } } return make_callback_var(f, [a, da](auto& vi) mutable { a.adj().array() += vi.adj().array() * da.array(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/lmultiply.hpp0000644000176200001440000001743014645137106023115 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LMULTPLY_HPP #define 
STAN_MATH_REV_FUN_LMULTPLY_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class lmultiply_vv_vari : public op_vv_vari { public: lmultiply_vv_vari(vari* avi, vari* bvi) : op_vv_vari(lmultiply(avi->val_, bvi->val_), avi, bvi) {} void chain() { using std::log; avi_->adj_ += adj_ * log(bvi_->val_); bvi_->adj_ += adj_ * avi_->val_ / bvi_->val_; } }; class lmultiply_vd_vari : public op_vd_vari { public: lmultiply_vd_vari(vari* avi, double b) : op_vd_vari(lmultiply(avi->val_, b), avi, b) {} void chain() { using std::log; avi_->adj_ += adj_ * log(bd_); } }; class lmultiply_dv_vari : public op_dv_vari { public: lmultiply_dv_vari(double a, vari* bvi) : op_dv_vari(lmultiply(a, bvi->val_), a, bvi) {} void chain() { bvi_->adj_ += adj_ * ad_ / bvi_->val_; } }; } // namespace internal /** * Return the value of a*log(b). * * When both a and b are 0, the value returned is 0. * The partial derivative with respect to a is log(b). * The partial derivative with respect to b is a/b. * * @param a First variable. * @param b Second variable. * @return Value of a*log(b) */ inline var lmultiply(const var& a, const var& b) { return var(new internal::lmultiply_vv_vari(a.vi_, b.vi_)); } /** * Return the value of a*log(b). * * When both a and b are 0, the value returned is 0. * The partial derivative with respect to a is log(b). * * @param a First variable. * @param b Second scalar. * @return Value of a*log(b) */ inline var lmultiply(const var& a, double b) { return var(new internal::lmultiply_vd_vari(a.vi_, b)); } /** * Return the value of a*log(b). * * When both a and b are 0, the value returned is 0. * The partial derivative with respect to b is a/b. * * @param a First scalar. * @param b Second variable. 
* @return Value of a*log(b) */ inline var lmultiply(double a, const var& b) { if (a == 1.0) { return log(b); } return var(new internal::lmultiply_dv_vari(a, b.vi_)); } /** * Return the elementwise product `a * log(b)`. * * Both `T1` and `T2` are matrices, and one of `T1` or `T2` must be a * `var_value` * * @tparam T1 Type of first argument * @tparam T2 Type of second argument * @param a First argument * @param b Second argument * @return elementwise product of `a` and `log(b)` */ template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto lmultiply(const T1& a, const T2& b) { check_matching_dims("lmultiply", "a", a, "b", b); if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = b; return make_callback_var( lmultiply(arena_a.val(), arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * arena_b.val().array().log(); arena_b.adj().array() += res.adj().array() * arena_a.val().array() / arena_b.val().array(); }); } else if (!is_constant::value) { arena_t> arena_a = a; arena_t> arena_b = value_of(b); return make_callback_var(lmultiply(arena_a.val(), arena_b), [arena_a, arena_b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * arena_b.val().array().log(); }); } else { arena_t> arena_a = value_of(a); arena_t> arena_b = b; return make_callback_var(lmultiply(arena_a, arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_b.adj().array() += res.adj().array() * arena_a.val().array() / arena_b.val().array(); }); } } /** * Return the product `a * log(b)`. 
* * @tparam T1 Type of matrix argument * @tparam T2 Type of scalar argument * @param a Matrix argument * @param b Scalar argument * @return Product of `a` and `log(b)` */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto lmultiply(const T1& a, const T2& b) { using std::log; if (!is_constant::value && !is_constant::value) { arena_t> arena_a = a; var arena_b = b; return make_callback_var( lmultiply(arena_a.val(), arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * log(arena_b.val()); arena_b.adj() += (res.adj().array() * arena_a.val().array()).sum() / arena_b.val(); }); } else if (!is_constant::value) { arena_t> arena_a = a; return make_callback_var(lmultiply(arena_a.val(), value_of(b)), [arena_a, b](const auto& res) mutable { arena_a.adj().array() += res.adj().array() * log(value_of(b)); }); } else { arena_t> arena_a = value_of(a); var arena_b = b; return make_callback_var( lmultiply(arena_a, arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_b.adj() += (res.adj().array() * arena_a.array()).sum() / arena_b.val(); }); } } /** * Return the product `a * log(b)`. 
* * @tparam T1 Type of scalar argument * @tparam T2 Type of matrix argument * @param a Scalar argument * @param b Matrix argument * @return Product of `a` and `log(b)` */ template * = nullptr, require_var_matrix_t* = nullptr> inline auto lmultiply(const T1& a, const T2& b) { if (!is_constant::value && !is_constant::value) { var arena_a = a; arena_t> arena_b = b; return make_callback_var( lmultiply(arena_a.val(), arena_b.val()), [arena_a, arena_b](const auto& res) mutable { arena_a.adj() += (res.adj().array() * arena_b.val().array().log()).sum(); arena_b.adj().array() += arena_a.val() * res.adj().array() / arena_b.val().array(); }); } else if (!is_constant::value) { var arena_a = a; arena_t> arena_b = value_of(b); return make_callback_var( lmultiply(arena_a.val(), arena_b), [arena_a, arena_b](const auto& res) mutable { arena_a.adj() += (res.adj().array() * arena_b.val().array().log()).sum(); }); } else { arena_t> arena_b = b; return make_callback_var(lmultiply(value_of(a), arena_b.val()), [a, arena_b](const auto& res) mutable { arena_b.adj().array() += value_of(a) * res.adj().array() / arena_b.val().array(); }); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/dot_self.hpp0000644000176200001440000000312114645137106022651 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DOT_SELF_HPP #define STAN_MATH_REV_FUN_DOT_SELF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the dot product of a vector of var with itself. * * @tparam T type of the vector (must have one compile time dimension equal to * 1) * @param[in] v Vector. * @return Dot product of the vector with itself. 
*/ template * = nullptr> inline var dot_self(const T& v) { const auto& v_ref = to_ref(v); arena_t arena_v(v_ref.size()); double res_val = 0; for (size_t i = 0; i < arena_v.size(); ++i) { arena_v.coeffRef(i) = v_ref.coeffRef(i); res_val += arena_v.coeffRef(i).val() * arena_v.coeffRef(i).val(); } var res(res_val); reverse_pass_callback([res, arena_v]() mutable { arena_v.adj() += 2.0 * res.adj() * arena_v.val(); }); return res; } /** * Returns the dot product of a `var_value` with itself. * * @tparam A `var_value<>` whose inner type has one compile time row or column. * @param[in] v Vector. * @return Dot product of the vector with itself. */ template * = nullptr> inline var dot_self(const T& v) { var res = v.val().dot(v.val()); reverse_pass_callback( [res, v]() mutable { v.adj() += (2.0 * res.adj()) * v.val(); }); return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/rep_vector.hpp0000644000176200001440000000157414645137106023234 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_REP_VECTOR_HPP #define STAN_MATH_REV_FUN_REP_VECTOR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Overload for `var_value`. * @tparam T_ret The user supplied return type. * @tparam T A double or var type * @param x The type to be propogated through the new vector. * @param n The size of the new vector. 
*/ template * = nullptr, require_eigen_col_vector_t>* = nullptr> inline auto rep_vector(var x, int n) { return make_callback_var(rep_vector(x.val(), n), [x](auto& vi) mutable { forward_as(x).adj() += vi.adj().sum(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/identity_free.hpp0000644000176200001440000000152414645137106023711 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_IDENTITY_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_IDENTITY_CONSTRAIN_HPP #include #include #include namespace stan { namespace math { /** * Returns the result of applying the identity constraint * transform to the input. For any of the inputs being a `var_value` type * this promotes the input `x` to `var_value`. * * @tparam T Any type. * @tparam Types Any type with one of `T` and `Types` being a `var_value` * matrix. * @param[in] x object * @return transformed input */ template * = nullptr> inline auto identity_free(T&& x, Types&&... /* args */) { return to_var_value(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/rows_dot_self.hpp0000644000176200001440000000250714645137106023732 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_ROWS_DOT_SELF_HPP #define STAN_MATH_REV_FUN_ROWS_DOT_SELF_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the dot product of each row of a matrix with itself. * * @tparam Mat An Eigen matrix with a `var` scalar type. * @param x Matrix. */ template * = nullptr> inline Eigen::Matrix rows_dot_self( const Mat& x) { Eigen::Matrix ret(x.rows()); for (size_type i = 0; i < x.rows(); i++) { ret(i) = dot_self(x.row(i)); } return ret; } /** * Returns the dot product of row row of a matrix with itself. * * @tparam Mat A `var_value<>` with an inner matrix type. * @param x Matrix. 
*/ template * = nullptr> inline auto rows_dot_self(const Mat& x) { using ret_type = var_value; arena_t res = x.val().rowwise().squaredNorm(); if (x.size() >= 0) { reverse_pass_callback([res, x]() mutable { x.adj() += (2 * res.adj()).asDiagonal() * x.val(); }); } return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/ub_constrain.hpp0000644000176200001440000003361314645137106023551 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_UB_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_UB_CONSTRAIN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the upper-bounded value for the specified unconstrained * matrix and upper bound. * *

The transform is * *

\f$f(x) = U - \exp(x)\f$ * *

where \f$U\f$ is the upper bound. * * @tparam T Scalar * @tparam U Scalar * @param[in] x free Matrix. * @param[in] ub upper bound * @return matrix constrained to have upper bound */ template * = nullptr, require_any_var_t* = nullptr> inline auto ub_constrain(const T& x, const U& ub) { const auto ub_val = value_of(ub); if (unlikely(ub_val == INFTY)) { return identity_constrain(x, ub); } else { if (!is_constant::value && !is_constant::value) { auto neg_exp_x = -std::exp(value_of(x)); return make_callback_var( ub_val + neg_exp_x, [arena_x = var(x), arena_ub = var(ub), neg_exp_x](auto& vi) mutable { const auto vi_adj = vi.adj(); arena_x.adj() += vi_adj * neg_exp_x; arena_ub.adj() += vi_adj; }); } else if (!is_constant::value) { auto neg_exp_x = -std::exp(value_of(x)); return make_callback_var(ub_val + neg_exp_x, [arena_x = var(x), neg_exp_x](auto& vi) mutable { arena_x.adj() += vi.adj() * neg_exp_x; }); } else { return make_callback_var(ub_val - std::exp(value_of(x)), [arena_ub = var(ub)](auto& vi) mutable { arena_ub.adj() += vi.adj(); }); } } } /** * Return the upper-bounded value for the specified unconstrained * scalar and upper bound and increment the specified log * probability reference with the log absolute Jacobian * determinant of the transform. * *

The transform is as specified for * ub_constrain(T, double). The log absolute Jacobian * determinant is * *

\f$ \log | \frac{d}{dx} -\mbox{exp}(x) + U | * = \log | -\mbox{exp}(x) + 0 | = x\f$. * * @tparam T type of scalar * @tparam U type of upper bound * @param[in] x free scalar * @param[in] ub upper bound * @param[in, out] lp log density * @return scalar constrained to have upper bound */ template * = nullptr, require_any_var_t* = nullptr> inline auto ub_constrain(const T& x, const U& ub, return_type_t& lp) { const auto ub_val = value_of(ub); const bool is_ub_inf = ub_val == INFTY; if (!is_constant::value && !is_constant::value) { if (unlikely(is_ub_inf)) { return identity_constrain(x, ub); } else { lp += value_of(x); auto neg_exp_x = -std::exp(value_of(x)); return make_callback_var(value_of(ub) + neg_exp_x, [lp, arena_x = var(x), arena_ub = var(ub), neg_exp_x](auto& vi) mutable { const auto vi_adj = vi.adj(); arena_x.adj() += vi_adj * neg_exp_x + lp.adj(); arena_ub.adj() += vi_adj; }); } } else if (!is_constant::value) { if (unlikely(is_ub_inf)) { return identity_constrain(x, ub); } else { lp += value_of(x); auto neg_exp_x = -std::exp(value_of(x)); return make_callback_var( value_of(ub) + neg_exp_x, [lp, arena_x = var(x), neg_exp_x](auto& vi) mutable { arena_x.adj() += vi.adj() * neg_exp_x + lp.adj(); }); } } else { if (unlikely(is_ub_inf)) { return identity_constrain(x, ub); } else { lp += value_of(x); return make_callback_var(value_of(ub) - std::exp(value_of(x)), [arena_ub = var(ub)](auto& vi) mutable { arena_ub.adj() += vi.adj(); }); } } } /** * Specialization of `ub_constrain` to apply a scalar upper bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam U Scalar. 
* @param[in] x unconstrained input * @param[in] ub upper bound on output * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_any_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub) { using ret_type = return_var_matrix_t; const auto ub_val = value_of(ub); if (unlikely(ub_val == INFTY)) { return ret_type(identity_constrain(x, ub)); } else { if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; auto arena_neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = ub_val + arena_neg_exp_x; reverse_pass_callback( [arena_x, arena_neg_exp_x, ret, arena_ub = var(ub)]() mutable { arena_x.adj().array() += ret.adj().array() * arena_neg_exp_x; arena_ub.adj() += ret.adj().sum(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto arena_neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = ub_val + arena_neg_exp_x; reverse_pass_callback([arena_x, arena_neg_exp_x, ret]() mutable { arena_x.adj().array() += ret.adj().array() * arena_neg_exp_x; }); return ret_type(ret); } else { arena_t ret = ub_val - value_of(x).array().exp(); reverse_pass_callback([ret, arena_ub = var(ub)]() mutable { arena_ub.adj() += ret.adj().sum(); }); return ret_type(ret); } } } /** * Specialization of `ub_constrain` to apply a scalar upper bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam U Scalar. 
* @param[in] x unconstrained input * @param[in] ub upper bound on output * @param[in,out] lp reference to log probability to increment * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_any_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub, return_type_t& lp) { using ret_type = return_var_matrix_t; const auto ub_val = value_of(ub); if (unlikely(ub_val == INFTY)) { return ret_type(identity_constrain(x, ub)); } else { if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; auto arena_neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = ub_val + arena_neg_exp_x; lp += arena_x.val().sum(); reverse_pass_callback([arena_x, arena_neg_exp_x, ret, lp, arena_ub = var(ub)]() mutable { arena_x.adj().array() += ret.adj().array() * arena_neg_exp_x + lp.adj(); arena_ub.adj() += ret.adj().sum(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto arena_neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = ub_val + arena_neg_exp_x; lp += arena_x.val().sum(); reverse_pass_callback([arena_x, arena_neg_exp_x, ret, lp]() mutable { arena_x.adj().array() += ret.adj().array() * arena_neg_exp_x + lp.adj(); }); return ret_type(ret); } else { auto x_ref = to_ref(value_of(x)); arena_t ret = ub_val - x_ref.array().exp(); lp += x_ref.sum(); reverse_pass_callback([ret, arena_ub = var(ub)]() mutable { arena_ub.adj() += ret.adj().sum(); }); return ret_type(ret); } } } /** * Specialization of `ub_constrain` to apply a matrix of upper bounds * elementwise to each input element. * * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam U A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. 
* @param[in] x unconstrained input * @param[in] ub upper bound on output * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_any_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub) { check_matching_dims("ub_constrain", "x", x, "ub", ub); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; arena_t> arena_ub = ub; auto ub_val = to_ref(arena_ub.val()); auto is_not_inf_ub = to_arena((ub_val.array() != INFTY)); auto neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = (is_not_inf_ub) .select(ub_val.array() + neg_exp_x, arena_x.val().array()); reverse_pass_callback([arena_x, neg_exp_x, arena_ub, ret, is_not_inf_ub]() mutable { arena_x.adj().array() += (is_not_inf_ub) .select(ret.adj().array() * neg_exp_x, ret.adj().array()); arena_ub.adj().array() += (is_not_inf_ub).select(ret.adj().array(), 0.0); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto ub_val = to_ref(value_of(ub)); auto is_not_inf_ub = to_arena((ub_val.array() != INFTY)); auto neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = (is_not_inf_ub) .select(ub_val.array() + neg_exp_x, arena_x.val().array()); reverse_pass_callback([arena_x, neg_exp_x, ret, is_not_inf_ub]() mutable { arena_x.adj().array() += (is_not_inf_ub) .select(ret.adj().array() * neg_exp_x, ret.adj().array()); }); return ret_type(ret); } else { arena_t> arena_ub = to_arena(ub); auto is_not_inf_ub = to_arena((arena_ub.val().array() != INFTY).template cast()); auto&& x_ref = to_ref(value_of(x).array()); arena_t ret = (is_not_inf_ub).select(arena_ub.val().array() - x_ref.exp(), x_ref); reverse_pass_callback([arena_ub, ret, is_not_inf_ub]() mutable { arena_ub.adj().array() += ret.adj().array() * is_not_inf_ub; }); return ret_type(ret); } } /** * Specialization of `ub_constrain` to apply a matrix of upper bounds * elementwise to each input element. 
* * @tparam T A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @tparam U A type inheriting from `EigenBase` or a `var_value` with inner type * inheriting from `EigenBase`. * @param[in] x unconstrained input * @param[in] ub upper bound on output * @param[in,out] lp reference to log probability to increment * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_any_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub, return_type_t& lp) { check_matching_dims("ub_constrain", "x", x, "ub", ub); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_x = x; arena_t> arena_ub = ub; auto ub_val = to_ref(arena_ub.val()); auto is_not_inf_ub = to_arena((ub_val.array() != INFTY)); auto neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = (is_not_inf_ub) .select(ub_val.array() + neg_exp_x, arena_x.val().array()); lp += (is_not_inf_ub).select(arena_x.val().array(), 0).sum(); reverse_pass_callback([arena_x, neg_exp_x, arena_ub, ret, lp, is_not_inf_ub]() mutable { arena_x.adj().array() += (is_not_inf_ub) .select(ret.adj().array() * neg_exp_x + lp.adj(), ret.adj().array()); arena_ub.adj().array() += (is_not_inf_ub).select(ret.adj().array(), 0.0); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_x = x; auto ub_val = to_ref(value_of(ub)); auto is_not_inf_ub = to_arena((ub_val.array() != INFTY)); auto neg_exp_x = to_arena(-arena_x.val().array().exp()); arena_t ret = (is_not_inf_ub) .select(ub_val.array() + neg_exp_x, arena_x.val().array()); lp += (is_not_inf_ub).select(arena_x.val().array(), 0).sum(); reverse_pass_callback( [arena_x, neg_exp_x, ret, lp, is_not_inf_ub]() mutable { arena_x.adj().array() += (is_not_inf_ub) .select(ret.adj().array() * neg_exp_x + lp.adj(), ret.adj().array()); }); return ret_type(ret); } else { arena_t> arena_ub = to_arena(ub); auto is_not_inf_ub = 
to_arena((arena_ub.val().array() != INFTY).template cast()); auto&& x_ref = to_ref(value_of(x).array()); arena_t ret = (is_not_inf_ub).select(arena_ub.val().array() - x_ref.exp(), x_ref); lp += (is_not_inf_ub).select(x_ref, 0).sum(); reverse_pass_callback([arena_ub, ret, is_not_inf_ub]() mutable { arena_ub.adj().array() += ret.adj().array() * is_not_inf_ub; }); return ret_type(ret); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/read_corr_L.hpp0000644000176200001440000001353614645137106023300 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_READ_CORR_L_HPP #define STAN_MATH_REV_FUN_READ_CORR_L_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the Cholesky factor of the correlation matrix of the * specified dimensionality corresponding to the specified * canonical partial correlations. * * It is generally better to work with the Cholesky factor rather * than the correlation matrix itself when the determinant, * inverse, etc. of the correlation matrix is needed for some * statistical calculation. * *

See read_corr_matrix(Array, size_t, T) * for more information. * * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param CPCs The (K choose 2) canonical partial correlations in * (-1, 1). * @param K Dimensionality of correlation matrix. * @return Cholesky factor of correlation matrix for specified * canonical partial correlations. */ template * = nullptr> auto read_corr_L(const T& CPCs, size_t K) { // on (-1, 1) using ret_type = var_value; if (K == 0) { return ret_type(Eigen::MatrixXd()); } if (K == 1) { return ret_type(Eigen::MatrixXd::Identity(1, 1)); } using std::sqrt; arena_t arena_acc = Eigen::ArrayXd::Ones(K - 1); // Cholesky factor of correlation matrix arena_t L = Eigen::MatrixXd::Zero(K, K); size_t pos = 0; size_t pull = K - 1; L(0, 0) = 1.0; L.col(0).tail(pull) = CPCs.val().head(pull); arena_acc.tail(pull) = 1.0 - CPCs.val().head(pull).array().square(); for (size_t i = 1; i < (K - 1); i++) { pos += pull; pull = K - 1 - i; const auto& cpc_seg = CPCs.val().array().segment(pos, pull); L.coeffRef(i, i) = sqrt(arena_acc.coeff(i - 1)); L.col(i).tail(pull) = cpc_seg * arena_acc.tail(pull).sqrt(); arena_acc.tail(pull) = arena_acc.tail(pull) * (1.0 - cpc_seg.square()); } L.coeffRef(K - 1, K - 1) = sqrt(arena_acc.coeff(K - 2)); arena_t L_res = L; reverse_pass_callback([CPCs, arena_acc, K, L_res]() mutable { Eigen::ArrayXd acc_val = arena_acc; Eigen::ArrayXd acc_adj = Eigen::ArrayXd::Zero(K - 1); acc_adj.coeffRef(K - 2) += 0.5 * L_res.adj().coeff(K - 1, K - 1) / L_res.val().coeff(K - 1, K - 1); int pos = CPCs.size() - 1; for (size_t i = K - 2; i > 0; --i) { int pull = K - 1 - i; const auto& cpc_seg_val = CPCs.val().array().segment(pos, pull); auto cpc_seg_adj = CPCs.adj().array().segment(pos, pull); acc_val.tail(pull) /= 1.0 - cpc_seg_val.square(); cpc_seg_adj -= 2.0 * acc_adj.tail(pull) * acc_val.tail(pull) * cpc_seg_val; acc_adj.tail(pull) = (acc_adj.tail(pull).array() * (1.0 - cpc_seg_val.square())).eval(); 
cpc_seg_adj += L_res.adj().array().col(i).tail(pull) * acc_val.tail(pull).sqrt(); acc_adj.tail(pull) += 0.5 * L_res.adj().array().col(i).tail(pull) * cpc_seg_val / acc_val.tail(pull).sqrt(); acc_adj.coeffRef(i - 1) += 0.5 * L_res.adj().coeff(i, i) / L_res.val().coeff(i, i); pos -= pull + 1; } int pull = K - 1; CPCs.adj().array().head(pull) -= 2.0 * acc_adj.tail(pull) * CPCs.val().array().head(pull); CPCs.adj().head(pull) += L_res.adj().col(0).tail(pull); }); return ret_type(L_res); } /** * Return the Cholesky factor of the correlation matrix of the * specified dimensionality corresponding to the specified * canonical partial correlations, incrementing the specified * scalar reference with the log absolute determinant of the * Jacobian of the transformation. * *

The implementation is Ben Goodrich's Cholesky * factor-based approach to the C-vine method of: * *

  • Daniel Lewandowski, Dorota Kurowicka, and Harry Joe, * Generating random correlation matrices based on vines and * extended onion method Journal of Multivariate Analysis 100 * (2009) 1989–2001
* * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param CPCs The (K choose 2) canonical partial correlations in * (-1, 1). * @param K Dimensionality of correlation matrix. * @param log_prob Reference to variable to increment with the log * Jacobian determinant. * @return Cholesky factor of correlation matrix for specified * partial correlations. */ template * = nullptr, require_stan_scalar_t* = nullptr> auto read_corr_L(const T1& CPCs, size_t K, T2& log_prob) { using ret_val_type = Eigen::MatrixXd; using ret_type = var_value; if (K == 0) { return ret_type(ret_val_type()); } if (K == 1) { return ret_type(Eigen::MatrixXd::Identity(1, 1)); } size_t pos = 0; double acc_val = 0; // no need to abs() because this Jacobian determinant // is strictly positive (and triangular) // see inverse of Jacobian in equation 11 of LKJ paper for (size_t k = 1; k <= (K - 2); k++) { for (size_t i = k + 1; i <= K; i++) { acc_val += (K - k - 1) * log1m(square(CPCs.val().coeff(pos))); pos++; } } log_prob += 0.5 * acc_val; reverse_pass_callback([K, CPCs, log_prob]() mutable { double acc_adj = log_prob.adj() * 0.5; size_t pos = CPCs.size() - 2; for (size_t k = K - 2; k >= 1; --k) { for (size_t i = K; i >= k + 1; --i) { CPCs.adj().coeffRef(pos) -= 2.0 * acc_adj * (K - k - 1) * CPCs.val().coeff(pos) / (1.0 - square(CPCs.val().coeff(pos))); pos--; } } }); return read_corr_L(CPCs, K); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/trace_quad_form.hpp0000644000176200001440000001475614645137106024225 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_TRACE_QUAD_FORM_HPP #define STAN_MATH_REV_FUN_TRACE_QUAD_FORM_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template class trace_quad_form_vari_alloc : public chainable_alloc { public: trace_quad_form_vari_alloc(const Eigen::Matrix& A, const Eigen::Matrix& 
B) : A_(A), B_(B) {} double compute() { return trace_quad_form(value_of(A_), value_of(B_)); } Eigen::Matrix A_; Eigen::Matrix B_; }; template class trace_quad_form_vari : public vari { protected: static inline void chainA(Eigen::Matrix& A, const Eigen::Matrix& Bd, double adjC) {} static inline void chainB(Eigen::Matrix& B, const Eigen::Matrix& Ad, const Eigen::Matrix& Bd, double adjC) {} static inline void chainA(Eigen::Matrix& A, const Eigen::Matrix& Bd, double adjC) { A.adj() += adjC * Bd * Bd.transpose(); } static inline void chainB(Eigen::Matrix& B, const Eigen::Matrix& Ad, const Eigen::Matrix& Bd, double adjC) { B.adj() += adjC * (Ad + Ad.transpose()) * Bd; } inline void chainAB(Eigen::Matrix& A, Eigen::Matrix& B, const Eigen::Matrix& Ad, const Eigen::Matrix& Bd, double adjC) { chainA(A, Bd, adjC); chainB(B, Ad, Bd, adjC); } public: explicit trace_quad_form_vari( trace_quad_form_vari_alloc* impl) : vari(impl->compute()), impl_(impl) {} virtual void chain() { chainAB(impl_->A_, impl_->B_, value_of(impl_->A_), value_of(impl_->B_), adj_); } trace_quad_form_vari_alloc* impl_; }; } // namespace internal template * = nullptr, require_any_st_var* = nullptr> inline return_type_t trace_quad_form(const EigMat1& A, const EigMat2& B) { using Ta = value_type_t; using Tb = value_type_t; constexpr int Ra = EigMat1::RowsAtCompileTime; constexpr int Ca = EigMat1::ColsAtCompileTime; constexpr int Rb = EigMat2::RowsAtCompileTime; constexpr int Cb = EigMat2::ColsAtCompileTime; check_square("trace_quad_form", "A", A); check_multiplicable("trace_quad_form", "A", A, "B", B); auto* baseVari = new internal::trace_quad_form_vari_alloc(A, B); return var( new internal::trace_quad_form_vari(baseVari)); } /** * Compute trace(B^T A B). * * This overload handles arguments where one of Mat1 or Mat2 are * `var_value` where `T` is an Eigen type. 
The other type can * also be a `var_value` or it can be a type that inherits * from EigenBase * * @tparam Mat1 type of the first matrix * @tparam Mat2 type of the second matrix * * @param A matrix * @param B matrix * @return The trace of B^T A B * @throw std::domain_error if A is not square * @throw std::domain_error if A cannot be multiplied by B */ template * = nullptr, require_any_var_matrix_t* = nullptr> inline var trace_quad_form(const Mat1& A, const Mat2& B) { check_square("trace_quad_form", "A", A); check_multiplicable("trace_quad_form", "A", A, "B", B); var res; if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; res = (value_of(arena_B).transpose() * value_of(arena_A) * value_of(arena_B)) .trace(); reverse_pass_callback([arena_A, arena_B, res]() mutable { if (is_var_matrix::value) { arena_A.adj().noalias() += res.adj() * value_of(arena_B) * value_of(arena_B).transpose(); } else { arena_A.adj() += res.adj() * value_of(arena_B) * value_of(arena_B).transpose(); } if (is_var_matrix::value) { arena_B.adj().noalias() += res.adj() * (value_of(arena_A) + value_of(arena_A).transpose()) * value_of(arena_B); } else { arena_B.adj() += res.adj() * (value_of(arena_A) + value_of(arena_A).transpose()) * value_of(arena_B); } }); } else if (!is_constant::value) { arena_t> arena_A = value_of(A); arena_t> arena_B = B; res = (value_of(arena_B).transpose() * value_of(arena_A) * value_of(arena_B)) .trace(); reverse_pass_callback([arena_A, arena_B, res]() mutable { if (is_var_matrix::value) { arena_B.adj().noalias() += res.adj() * (arena_A + arena_A.transpose()) * value_of(arena_B); } else { arena_B.adj() += res.adj() * (arena_A + arena_A.transpose()) * value_of(arena_B); } }); } else { arena_t> arena_A = A; arena_t> arena_B = value_of(B); res = (arena_B.transpose() * value_of(arena_A) * arena_B).trace(); reverse_pass_callback([arena_A, arena_B, res]() mutable { if (is_var_matrix::value) { arena_A.adj().noalias() += res.adj() * arena_B * 
arena_B.transpose(); } else { arena_A.adj() += res.adj() * arena_B * arena_B.transpose(); } }); } return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/initialize_fill.hpp0000644000176200001440000000151714645137106024230 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INITIALIZE_FILL_HPP #define STAN_MATH_REV_FUN_INITIALIZE_FILL_HPP #include #include #include #include namespace stan { namespace math { /** * Fill the specified container with the specified value. This function does * not perform a callback to propogate the adjoints upward * * The specified matrix is filled by element. * * @tparam VarMat a `var_value` with inner type from `EigenBase` * @tparam S A var. * * @param x Container. * @param y Value. */ template * = nullptr, require_stan_scalar_t* = nullptr> inline void initialize_fill(VarMat& x, const S& y) { x.vi_->val_.fill(value_of(y)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cov_exp_quad.hpp0000644000176200001440000001172314645137106023536 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_COV_EXP_QUAD_HPP #define STAN_MATH_REV_FUN_COV_EXP_QUAD_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * @deprecated use gp_exp_quad_cov_vari */ template class cov_exp_quad_vari : public vari { public: const size_t size_; const size_t size_ltri_; const double l_d_; const double sigma_d_; const double sigma_sq_d_; double* dist_; vari* l_vari_; vari* sigma_vari_; vari** cov_lower_; vari** cov_diag_; /** * @deprecated use gp_exp_quad_cov_vari */ cov_exp_quad_vari(const std::vector& x, const T_sigma& sigma, const T_l& l) : vari(0.0), size_(x.size()), size_ltri_(size_ * (size_ - 1) / 2), l_d_(value_of(l)), sigma_d_(value_of(sigma)), sigma_sq_d_(sigma_d_ * sigma_d_), dist_(ChainableStack::instance_->memalloc_.alloc_array( size_ltri_)), l_vari_(l.vi_), sigma_vari_(sigma.vi_), 
cov_lower_(ChainableStack::instance_->memalloc_.alloc_array( size_ltri_)), cov_diag_( ChainableStack::instance_->memalloc_.alloc_array(size_)) { double inv_half_sq_l_d = 0.5 / (l_d_ * l_d_); size_t pos = 0; for (size_t j = 0; j < size_ - 1; ++j) { for (size_t i = j + 1; i < size_; ++i) { double dist_sq = squared_distance(x[i], x[j]); dist_[pos] = dist_sq; cov_lower_[pos] = new vari( sigma_sq_d_ * std::exp(-dist_sq * inv_half_sq_l_d), false); ++pos; } } for (size_t i = 0; i < size_; ++i) { cov_diag_[i] = new vari(sigma_sq_d_, false); } } virtual void chain() { double adjl = 0; double adjsigma = 0; for (size_t i = 0; i < size_ltri_; ++i) { vari* el_low = cov_lower_[i]; double prod_add = el_low->adj_ * el_low->val_; adjl += prod_add * dist_[i]; adjsigma += prod_add; } for (size_t i = 0; i < size_; ++i) { vari* el = cov_diag_[i]; adjsigma += el->adj_ * el->val_; } l_vari_->adj_ += adjl / (l_d_ * l_d_ * l_d_); sigma_vari_->adj_ += adjsigma * 2 / sigma_d_; } }; /** * @deprecated use gp_exp_quad_cov_vari */ template class cov_exp_quad_vari : public vari { public: const size_t size_; const size_t size_ltri_; const double l_d_; const double sigma_d_; const double sigma_sq_d_; double* dist_; vari* l_vari_; vari** cov_lower_; vari** cov_diag_; /** * @deprecated use gp_exp_quad_cov_vari */ cov_exp_quad_vari(const std::vector& x, double sigma, const T_l& l) : vari(0.0), size_(x.size()), size_ltri_(size_ * (size_ - 1) / 2), l_d_(value_of(l)), sigma_d_(value_of(sigma)), sigma_sq_d_(sigma_d_ * sigma_d_), dist_(ChainableStack::instance_->memalloc_.alloc_array( size_ltri_)), l_vari_(l.vi_), cov_lower_(ChainableStack::instance_->memalloc_.alloc_array( size_ltri_)), cov_diag_( ChainableStack::instance_->memalloc_.alloc_array(size_)) { double inv_half_sq_l_d = 0.5 / (l_d_ * l_d_); size_t pos = 0; for (size_t j = 0; j < size_ - 1; ++j) { for (size_t i = j + 1; i < size_; ++i) { double dist_sq = squared_distance(x[i], x[j]); dist_[pos] = dist_sq; cov_lower_[pos] = new vari( sigma_sq_d_ * 
std::exp(-dist_sq * inv_half_sq_l_d), false); ++pos; } } for (size_t i = 0; i < size_; ++i) { cov_diag_[i] = new vari(sigma_sq_d_, false); } } virtual void chain() { double adjl = 0; for (size_t i = 0; i < size_ltri_; ++i) { vari* el_low = cov_lower_[i]; adjl += el_low->adj_ * el_low->val_ * dist_[i]; } l_vari_->adj_ += adjl / (l_d_ * l_d_ * l_d_); } }; /** * @deprecated use gp_exp_quad_cov_vari */ template ::type>> inline Eigen::Matrix cov_exp_quad(const std::vector& x, const var& sigma, const var& l) { return gp_exp_quad_cov(x, sigma, l); } /** * @deprecated use gp_exp_quad_cov_vari */ template ::type>> inline Eigen::Matrix cov_exp_quad(const std::vector& x, double sigma, const var& l) { return gp_exp_quad_cov(x, sigma, l); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/mdivide_left_ldlt.hpp0000644000176200001440000000477514645137106024544 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MDIVIDE_LEFT_LDLT_HPP #define STAN_MATH_REV_FUN_MDIVIDE_LEFT_LDLT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the solution of the system Ax=b given an LDLT_factor of A * * @tparam T type of B * @param A LDLT_factor * @param B Right hand side matrix or vector. * @return x = A^-1 B, solution of the linear system. * @throws std::domain_error if rows of B don't match the size of A. 
*/ template * = nullptr, require_any_st_var* = nullptr> inline auto mdivide_left_ldlt(LDLT_factor& A, const T2& B) { using ret_val_type = Eigen::Matrix; using ret_type = promote_var_matrix_t; check_multiplicable("mdivide_left_ldlt", "A", A.matrix().val(), "B", B); if (A.matrix().size() == 0) { return ret_type(ret_val_type(0, B.cols())); } if (!is_constant::value && !is_constant::value) { arena_t> arena_B = B; arena_t> arena_A = A.matrix(); arena_t res = A.ldlt().solve(arena_B.val()); const auto* ldlt_ptr = make_chainable_ptr(A.ldlt()); reverse_pass_callback([arena_A, arena_B, ldlt_ptr, res]() mutable { promote_scalar_t adjB = ldlt_ptr->solve(res.adj()); arena_A.adj() -= adjB * res.val_op().transpose(); arena_B.adj() += adjB; }); return ret_type(res); } else if (!is_constant::value) { arena_t> arena_A = A.matrix(); arena_t res = A.ldlt().solve(value_of(B)); const auto* ldlt_ptr = make_chainable_ptr(A.ldlt()); reverse_pass_callback([arena_A, ldlt_ptr, res]() mutable { arena_A.adj() -= ldlt_ptr->solve(res.adj()) * res.val_op().transpose(); }); return ret_type(res); } else { arena_t> arena_B = B; arena_t res = A.ldlt().solve(arena_B.val()); const auto* ldlt_ptr = make_chainable_ptr(A.ldlt()); reverse_pass_callback([arena_B, ldlt_ptr, res]() mutable { arena_B.adj() += ldlt_ptr->solve(res.adj()); }); return ret_type(res); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/Phi.hpp0000644000176200001440000000375714645137106021611 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_PHI_HPP #define STAN_MATH_REV_FUN_PHI_HPP #include #include #include #include namespace stan { namespace math { /** * The unit normal cumulative density function for variables (stan). * * See Phi() for the double-based version. * * The derivative is the unit normal density function, * * \f$\frac{d}{dx} \Phi(x) = \mbox{\sf Norm}(x|0, 1) = \frac{1}{\sqrt{2\pi}} \exp(-\frac{1}{2} x^2)\f$. 
* * \f[ \mbox{Phi}(x) = \begin{cases} 0 & \mbox{if } x < -37.5 \\ \Phi(x) & \mbox{if } -37.5 \leq x \leq 8.25 \\ 1 & \mbox{if } x > 8.25 \\[6pt] \textrm{error} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{Phi}(x)}{\partial x} = \begin{cases} 0 & \mbox{if } x < -27.5 \\ \frac{\partial\, \Phi(x)}{\partial x} & \mbox{if } -27.5 \leq x \leq 27.5 \\ 0 & \mbox{if } x > 27.5 \\[6pt] \textrm{error} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \Phi(x) = \frac{1}{\sqrt{2\pi}} \int_{0}^{x} e^{-t^2/2} dt \f] \f[ \frac{\partial \, \Phi(x)}{\partial x} = \frac{e^{-x^2/2}}{\sqrt{2\pi}} \f] * * @param a Variable argument. * @return The unit normal cdf evaluated at the specified argument. */ inline var Phi(const var& a) { return make_callback_var(Phi(a.val()), [a](auto& vi) mutable { a.adj() += vi.adj() * INV_SQRT_TWO_PI * std::exp(-0.5 * a.val() * a.val()); }); } /** * Elementwise unit normal cumulative density function for varmat types. * * @tparam T a `var_value` with inner Eigen type * @param a input * @return The vectorized unit normal cdf */ template * = nullptr> inline auto Phi(const T& a) { return make_callback_var(Phi(a.val()), [a](auto& vi) mutable { a.adj().array() += vi.adj().array() * INV_SQRT_TWO_PI * (-0.5 * a.val().array().square()).exp(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/diag_pre_multiply.hpp0000644000176200001440000000452714645137106024576 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_DIAG_PRE_MULTIPLY_HPP #define STAN_MATH_REV_FUN_DIAG_PRE_MULTIPLY_HPP #include #include #include namespace stan { namespace math { /** * Return the product of the diagonal matrix formed from the vector * or row_vector and a matrix. * * @tparam T1 type of the vector/row_vector * @tparam T2 type of the matrix * @param m1 input vector/row_vector * @param m2 input matrix * * @return product of the diagonal matrix formed from the * vector or row_vector and a matrix. 
*/ template * = nullptr, require_matrix_t* = nullptr, require_any_st_var* = nullptr> auto diag_pre_multiply(const T1& m1, const T2& m2) { check_size_match("diag_pre_multiply", "m1.size()", m1.size(), "m2.rows()", m2.rows()); using inner_ret_type = decltype(value_of(m1).asDiagonal() * value_of(m2)); using ret_type = return_var_matrix_t; if (!is_constant::value && !is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = m2; arena_t ret(arena_m1.val().asDiagonal() * arena_m2.val()); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m1.adj() += arena_m2.val().cwiseProduct(ret.adj()).rowwise().sum(); arena_m2.adj() += arena_m1.val().asDiagonal() * ret.adj(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = value_of(m2); arena_t ret(arena_m1.val().asDiagonal() * arena_m2); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m1.adj() += arena_m2.val().cwiseProduct(ret.adj()).rowwise().sum(); }); return ret_type(ret); } else if (!is_constant::value) { arena_t> arena_m1 = value_of(m1); arena_t> arena_m2 = m2; arena_t ret(arena_m1.asDiagonal() * arena_m2.val()); reverse_pass_callback([ret, arena_m1, arena_m2]() mutable { arena_m2.adj() += arena_m1.val().asDiagonal() * ret.adj(); }); return ret_type(ret); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cholesky_factor_constrain.hpp0000644000176200001440000000700614645137106026317 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_CHOLESKY_FACTOR_CONSTRAIN_HPP #define STAN_MATH_REV_FUN_CHOLESKY_FACTOR_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the Cholesky factor of the specified size read from the * specified vector. A total of (N choose 2) + N + (M - N) * N * elements are required to read an M by N Cholesky factor. 
* * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x Vector of unconstrained values * @param M number of rows * @param N number of columns * @return Cholesky factor */ template * = nullptr> var_value cholesky_factor_constrain(const T& x, int M, int N) { using std::exp; check_greater_or_equal("cholesky_factor_constrain", "num rows (must be greater or equal to num cols)", M, N); check_size_match("cholesky_factor_constrain", "x.size()", x.size(), "((N * (N + 1)) / 2 + (M - N) * N)", ((N * (N + 1)) / 2 + (M - N) * N)); arena_t y_val = Eigen::MatrixXd::Zero(M, N); int pos = 0; for (int m = 0; m < N; ++m) { y_val.row(m).head(m) = x.val().segment(pos, m); pos += m; y_val.coeffRef(m, m) = exp(x.val().coeff(pos)); pos++; } for (int m = N; m < M; ++m) { y_val.row(m) = x.val().segment(pos, N); pos += N; } var_value y = y_val; reverse_pass_callback([x, M, N, y]() mutable { int pos = x.size(); for (int m = M - 1; m >= N; --m) { pos -= N; x.adj().segment(pos, N) += y.adj().row(m); } for (int m = N - 1; m >= 0; --m) { pos--; x.adj().coeffRef(pos) += y.adj().coeff(m, m) * y.val().coeff(m, m); pos -= m; x.adj().segment(pos, m) += y.adj().row(m).head(m); } }); return y; } /** * Return the Cholesky factor of the specified size read from the * specified vector and increment the specified log probability * reference with the log Jacobian adjustment of the transform. A total * of (N choose 2) + N + N * (M - N) free parameters are required to read * an M by N Cholesky factor. 
* * @tparam T type of input vector (must be a `var_value` where `S` * inherits from EigenBase) * @param x Vector of unconstrained values * @param M number of rows * @param N number of columns * @param[out] lp Log density that is incremented with the log Jacobian * @return Cholesky factor */ template * = nullptr> var_value cholesky_factor_constrain(const T& x, int M, int N, scalar_type_t& lp) { check_size_match("cholesky_factor_constrain", "x.size()", x.size(), "((N * (N + 1)) / 2 + (M - N) * N)", ((N * (N + 1)) / 2 + (M - N) * N)); int pos = 0; double lp_val = 0.0; for (int n = 0; n < N; ++n) { pos += n; lp_val += x.val().coeff(pos); pos++; } lp += lp_val; reverse_pass_callback([x, N, lp]() mutable { int pos = 0; for (int n = 0; n < N; ++n) { pos += n; x.adj().coeffRef(pos) += lp.adj(); pos++; } }); return cholesky_factor_constrain(x, M, N); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/inc_beta.hpp0000644000176200001440000000242314645137106022622 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_INC_BETA_HPP #define STAN_MATH_REV_FUN_INC_BETA_HPP #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class inc_beta_vvv_vari : public op_vvv_vari { public: inc_beta_vvv_vari(vari* avi, vari* bvi, vari* cvi) : op_vvv_vari(inc_beta(avi->val_, bvi->val_, cvi->val_), avi, bvi, cvi) {} void chain() { double d_a; double d_b; const double beta_ab = beta(avi_->val_, bvi_->val_); grad_reg_inc_beta(d_a, d_b, avi_->val_, bvi_->val_, cvi_->val_, digamma(avi_->val_), digamma(bvi_->val_), digamma(avi_->val_ + bvi_->val_), beta_ab); avi_->adj_ += adj_ * d_a; bvi_->adj_ += adj_ * d_b; cvi_->adj_ += adj_ * std::pow(1 - cvi_->val_, bvi_->val_ - 1) * std::pow(cvi_->val_, avi_->val_ - 1) / beta_ab; } }; } // namespace internal inline var inc_beta(const var& a, const var& b, const var& c) { return var(new internal::inc_beta_vvv_vari(a.vi_, b.vi_, c.vi_)); } } // namespace math } // 
namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/mdivide_left_spd.hpp0000644000176200001440000003131414645137106024360 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_MDIVIDE_LEFT_SPD_HPP #define STAN_MATH_REV_FUN_MDIVIDE_LEFT_SPD_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template class mdivide_left_spd_alloc : public chainable_alloc { public: virtual ~mdivide_left_spd_alloc() {} Eigen::LLT> llt_; Eigen::Matrix C_; }; template class mdivide_left_spd_vv_vari : public vari { public: int M_; // A.rows() = A.cols() = B.rows() int N_; // B.cols() vari **variRefA_; vari **variRefB_; vari **variRefC_; mdivide_left_spd_alloc *alloc_; mdivide_left_spd_vv_vari(const Eigen::Matrix &A, const Eigen::Matrix &B) : vari(0.0), M_(A.rows()), N_(B.cols()), variRefA_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * A.rows() * A.cols()))), variRefB_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))), variRefC_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))), alloc_(new mdivide_left_spd_alloc()) { Eigen::Map(variRefA_, M_, M_) = A.vi(); Eigen::Map(variRefB_, M_, N_) = B.vi(); alloc_->C_ = B.val(); alloc_->llt_ = A.val().llt(); check_pos_definite("mdivide_left_spd", "A", alloc_->llt_); alloc_->llt_.solveInPlace(alloc_->C_); Eigen::Map(variRefC_, M_, N_) = alloc_->C_.unaryExpr([](double x) { return new vari(x, false); }); } virtual void chain() { matrix_d adjB = Eigen::Map(variRefC_, M_, N_).adj(); alloc_->llt_.solveInPlace(adjB); Eigen::Map(variRefA_, M_, M_).adj() -= adjB * alloc_->C_.transpose(); Eigen::Map(variRefB_, M_, N_).adj() += adjB; } }; template class mdivide_left_spd_dv_vari : public vari { public: int M_; // A.rows() = A.cols() = B.rows() int N_; // B.cols() vari **variRefB_; vari **variRefC_; mdivide_left_spd_alloc *alloc_; 
mdivide_left_spd_dv_vari(const Eigen::Matrix &A, const Eigen::Matrix &B) : vari(0.0), M_(A.rows()), N_(B.cols()), variRefB_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))), variRefC_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))), alloc_(new mdivide_left_spd_alloc()) { alloc_->C_ = B.val(); Eigen::Map(variRefB_, M_, N_) = B.vi(); alloc_->llt_ = A.llt(); check_pos_definite("mdivide_left_spd", "A", alloc_->llt_); alloc_->llt_.solveInPlace(alloc_->C_); Eigen::Map(variRefC_, M_, N_) = alloc_->C_.unaryExpr([](double x) { return new vari(x, false); }); } virtual void chain() { matrix_d adjB = Eigen::Map(variRefC_, M_, N_).adj(); alloc_->llt_.solveInPlace(adjB); Eigen::Map(variRefB_, M_, N_).adj() += adjB; } }; template class mdivide_left_spd_vd_vari : public vari { public: int M_; // A.rows() = A.cols() = B.rows() int N_; // B.cols() vari **variRefA_; vari **variRefC_; mdivide_left_spd_alloc *alloc_; mdivide_left_spd_vd_vari(const Eigen::Matrix &A, const Eigen::Matrix &B) : vari(0.0), M_(A.rows()), N_(B.cols()), variRefA_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * A.rows() * A.cols()))), variRefC_(reinterpret_cast( ChainableStack::instance_->memalloc_.alloc(sizeof(vari *) * B.rows() * B.cols()))), alloc_(new mdivide_left_spd_alloc()) { Eigen::Map(variRefA_, M_, M_) = A.vi(); alloc_->llt_ = A.val().llt(); check_pos_definite("mdivide_left_spd", "A", alloc_->llt_); alloc_->C_ = alloc_->llt_.solve(B); Eigen::Map(variRefC_, M_, N_) = alloc_->C_.unaryExpr([](double x) { return new vari(x, false); }); } virtual void chain() { matrix_d adjC = Eigen::Map(variRefC_, M_, N_).adj(); Eigen::Map(variRefA_, M_, M_).adj() -= alloc_->llt_.solve(adjC * alloc_->C_.transpose()); } }; } // namespace internal template < typename EigMat1, typename EigMat2, require_all_eigen_matrix_base_vt * = nullptr> inline Eigen::Matrix mdivide_left_spd(const EigMat1 
&A, const EigMat2 &b) { constexpr int R1 = EigMat1::RowsAtCompileTime; constexpr int C1 = EigMat1::ColsAtCompileTime; constexpr int R2 = EigMat2::RowsAtCompileTime; constexpr int C2 = EigMat2::ColsAtCompileTime; static const char *function = "mdivide_left_spd"; check_multiplicable(function, "A", A, "b", b); const auto &A_ref = to_ref(A); check_symmetric(function, "A", A_ref); check_not_nan(function, "A", A_ref); if (A.size() == 0) { return {0, b.cols()}; } // NOTE: this is not a memory leak, this vari is used in the // expression graph to evaluate the adjoint, but is not needed // for the returned matrix. Memory will be cleaned up with the // arena allocator. internal::mdivide_left_spd_vv_vari *baseVari = new internal::mdivide_left_spd_vv_vari(A_ref, b); Eigen::Matrix res(b.rows(), b.cols()); res.vi() = Eigen::Map(&baseVari->variRefC_[0], b.rows(), b.cols()); return res; } template * = nullptr, require_eigen_matrix_base_vt * = nullptr> inline Eigen::Matrix mdivide_left_spd(const EigMat1 &A, const EigMat2 &b) { constexpr int R1 = EigMat1::RowsAtCompileTime; constexpr int C1 = EigMat1::ColsAtCompileTime; constexpr int R2 = EigMat2::RowsAtCompileTime; constexpr int C2 = EigMat2::ColsAtCompileTime; static const char *function = "mdivide_left_spd"; check_multiplicable(function, "A", A, "b", b); const auto &A_ref = to_ref(A); check_symmetric(function, "A", A_ref); check_not_nan(function, "A", A_ref); if (A.size() == 0) { return {0, b.cols()}; } // NOTE: this is not a memory leak, this vari is used in the // expression graph to evaluate the adjoint, but is not needed // for the returned matrix. Memory will be cleaned up with the // arena allocator. 
internal::mdivide_left_spd_vd_vari *baseVari = new internal::mdivide_left_spd_vd_vari(A_ref, b); Eigen::Matrix res(b.rows(), b.cols()); res.vi() = Eigen::Map(&baseVari->variRefC_[0], b.rows(), b.cols()); return res; } template * = nullptr, require_eigen_matrix_base_vt * = nullptr> inline Eigen::Matrix mdivide_left_spd(const EigMat1 &A, const EigMat2 &b) { constexpr int R1 = EigMat1::RowsAtCompileTime; constexpr int C1 = EigMat1::ColsAtCompileTime; constexpr int R2 = EigMat2::RowsAtCompileTime; constexpr int C2 = EigMat2::ColsAtCompileTime; static const char *function = "mdivide_left_spd"; check_multiplicable(function, "A", A, "b", b); const auto &A_ref = to_ref(A); check_symmetric(function, "A", A_ref); check_not_nan(function, "A", A_ref); if (A.size() == 0) { return {0, b.cols()}; } // NOTE: this is not a memory leak, this vari is used in the // expression graph to evaluate the adjoint, but is not needed // for the returned matrix. Memory will be cleaned up with the // arena allocator. internal::mdivide_left_spd_dv_vari *baseVari = new internal::mdivide_left_spd_dv_vari(A_ref, b); Eigen::Matrix res(b.rows(), b.cols()); res.vi() = Eigen::Map(&baseVari->variRefC_[0], b.rows(), b.cols()); return res; } /** * Returns the solution of the system Ax=B where A is symmetric positive * definite. * * This overload handles arguments where one of T1 or T2 are * `var_value` where `T` is an Eigen type. The other type can * also be a `var_value` or it can be a matrix type that inherits * from EigenBase * * @tparam T1 type of the first matrix * @tparam T2 type of the right-hand side matrix or vector * * @param A Matrix. * @param B Right hand side matrix or vector. * @return x = A^-1 B, solution of the linear system. * @throws std::domain_error if A is not square or B does not have * as many rows as A has columns. 
*/ template * = nullptr, require_any_var_matrix_t * = nullptr> inline auto mdivide_left_spd(const T1 &A, const T2 &B) { using ret_val_type = plain_type_t; using ret_type = var_value; if (A.size() == 0) { return ret_type(ret_val_type(0, B.cols())); } check_multiplicable("mdivide_left_spd", "A", A, "B", B); if (!is_constant::value && !is_constant::value) { arena_t> arena_A = A; arena_t> arena_B = B; check_symmetric("mdivide_left_spd", "A", arena_A.val()); check_not_nan("mdivide_left_spd", "A", arena_A.val()); auto A_llt = arena_A.val().llt(); check_pos_definite("mdivide_left_spd", "A", A_llt); arena_t arena_A_llt = A_llt.matrixL(); arena_t res = A_llt.solve(arena_B.val()); reverse_pass_callback([arena_A, arena_B, arena_A_llt, res]() mutable { promote_scalar_t adjB = res.adj(); arena_A_llt.template triangularView().solveInPlace(adjB); arena_A_llt.template triangularView() .transpose() .solveInPlace(adjB); arena_A.adj() -= adjB * res.val_op().transpose(); arena_B.adj() += adjB; }); return ret_type(res); } else if (!is_constant::value) { arena_t> arena_A = A; check_symmetric("mdivide_left_spd", "A", arena_A.val()); check_not_nan("mdivide_left_spd", "A", arena_A.val()); auto A_llt = arena_A.val().llt(); check_pos_definite("mdivide_left_spd", "A", A_llt); arena_t arena_A_llt = A_llt.matrixL(); arena_t res = A_llt.solve(value_of(B)); reverse_pass_callback([arena_A, arena_A_llt, res]() mutable { promote_scalar_t adjB = res.adj(); arena_A_llt.template triangularView().solveInPlace(adjB); arena_A_llt.template triangularView() .transpose() .solveInPlace(adjB); arena_A.adj() -= adjB * res.val().transpose().eval(); }); return ret_type(res); } else { const auto &A_ref = to_ref(value_of(A)); arena_t> arena_B = B; check_symmetric("mdivide_left_spd", "A", A_ref); check_not_nan("mdivide_left_spd", "A", A_ref); auto A_llt = A_ref.llt(); check_pos_definite("mdivide_left_spd", "A", A_llt); arena_t arena_A_llt = A_llt.matrixL(); arena_t res = A_llt.solve(arena_B.val()); 
reverse_pass_callback([arena_B, arena_A_llt, res]() mutable { promote_scalar_t adjB = res.adj(); arena_A_llt.template triangularView().solveInPlace(adjB); arena_A_llt.template triangularView() .transpose() .solveInPlace(adjB); arena_B.adj() += adjB; }); return ret_type(res); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/cos.hpp0000644000176200001440000000371714645137106021651 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_COS_HPP #define STAN_MATH_REV_FUN_COS_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the cosine of a radian-scaled variable (cmath). * * The derivative is defined by * * \f$\frac{d}{dx} \cos x = - \sin x\f$. * * \f[ \mbox{cos}(x) = \begin{cases} \cos(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{cos}(x)}{\partial x} = \begin{cases} -\sin(x) & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Variable for radians of angle. * @return Cosine of variable. */ inline var cos(var a) { return make_callback_var(std::cos(a.val()), [a](const auto& vi) mutable { a.adj() -= vi.adj() * std::sin(a.val()); }); } /** * Return the cosine of a radian-scaled variable (cmath). * * * @tparam Varmat a `var_value` with inner Eigen type * @param a Variable for radians of angle. * @return Cosine of variable. */ template * = nullptr> inline auto cos(const VarMat& a) { return make_callback_var( a.val().array().cos().matrix(), [a](const auto& vi) mutable { a.adj() -= vi.adj().cwiseProduct(a.val().array().sin().matrix()); }); } /** * Return the cosine of the complex argument. 
* * @param[in] z argument * @return cosine of the argument */ inline std::complex cos(const std::complex& z) { return stan::math::internal::complex_cos(z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun/log_determinant_spd.hpp0000644000176200001440000000347514645137106025107 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_LOG_DETERMINANT_SPD_HPP #define STAN_MATH_REV_FUN_LOG_DETERMINANT_SPD_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the log det of a symmetric, positive-definite matrix * * @tparam T Type is an Eigen Matrix or `var_value` with inner Eigen matrix type * @param M a symmetric, positive-definite matrix * @return The log determinant of the specified matrix */ template * = nullptr> inline var log_determinant_spd(const T& M) { if (M.size() == 0) { return var(0.0); } check_symmetric("log_determinant", "M", M); arena_t arena_M = M; matrix_d M_d = arena_M.val(); auto M_ldlt = M_d.ldlt(); if (M_ldlt.info() != Eigen::Success) { constexpr double y = 0; throw_domain_error("log_determinant_spd", "matrix argument", y, "failed LDLT factorization"); } // compute the inverse of A (needed for the derivative) M_d.setIdentity(M.rows(), M.cols()); M_ldlt.solveInPlace(M_d); auto arena_M_inv_transpose = to_arena(M_d.transpose()); if (M_ldlt.isNegative() || (M_ldlt.vectorD().array() <= 1e-16).any()) { constexpr double y = 0; throw_domain_error("log_determinant_spd", "matrix argument", y, "matrix is negative definite"); } var log_det = sum(log(M_ldlt.vectorD())); reverse_pass_callback([arena_M, log_det, arena_M_inv_transpose]() mutable { arena_M.adj() += log_det.adj() * arena_M_inv_transpose; }); return log_det; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor.hpp0000644000176200001440000000312514645137106021746 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_HPP #define STAN_MATH_REV_FUNCTOR_HPP #include 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/rev/core.hpp0000644000176200001440000000727514645137106021230 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_HPP #define STAN_MATH_REV_CORE_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/rev/prob.hpp0000644000176200001440000000024414645137106021227 0ustar liggesusers#ifndef STAN_MATH_REV_PROB_HPP #define STAN_MATH_REV_PROB_HPP #include #include #endif StanHeaders/inst/include/stan/math/rev/prob/0000755000176200001440000000000014645161272020517 5ustar liggesusersStanHeaders/inst/include/stan/math/rev/prob/std_normal_log_qf.hpp0000644000176200001440000000234314645137106024722 0ustar liggesusers#ifndef STAN_MATH_REV_PROB_STD_NORMAL_LOG_QF_HPP #define STAN_MATH_REV_PROB_STD_NORMAL_LOG_QF_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the elementwise inverse of unit normal cumulative density function. 
* * @tparam T a `var_value` with inner Eigen type * @param log_p log probability vector * @return Elementwise unit normal inverse cdf */ template * = nullptr> inline auto std_normal_log_qf(const var_value& log_p) { return make_callback_var( std_normal_log_qf(log_p.val()), [log_p](auto& vi) mutable { auto vi_array = as_array_or_scalar(vi.val()); auto vi_sign = sign(as_array_or_scalar(vi.adj())); const auto& deriv = as_array_or_scalar(log_p).val() + log(as_array_or_scalar(vi.adj()) * vi_sign) - NEG_LOG_SQRT_TWO_PI + 0.5 * square(vi_array); as_array_or_scalar(log_p).adj() += vi_sign * exp(deriv); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/fun.hpp0000644000176200001440000002117214645137106021060 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_HPP #define STAN_MATH_REV_FUN_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/rev/meta/0000755000176200001440000000000014645161272020503 5ustar liggesusersStanHeaders/inst/include/stan/math/rev/meta/is_rev_matrix.hpp0000644000176200001440000000522214645137106024067 0ustar liggesusers#ifndef STAN_MATH_REV_META_IS_REV_MATRIX_HPP #define STAN_MATH_REV_META_IS_REV_MATRIX_HPP #include #include #include #include #include #include #include namespace stan { /** \ingroup type_trait * Defines a static member named value which is defined to be true * if the type is either a type derived from `Eigen::EigenBase` with a `Scalar` * type of `var_value` or a `var_value` where T is derived from * `Eigen::EigenBase` */ template struct is_rev_matrix< T, require_all_t>, math::disjunction< math::conjunction, is_eigen>>, is_eigen>>> : std::true_type {}; /** \ingroup type_trait * Defines a static member named value which is defined to be true * if the type is either a type derived from `Eigen::EigenBase` with a `Scalar` * type of `var_value` or a `var_value` where T is derived from * `Eigen::EigenBase`. And the type must have a compile time constant number * of columns equal to 1. 
*/ template struct is_rev_col_vector< T, require_all_t>, math::disjunction, is_eigen_col_vector>>>> : std::true_type {}; /** \ingroup type_trait * Defines a static member named value which is defined to be true * if the type is either a type derived from `Eigen::EigenBase` with a `Scalar` * type of `var_value` or a `var_value` where T is derived from * `Eigen::EigenBase`. And the type must have a compile time constant number * of rows equal to 1. */ template struct is_rev_row_vector< T, require_all_t>, math::disjunction, is_eigen_row_vector>>>> : std::true_type {}; /** \ingroup type_trait * Defines a static member named value which is defined to be true * if the type is either a type derived from `Eigen::EigenBase` with a `Scalar` * type of `var_value` or a `var_value` where T is derived from * `Eigen::EigenBase`. And the type must have a compile time constant number * of rows equal to 1. */ template struct is_rev_vector, is_rev_row_vector>> : std::true_type {}; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/is_var.hpp0000644000176200001440000000123714645137106022501 0ustar liggesusers#ifndef STAN_MATH_REV_META_IS_VAR_HPP #define STAN_MATH_REV_META_IS_VAR_HPP #include #include #include namespace stan { namespace internal { template struct is_var_impl : std::false_type {}; template struct is_var_impl> : std::true_type {}; } // namespace internal /** \ingroup type_trait * Specialization for checking if value of T minus cv qualifier is a var_value. */ template struct is_var>::value>> : std::true_type {}; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/conditional_var_value.hpp0000644000176200001440000000270614645137106025567 0ustar liggesusers#ifndef STAN_MATH_REV_META_CONDITIONAL_VAR_EIGEN_HPP #define STAN_MATH_REV_META_CONDITIONAL_VAR_EIGEN_HPP #include #include #include namespace stan { /** * Conditionally construct a var_value container based on a scalar type. 
For * var types as the scalar, the `var_value`'s inner type will have a * scalar of a double. * @tparam T_scalar Determines the scalar (var/double) of the type. * @tparam T_container Determines the container (matrix/vector/matrix_cl ...) of * the type. */ template struct conditional_var_value { using type = std::conditional_t>::value, math::var_value>>, plain_type_t>; }; template struct conditional_var_value> { using type = std::vector>::type>; }; template using conditional_var_value_t = typename conditional_var_value::type; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/rev_matrix_type.hpp0000644000176200001440000000224114645137106024433 0ustar liggesusers#ifndef STAN_MATH_REV_META_REV_MATRIX_TYPE_HPP #define STAN_MATH_REV_META_REV_MATRIX_TYPE_HPP #include #include #include namespace stan { /** * Determines a return type for a function that accepts given inputs and wants * to return a matrix (or vector or row vector) with given compile time number * of rows and columns. * * Returns `var_value` if any of the inputs are `var_value`, otherwise an Eigen * matrix of `var` or `double`. * @tparam Rows number of compile time rows * @tparam Cols number of compile time columns * @tparam Inputs types of arguments the function accepts */ template struct rev_matrix_type { using type = std::conditional_t< math::disjunction, is_eigen>>...>::value, math::var_value>, Eigen::Matrix, Rows, Cols>>; }; template using rev_matrix_t = typename rev_matrix_type::type; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/partials_type.hpp0000644000176200001440000000073714645137106024102 0ustar liggesusers#ifndef STAN_MATH_REV_META_PARTIALS_TYPE_HPP #define STAN_MATH_REV_META_PARTIALS_TYPE_HPP #include #include #include namespace stan { /** \ingroup type_trait * Specialization of partials type returns double if input type is a double. 
*/ template struct partials_type> { using type = typename std::decay_t::value_type; }; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/is_arena_matrix.hpp0000644000176200001440000000126314645137106024362 0ustar liggesusers#ifndef STAN_MATH_REV_META_IS_ARENA_MATRIX_HPP #define STAN_MATH_REV_META_IS_ARENA_MATRIX_HPP #include #include namespace stan { namespace internal { template struct is_arena_matrix_impl : std::false_type {}; template struct is_arena_matrix_impl> : std::true_type {}; } // namespace internal /** \ingroup type_trait * Defines a static member named value which is defined to be true * if the type is `arena_matrix` */ template struct is_arena_matrix< T, require_t>>> : std::true_type {}; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/is_vari.hpp0000644000176200001440000000135114645137106022647 0ustar liggesusers#ifndef STAN_MATH_REV_META_IS_VARI_HPP #define STAN_MATH_REV_META_IS_VARI_HPP #include #include #include namespace stan { namespace internal { template struct is_vari_impl : std::false_type {}; template struct is_vari_impl> : std::true_type {}; template struct is_vari_impl> : std::true_type {}; } // namespace internal /** \ingroup type_trait * Specialization for checking if value of T minus cv qualifier and pointer is a * vari_value. */ template struct is_vari>>> : std::true_type {}; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/promote_var_matrix.hpp0000644000176200001440000000171314645137106025136 0ustar liggesusers#ifndef STAN_MATH_REV_META_PROMOTE_VAR_MATRIX #define STAN_MATH_REV_META_PROMOTE_VAR_MATRIX #include #include namespace stan { /** * Given an Eigen type and several inputs, determine if a matrix should be * `var` or `Matrix`. * * `Matrix` will not necessarily be a plain type * * @tparam ReturnType An Eigen matrix used for composing the `var` or * `Matrix` type. * @tparam Types Parameter pack holding any mix of types. 
If any of `Types` * are a `var` this holds a `var` type. * Else the type will be `Matrix` */ template using promote_var_matrix_t = std::conditional_t< is_any_var_matrix::value, stan::math::var_value>, stan::math::promote_scalar_t, ReturnType>>; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/plain_type.hpp0000644000176200001440000000127214645137106023361 0ustar liggesusers#ifndef STAN_MATH_REV_META_PLAIN_TYPE_HPP #define STAN_MATH_REV_META_PLAIN_TYPE_HPP #include #include #include namespace stan { /** * Determines plain (non expression) type associated with \c T. For `var_value` * with an underlying eigen type this is a `var_value>` * * @tparam T type to determine plain type of */ template struct plain_type< T, require_t, is_eigen>>>> { using type = typename stan::math::var_value>>; }; } // namespace stan #endif // STAN_MATH_PRIM_META_PLAIN_TYPE_HPP StanHeaders/inst/include/stan/math/rev/meta/return_var_matrix.hpp0000644000176200001440000000177614645137106025001 0ustar liggesusers#ifndef STAN_MATH_REV_META_RETURN_VAR_MATRIX #define STAN_MATH_REV_META_RETURN_VAR_MATRIX #include #include namespace stan { /** * Given an Eigen type and several inputs, determine if a matrix should be * `var` or `Matrix`. * * `Matrix` will be a plain type * * @tparam ReturnType An Eigen matrix used for composing the `var` or * `Matrix` type. * @tparam Types Parameter pack holding any mix of types. If any of `Types` * are a `var` this holds a `var` type. 
* Else the type will be `Matrix` */ template using return_var_matrix_t = std::conditional_t< is_any_var_matrix::value, stan::math::var_value< stan::math::promote_scalar_t>>, stan::math::promote_scalar_t, plain_type_t>>; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/meta/arena_type.hpp0000644000176200001440000000355114645137106023346 0ustar liggesusers#ifndef STAN_MATH_REV_META_ARENA_TYPE_HPP #define STAN_MATH_REV_META_ARENA_TYPE_HPP #include #include #include #include #include #include namespace stan { namespace internal { template struct arena_type_impl {}; template struct arena_type_impl< T, require_t>> { using type = T; }; template struct arena_type_impl, bool_constant::value>>> { using type = T; }; template struct arena_type_impl> { using T_ad = typename arena_type_impl>::type; using type = std::vector>; }; template struct arena_type_impl< T, require_eigen_t, std::enable_if_t> { using type = math::arena_matrix>; }; template struct arena_type_impl< T, require_eigen_t, std::enable_if_t> { using type = plain_type_t; }; } // namespace internal /** * Determines a type that can be used in place of `T` that does any dynamic * allocations on the AD stack. This way resulting types are trivially * destructible and can be used in vari classes. */ template using arena_t = typename internal::arena_type_impl>::type; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/0000755000176200001440000000000014645161272021235 5ustar liggesusersStanHeaders/inst/include/stan/math/rev/functor/cvodes_integrator.hpp0000644000176200001440000003010714645137106025467 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_INTEGRATE_ODE_CVODES_HPP #define STAN_MATH_REV_FUNCTOR_INTEGRATE_ODE_CVODES_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Integrator interface for CVODES' ODE solvers (Adams & BDF * methods). 
* * @tparam Lmm ID of ODE solver (1: ADAMS, 2: BDF) * @tparam F Type of ODE right hand side * @tparam T_y0 Type of initial state * @tparam T_param Type of scalars for parameters * @tparam T_t0 Type of scalar of initial time point * @tparam T_ts Type of time-points where ODE solution is returned */ template class cvodes_integrator { using T_Return = return_type_t; using T_y0_t0 = return_type_t; const char* function_name_; sundials::Context sundials_context_; const F& f_; const Eigen::Matrix y0_; const T_t0 t0_; const std::vector& ts_; std::tuple args_tuple_; std::tuple()))>...> value_of_args_tuple_; const size_t N_; std::ostream* msgs_; double relative_tolerance_; double absolute_tolerance_; long int max_num_steps_; // NOLINT(runtime/int) const size_t num_y0_vars_; const size_t num_args_vars_; coupled_ode_system coupled_ode_; std::vector coupled_state_; N_Vector nv_state_; N_Vector* nv_state_sens_; SUNMatrix A_; SUNLinearSolver LS_; /** * Implements the function of type CVRhsFn which is the user-defined * ODE RHS passed to CVODES. */ static int cv_rhs(realtype t, N_Vector y, N_Vector ydot, void* user_data) { cvodes_integrator* integrator = static_cast(user_data); integrator->rhs(t, NV_DATA_S(y), NV_DATA_S(ydot)); return 0; } /** * Implements the function of type CVSensRhsFn which is the * RHS of the sensitivity ODE system. */ static int cv_rhs_sens(int Ns, realtype t, N_Vector y, N_Vector ydot, N_Vector* yS, N_Vector* ySdot, void* user_data, N_Vector tmp1, N_Vector tmp2) { cvodes_integrator* integrator = static_cast(user_data); integrator->rhs_sens(t, NV_DATA_S(y), yS, ySdot); return 0; } /** * Implements the function of type CVDlsJacFn which is the * user-defined callback for CVODES to calculate the jacobian of the * ode_rhs wrt to the states y. The jacobian is stored in column * major format. 
*/ static int cv_jacobian_states(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, void* user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { cvodes_integrator* integrator = static_cast(user_data); integrator->jacobian_states(t, NV_DATA_S(y), J); return 0; } /** * Calculates the ODE RHS, dy_dt, using the user-supplied functor at * the given time t and state y. */ inline void rhs(double t, const double y[], double dy_dt[]) const { const Eigen::VectorXd y_vec = Eigen::Map(y, N_); Eigen::VectorXd dy_dt_vec = math::apply( [&](auto&&... args) { return f_(t, y_vec, msgs_, args...); }, value_of_args_tuple_); check_size_match("cvodes_integrator", "dy_dt", dy_dt_vec.size(), "states", N_); std::copy(dy_dt_vec.data(), dy_dt_vec.data() + dy_dt_vec.size(), dy_dt); } /** * Calculates the jacobian of the ODE RHS wrt to its states y at the * given time-point t and state y. */ inline void jacobian_states(double t, const double y[], SUNMatrix J) const { Eigen::VectorXd fy; Eigen::MatrixXd Jfy; auto f_wrapped = [&](const Eigen::Matrix& y) { return math::apply( [&](auto&&... args) { return f_(t, y, msgs_, args...); }, value_of_args_tuple_); }; jacobian(f_wrapped, Eigen::Map(y, N_), fy, Jfy); for (size_t j = 0; j < Jfy.cols(); ++j) { for (size_t i = 0; i < Jfy.rows(); ++i) { SM_ELEMENT_D(J, i, j) = Jfy(i, j); } } } /** * Calculates the RHS of the sensitivity ODE system which * corresponds to the coupled ode system from which the first N * states are omitted, since the first N states are the ODE RHS * which CVODES separates from the main ODE RHS. 
*/ inline void rhs_sens(double t, const double y[], N_Vector* yS, N_Vector* ySdot) { std::vector z(coupled_state_.size()); std::vector dz_dt; std::copy(y, y + N_, z.data()); for (std::size_t s = 0; s < num_y0_vars_ + num_args_vars_; s++) { std::copy(NV_DATA_S(yS[s]), NV_DATA_S(yS[s]) + N_, z.data() + (s + 1) * N_); } coupled_ode_(z, dz_dt, t); for (std::size_t s = 0; s < num_y0_vars_ + num_args_vars_; s++) { std::move(dz_dt.data() + (s + 1) * N_, dz_dt.data() + (s + 2) * N_, NV_DATA_S(ySdot[s])); } } public: /** * Construct cvodes_integrator object * * @param function_name Calling function name (for printing debugging * messages) * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param relative_tolerance Relative tolerance passed to CVODES * @param absolute_tolerance Absolute tolerance passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand * side function * @return Solution to ODE at times \p ts * @return a vector of states, each state being a vector of the * same size as the state variable, corresponding to a time in ts. * @throw std::domain_error if y0, t0, ts, theta, x are not * finite, all elements of ts are not greater than t0, or ts is not * sorted in strictly increasing order. * @throw std::invalid_argument if arguments are the wrong * size or tolerances or max_num_steps are out of range. */ template * = nullptr> cvodes_integrator(const char* function_name, const F& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance, double absolute_tolerance, long int max_num_steps, // NOLINT(runtime/int) std::ostream* msgs, const T_Args&... 
args) : function_name_(function_name), sundials_context_(), f_(f), y0_(y0.template cast()), t0_(t0), ts_(ts), args_tuple_(args...), value_of_args_tuple_(value_of(args)...), N_(y0.size()), msgs_(msgs), relative_tolerance_(relative_tolerance), absolute_tolerance_(absolute_tolerance), max_num_steps_(max_num_steps), num_y0_vars_(count_vars(y0_)), num_args_vars_(count_vars(args...)), coupled_ode_(f, y0_, msgs, args...), coupled_state_(coupled_ode_.initial_state()) { check_finite(function_name, "initial state", y0_); check_finite(function_name, "initial time", t0_); check_finite(function_name, "times", ts_); // Code from: https://stackoverflow.com/a/17340003 . Should probably do // something better math::apply( [&](auto&&... args) { std::vector unused_temp{ 0, (check_finite(function_name, "ode parameters and data", args), 0)...}; }, args_tuple_); check_nonzero_size(function_name, "times", ts_); check_nonzero_size(function_name, "initial state", y0_); check_sorted(function_name, "times", ts_); check_less(function_name, "initial time", t0_, ts_[0]); check_positive_finite(function_name, "relative_tolerance", relative_tolerance_); check_positive_finite(function_name, "absolute_tolerance", absolute_tolerance_); check_positive(function_name, "max_num_steps", max_num_steps_); nv_state_ = N_VMake_Serial(N_, &coupled_state_[0], sundials_context_); nv_state_sens_ = nullptr; A_ = SUNDenseMatrix(N_, N_, sundials_context_); LS_ = SUNLinSol_Dense(nv_state_, A_, sundials_context_); if (num_y0_vars_ + num_args_vars_ > 0) { nv_state_sens_ = N_VCloneEmptyVectorArray(num_y0_vars_ + num_args_vars_, nv_state_); for (std::size_t i = 0; i < num_y0_vars_ + num_args_vars_; i++) { NV_DATA_S(nv_state_sens_[i]) = &coupled_state_[N_] + i * N_; } } } ~cvodes_integrator() { SUNLinSolFree(LS_); SUNMatDestroy(A_); N_VDestroy_Serial(nv_state_); if (num_y0_vars_ + num_args_vars_ > 0) { N_VDestroyVectorArray(nv_state_sens_, num_y0_vars_ + num_args_vars_); } } /** * Solve the ODE initial value problem y' = 
f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the stiff backward differentiation formula * (BDF) solver in CVODES. * * @return std::vector of Eigen::Matrix of the states of the ODE, one for each * solution time (excluding the initial state) */ std::vector> operator()() { std::vector> y; void* cvodes_mem = CVodeCreate(Lmm, sundials_context_); if (cvodes_mem == nullptr) { throw std::runtime_error("CVodeCreate failed to allocate memory"); } try { CHECK_CVODES_CALL(CVodeInit(cvodes_mem, &cvodes_integrator::cv_rhs, value_of(t0_), nv_state_)); // Assign pointer to this as user data CHECK_CVODES_CALL( CVodeSetUserData(cvodes_mem, reinterpret_cast(this))); cvodes_set_options(cvodes_mem, max_num_steps_); CHECK_CVODES_CALL(CVodeSStolerances(cvodes_mem, relative_tolerance_, absolute_tolerance_)); CHECK_CVODES_CALL(CVodeSetLinearSolver(cvodes_mem, LS_, A_)); CHECK_CVODES_CALL( CVodeSetJacFn(cvodes_mem, &cvodes_integrator::cv_jacobian_states)); // initialize forward sensitivity system of CVODES as needed if (num_y0_vars_ + num_args_vars_ > 0) { CHECK_CVODES_CALL(CVodeSensInit( cvodes_mem, static_cast(num_y0_vars_ + num_args_vars_), CV_STAGGERED, &cvodes_integrator::cv_rhs_sens, nv_state_sens_)); CHECK_CVODES_CALL(CVodeSetSensErrCon(cvodes_mem, SUNTRUE)); CHECK_CVODES_CALL(CVodeSensEEtolerances(cvodes_mem)); } double t_init = value_of(t0_); for (size_t n = 0; n < ts_.size(); ++n) { double t_final = value_of(ts_[n]); if (t_final != t_init) { CHECK_CVODES_CALL( CVode(cvodes_mem, t_final, nv_state_, &t_init, CV_NORMAL)); if (num_y0_vars_ + num_args_vars_ > 0) { CHECK_CVODES_CALL( CVodeGetSens(cvodes_mem, &t_init, nv_state_sens_)); } } y.emplace_back(math::apply( [&](auto&&... 
args) { return ode_store_sensitivities(f_, coupled_state_, y0_, t0_, ts_[n], msgs_, args...); }, args_tuple_)); t_init = t_final; } } catch (const std::exception& e) { CVodeFree(&cvodes_mem); throw; } CVodeFree(&cvodes_mem); return y; } }; // cvodes integrator } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/dae_system.hpp0000644000176200001440000001430214645137106024102 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_DAE_RESIDUAL_HPP #define STAN_MATH_REV_FUNCTOR_DAE_RESIDUAL_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * IDAS DAE system that contains information on residual * equation functor, sensitivity residual equation functor, * as well as initial conditions. This is a base type that * is intended to contain common values used by forward * sensitivity system. * * @tparam F type of functor for DAE residual * @tparam Tyy scalar type of initial unknown values * @tparam Typ scalar type of initial unknown's derivative values * @tparam Tpar scalar type of parameters */ template class dae_system { using dae_type = dae_system; protected: const F& f_; /**< residual functor */ std::tuple()))...> local_args_tuple_; std::tuple()))>...> dbl_args_tuple_; std::tuple args_tuple_; std::ostream* msgs_; public: Tyy const& yy; /**< state variable y */ Typ const& yp; /**< time derivatives of y */ Eigen::VectorXd dbl_yy; Eigen::VectorXd dbl_yp; const size_t N; const size_t M; const size_t ns; vari** varis; std::vector all_vars; Eigen::VectorXd dbl_rr; static constexpr bool is_var_yy0 = stan::is_var>::value; static constexpr bool is_var_yp0 = stan::is_var>::value; static constexpr bool is_var_par = stan::is_var>::value; static constexpr bool use_fwd_sens = is_var_yy0 || is_var_yp0 || is_var_par; using scalar_t = stan::return_type_t; using return_t = std::vector>; /** * Construct IDAS DAE system from initial 
condition and parameters * * @param[in] f DAE residual functor * @param[in] eq_id array for DAE's variable ID(1 for * * derivative variables, 0 for algebraic variables). * @param[in] yy0 initial condition * @param[in] yp0 initial condition for derivatives * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to DAE right hand * side */ dae_system(const F& f, const Tyy& yy0, const Typ& yp0, std::ostream* msgs, const T_par&... args) : f_(f), local_args_tuple_(deep_copy_vars(args)...), dbl_args_tuple_(value_of(args)...), args_tuple_(std::forward_as_tuple(args...)), msgs_(msgs), yy(yy0), yp(yp0), dbl_yy(stan::math::value_of(yy0)), dbl_yp(stan::math::value_of(yp0)), N(yy0.size()), M(count_vars(args...)), ns((is_var_yy0 ? N : 0) + (is_var_yp0 ? N : 0) + M), varis(ChainableStack::instance_->memalloc_.alloc_array(ns)), all_vars(ns), dbl_rr(N) { save_varis(varis, yy0, yp0, args...); for (int i = 0; i < ns; ++i) { all_vars[i].vi_ = *(varis + i); } } void eval_residual(double t) { dbl_rr = math::apply( [&](auto&&... 
args) { return f_(t, dbl_yy, dbl_yp, msgs_, args...); }, dbl_args_tuple_); } /** * Evaluate DAE residual according to IDAS signature */ static int idas_res(double t, N_Vector yy, N_Vector yp, N_Vector rr, void* user_data) { dae_type* dae = static_cast(user_data); dae->dbl_yy = Eigen::Map(NV_DATA_S(yy), dae->N); dae->dbl_yp = Eigen::Map(NV_DATA_S(yp), dae->N); dae->eval_residual(t); Eigen::Map(NV_DATA_S(rr), dae->N) = dae->dbl_rr; return 0; } /** * Evaluate DAE sensitivity residual according to IDAS signature */ static int idas_sens_res(int ns, double t, N_Vector yy, N_Vector yp, N_Vector res, N_Vector* yys, N_Vector* yps, N_Vector* ress, void* user_data, N_Vector temp1, N_Vector temp2, N_Vector temp3) { dae_type* dae = static_cast(user_data); const int n = dae->N; const int m = dae->M; for (int i = 0; i < ns; ++i) { N_VConst(0.0, ress[i]); } // Run nested autodiff in this scope stan::math::nested_rev_autodiff nested; Eigen::Matrix yy_var(n), yp_var(n); for (int i = 0; i < n; ++i) { yy_var.coeffRef(i) = NV_Ith_S(yy, i); yp_var.coeffRef(i) = NV_Ith_S(yp, i); } Eigen::VectorXd g(m); Eigen::Matrix fy = math::apply( [&](auto&&... args) { return dae->f_(t, yy_var, yp_var, dae->msgs_, args...); }, dae->local_args_tuple_); for (int i = 0; i < n; ++i) { if (i > 0) { nested.set_zero_all_adjoints(); } fy[i].grad(); for (int j = 0; j < ns; ++j) { auto yysp = N_VGetArrayPointer(yys[j]); auto ypsp = N_VGetArrayPointer(yps[j]); auto ressp = N_VGetArrayPointer(ress[j]); for (int k = 0; k < n; ++k) { ressp[i] += yy_var[k].adj() * yysp[k] + yp_var[k].adj() * ypsp[k]; } } if (is_var_par) { g.fill(0); stan::math::apply( [&](auto&&... 
args) { stan::math::accumulate_adjoints(g.data(), args...); }, dae->local_args_tuple_); for (int j = 0; j < m; ++j) { auto ressp = N_VGetArrayPointer(ress[ns - m + j]); ressp[i] += g[j]; } stan::math::for_each([](auto&& arg) { stan::math::zero_adjoints(arg); }, dae->local_args_tuple_); } } return 0; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/cvodes_integrator_adjoint.hpp0000644000176200001440000007151014645137106027202 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_CVODES_INTEGRATOR_ADJOINT_HPP #define STAN_MATH_REV_FUNCTOR_CVODES_INTEGRATOR_ADJOINT_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Integrator interface for CVODES' adjoint ODE solvers (Adams & BDF * methods). * * @tparam F Type of ODE right hand side * @tparam T_y0 Type of scalars for initial state * @tparam T_t0 Type of initial time * @tparam T_ts Type of time-points where ODE solution is returned * @tparam T_Args Types of pass-through parameters */ template class cvodes_integrator_adjoint_vari : public vari_base { using T_Return = return_type_t; using T_y0_t0 = return_type_t; static constexpr bool is_var_ts_{is_var::value}; static constexpr bool is_var_t0_{is_var::value}; static constexpr bool is_var_y0_{is_var::value}; static constexpr bool is_var_y0_t0_{is_var::value}; static constexpr bool is_any_var_args_{ disjunction>...>::value}; static constexpr bool is_var_return_{is_var::value}; static constexpr bool is_var_only_ts_{ is_var_ts_ && !(is_var_t0_ || is_var_y0_t0_ || is_any_var_args_)}; const size_t num_args_vars_; const double relative_tolerance_forward_; const double relative_tolerance_backward_; const double relative_tolerance_quadrature_; const double absolute_tolerance_quadrature_; const long int max_num_steps_; // NOLINT(runtime/int) const long int num_steps_between_checkpoints_; // 
NOLINT(runtime/int) const size_t N_; std::ostream* msgs_; vari** y_return_varis_; vari** args_varis_; const int interpolation_polynomial_; const int solver_forward_; const int solver_backward_; int index_backward_; bool backward_is_initialized_{false}; /** * Since the CVODES solver manages memory with malloc calls, these resources * must be freed using a destructor call (which is not being called for the * vari class). */ struct cvodes_solver : public chainable_alloc { sundials::Context sundials_context_; const std::string function_name_str_; const std::decay_t f_; const size_t N_; std::vector y_; std::vector ts_; Eigen::Matrix y0_; Eigen::VectorXd absolute_tolerance_forward_; Eigen::VectorXd absolute_tolerance_backward_; Eigen::VectorXd state_forward_; Eigen::VectorXd state_backward_; Eigen::VectorXd quad_; T_t0 t0_; N_Vector nv_state_forward_; N_Vector nv_state_backward_; N_Vector nv_quad_; N_Vector nv_absolute_tolerance_forward_; N_Vector nv_absolute_tolerance_backward_; SUNMatrix A_forward_; SUNMatrix A_backward_; SUNLinearSolver LS_forward_; SUNLinearSolver LS_backward_; void* cvodes_mem_; std::tuple local_args_tuple_; const std::tuple< promote_scalar_t>, T_Args>...> value_of_args_tuple_; template cvodes_solver(const char* function_name, FF&& f, size_t N, const T_y0& y0, const T_t0& t0, const std::vector& ts, const Eigen::VectorXd& absolute_tolerance_forward, const Eigen::VectorXd& absolute_tolerance_backward, size_t num_args_vars, int solver_forward, const T_Args&... 
args) : chainable_alloc(), sundials_context_(), function_name_str_(function_name), f_(std::forward(f)), N_(N), y_(ts.size()), ts_(ts.begin(), ts.end()), y0_(y0), absolute_tolerance_forward_(absolute_tolerance_forward), absolute_tolerance_backward_(absolute_tolerance_backward), state_forward_(value_of(y0)), state_backward_(Eigen::VectorXd::Zero(N)), quad_(Eigen::VectorXd::Zero(num_args_vars)), t0_(t0), nv_state_forward_( N_VMake_Serial(N, state_forward_.data(), sundials_context_)), nv_state_backward_( N_VMake_Serial(N, state_backward_.data(), sundials_context_)), nv_quad_( N_VMake_Serial(num_args_vars, quad_.data(), sundials_context_)), nv_absolute_tolerance_forward_(N_VMake_Serial( N, absolute_tolerance_forward_.data(), sundials_context_)), nv_absolute_tolerance_backward_(N_VMake_Serial( N, absolute_tolerance_backward_.data(), sundials_context_)), A_forward_(SUNDenseMatrix(N, N, sundials_context_)), A_backward_(SUNDenseMatrix(N, N, sundials_context_)), LS_forward_(N == 0 ? nullptr : SUNLinSol_Dense(nv_state_forward_, A_forward_, sundials_context_)), LS_backward_(N == 0 ? nullptr : SUNLinSol_Dense(nv_state_backward_, A_backward_, sundials_context_)), cvodes_mem_(CVodeCreate(solver_forward, sundials_context_)), local_args_tuple_(deep_copy_vars(args)...), value_of_args_tuple_(value_of(args)...) { if (cvodes_mem_ == nullptr) { throw std::runtime_error("CVodeCreate failed to allocate memory"); } } virtual ~cvodes_solver() { SUNMatDestroy(A_forward_); SUNMatDestroy(A_backward_); if (N_ > 0) { SUNLinSolFree(LS_forward_); SUNLinSolFree(LS_backward_); } N_VDestroy_Serial(nv_state_forward_); N_VDestroy_Serial(nv_state_backward_); N_VDestroy_Serial(nv_quad_); N_VDestroy_Serial(nv_absolute_tolerance_forward_); N_VDestroy_Serial(nv_absolute_tolerance_backward_); CVodeFree(&cvodes_mem_); } }; cvodes_solver* solver_{nullptr}; public: /** * Construct cvodes_integrator object. Note: All arguments must be stored as * copies if in doubt. 
The reason is that the references can go out of scope, * since the work done from the integrator is in the chain method. * * @param function_name Calling function name (for printing debugging * messages) * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param relative_tolerance_forward Relative tolerance for forward problem * passed to CVODES * @param absolute_tolerance_forward Absolute tolerance per ODE state for * forward problem passed to CVODES * @param relative_tolerance_backward Relative tolerance for backward problem * passed to CVODES * @param absolute_tolerance_backward Absolute tolerance per ODE state for * backward problem passed to CVODES * @param relative_tolerance_quadrature Relative tolerance for quadrature * problem passed to CVODES * @param absolute_tolerance_quadrature Absolute tolerance for quadrature * problem passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param num_steps_between_checkpoints Number of integrator steps after which * a checkpoint is stored for the backward pass * @param interpolation_polynomial type of polynomial used for interpolation * @param solver_forward solver used for forward pass * @param solver_backward solver used for backward pass * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand * side function * @return Solution to ODE at times \p ts * @return a vector of states, each state being a vector of the * same size as the state variable, corresponding to a time in ts. 
*/ template * = nullptr> cvodes_integrator_adjoint_vari( const char* function_name, FF&& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance_forward, const Eigen::VectorXd& absolute_tolerance_forward, double relative_tolerance_backward, const Eigen::VectorXd& absolute_tolerance_backward, double relative_tolerance_quadrature, double absolute_tolerance_quadrature, long int max_num_steps, // NOLINT(runtime/int) long int num_steps_between_checkpoints, // NOLINT(runtime/int) int interpolation_polynomial, int solver_forward, int solver_backward, std::ostream* msgs, const T_Args&... args) : vari_base(), num_args_vars_(count_vars(args...)), relative_tolerance_forward_(relative_tolerance_forward), relative_tolerance_backward_(relative_tolerance_backward), relative_tolerance_quadrature_(relative_tolerance_quadrature), absolute_tolerance_quadrature_(absolute_tolerance_quadrature), max_num_steps_(max_num_steps), num_steps_between_checkpoints_(num_steps_between_checkpoints), N_(y0.size()), msgs_(msgs), y_return_varis_(is_var_return_ ? 
ChainableStack::instance_->memalloc_ .alloc_array(N_ * ts.size()) : nullptr), args_varis_([&args..., num_vars = this->num_args_vars_]() { vari** vari_mem = ChainableStack::instance_->memalloc_.alloc_array( num_vars); save_varis(vari_mem, args...); return vari_mem; }()), interpolation_polynomial_(interpolation_polynomial), solver_forward_(solver_forward), solver_backward_(solver_backward), backward_is_initialized_(false), solver_(nullptr) { check_finite(function_name, "initial state", y0); check_finite(function_name, "initial time", t0); check_finite(function_name, "times", ts); check_nonzero_size(function_name, "times", ts); check_nonzero_size(function_name, "initial state", y0); check_sorted(function_name, "times", ts); check_less(function_name, "initial time", t0, ts[0]); check_positive_finite(function_name, "relative_tolerance_forward", relative_tolerance_forward_); check_positive_finite(function_name, "absolute_tolerance_forward", absolute_tolerance_forward); check_size_match(function_name, "absolute_tolerance_forward", absolute_tolerance_forward.size(), "states", N_); check_positive_finite(function_name, "relative_tolerance_backward", relative_tolerance_backward_); check_positive_finite(function_name, "absolute_tolerance_backward", absolute_tolerance_backward); check_size_match(function_name, "absolute_tolerance_backward", absolute_tolerance_backward.size(), "states", N_); check_positive_finite(function_name, "relative_tolerance_quadrature", relative_tolerance_quadrature_); check_positive_finite(function_name, "absolute_tolerance_quadrature", absolute_tolerance_quadrature_); check_positive(function_name, "max_num_steps", max_num_steps_); check_positive(function_name, "num_steps_between_checkpoints", num_steps_between_checkpoints_); // for polynomial: 1=CV_HERMITE / 2=CV_POLYNOMIAL if (interpolation_polynomial_ != 1 && interpolation_polynomial_ != 2) invalid_argument(function_name, "interpolation_polynomial", interpolation_polynomial_, "", ", must be 1 for 
Hermite or 2 for polynomial " "interpolation of ODE solution"); // 1=Adams=CV_ADAMS, 2=BDF=CV_BDF if (solver_forward_ != 1 && solver_forward_ != 2) invalid_argument(function_name, "solver_forward", solver_forward_, "", ", must be 1 for Adams or 2 for BDF forward solver"); if (solver_backward_ != 1 && solver_backward_ != 2) invalid_argument(function_name, "solver_backward", solver_backward_, "", ", must be 1 for Adams or 2 for BDF backward solver"); solver_ = new cvodes_solver(function_name, std::forward(f), N_, y0, t0, ts, absolute_tolerance_forward, absolute_tolerance_backward, num_args_vars_, solver_forward_, args...); stan::math::for_each( [func_name = function_name](auto&& arg) { check_finite(func_name, "ode parameters and data", arg); }, solver_->local_args_tuple_); CHECK_CVODES_CALL( CVodeInit(solver_->cvodes_mem_, &cvodes_integrator_adjoint_vari::cv_rhs, value_of(solver_->t0_), solver_->nv_state_forward_)); // Assign pointer to this as user data CHECK_CVODES_CALL( CVodeSetUserData(solver_->cvodes_mem_, reinterpret_cast(this))); cvodes_set_options(solver_->cvodes_mem_, max_num_steps_); CHECK_CVODES_CALL( CVodeSVtolerances(solver_->cvodes_mem_, relative_tolerance_forward_, solver_->nv_absolute_tolerance_forward_)); CHECK_CVODES_CALL(CVodeSetLinearSolver( solver_->cvodes_mem_, solver_->LS_forward_, solver_->A_forward_)); CHECK_CVODES_CALL( CVodeSetJacFn(solver_->cvodes_mem_, &cvodes_integrator_adjoint_vari::cv_jacobian_rhs_states)); // initialize backward sensitivity system of CVODES as needed if (is_var_return_ && !is_var_only_ts_) { CHECK_CVODES_CALL(CVodeAdjInit(solver_->cvodes_mem_, num_steps_between_checkpoints_, interpolation_polynomial_)); } /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the requested forward solver of CVODES. 
*/ const auto ts_dbl = value_of(solver_->ts_); double t_init = value_of(solver_->t0_); for (size_t n = 0; n < ts_dbl.size(); ++n) { double t_final = ts_dbl[n]; if (t_final != t_init) { if (is_var_return_ && !is_var_only_ts_) { int ncheck; CHECK_CVODES_CALL(CVodeF(solver_->cvodes_mem_, t_final, solver_->nv_state_forward_, &t_init, CV_NORMAL, &ncheck)); } else { CHECK_CVODES_CALL(CVode(solver_->cvodes_mem_, t_final, solver_->nv_state_forward_, &t_init, CV_NORMAL)); } } solver_->y_[n] = solver_->state_forward_; if (is_var_return_) { for (std::size_t i = 0; i < N_; ++i) y_return_varis_[N_ * n + i] = new vari(solver_->state_forward_.coeff(i), false); } t_init = t_final; } ChainableStack::instance_->var_stack_.push_back(this); } private: /** * Overloads which setup the states returned from the forward solve. In case * the return type is a double only, then no autodiff is needed. In case of * autodiff then non-chaining varis are setup accordingly. */ void store_state(std::size_t n, const Eigen::VectorXd& state, Eigen::Matrix& state_return) { state_return.resize(N_); for (size_t i = 0; i < N_; i++) { state_return.coeffRef(i) = var(y_return_varis_[N_ * n + i]); } } void store_state(std::size_t n, const Eigen::VectorXd& state, Eigen::Matrix& state_return) { state_return = state; } public: /** * Obtain solution of ODE. * * @return std::vector of Eigen::Matrix of the states of the ODE, one for each * solution time (excluding the initial state) */ std::vector> solution() noexcept { std::vector> y_return( solver_->ts_.size()); for (std::size_t n = 0; n < solver_->ts_.size(); ++n) store_state(n, solver_->y_[n], y_return[n]); return y_return; } /** * No-op for setting adjoints since this class does not own any adjoints. 
*/ void set_zero_adjoint() final{}; void chain() final { if (!is_var_return_) { return; } // for sensitivities wrt to ts we do not need to run the backward // integration if (is_var_ts_) { Eigen::VectorXd step_sens = Eigen::VectorXd::Zero(N_); for (int i = 0; i < solver_->ts_.size(); ++i) { for (int j = 0; j < N_; ++j) { step_sens.coeffRef(j) += y_return_varis_[N_ * i + j]->adj_; } adjoint_of(solver_->ts_[i]) += step_sens.dot(rhs(value_of(solver_->ts_[i]), solver_->y_[i], solver_->value_of_args_tuple_)); step_sens.setZero(); } if (is_var_only_ts_) { return; } } solver_->state_backward_.setZero(); solver_->quad_.setZero(); // At every time step, collect the adjoints from the output // variables and re-initialize the solver double t_init = value_of(solver_->ts_.back()); for (int i = solver_->ts_.size() - 1; i >= 0; --i) { // Take in the adjoints from all the output variables at this point // in time for (int j = 0; j < N_; ++j) { solver_->state_backward_.coeffRef(j) += y_return_varis_[N_ * i + j]->adj_; } double t_final = value_of((i > 0) ? solver_->ts_[i - 1] : solver_->t0_); if (t_final != t_init) { if (unlikely(!backward_is_initialized_)) { CHECK_CVODES_CALL(CVodeCreateB(solver_->cvodes_mem_, solver_backward_, &index_backward_)); CHECK_CVODES_CALL(CVodeSetUserDataB(solver_->cvodes_mem_, index_backward_, reinterpret_cast(this))); // initialize CVODES backward machinery. 
// the states of the backward problem *are* the adjoints // of the ode states CHECK_CVODES_CALL( CVodeInitB(solver_->cvodes_mem_, index_backward_, &cvodes_integrator_adjoint_vari::cv_rhs_adj, t_init, solver_->nv_state_backward_)); CHECK_CVODES_CALL( CVodeSVtolerancesB(solver_->cvodes_mem_, index_backward_, relative_tolerance_backward_, solver_->nv_absolute_tolerance_backward_)); CHECK_CVODES_CALL(CVodeSetMaxNumStepsB( solver_->cvodes_mem_, index_backward_, max_num_steps_)); CHECK_CVODES_CALL(CVodeSetLinearSolverB( solver_->cvodes_mem_, index_backward_, solver_->LS_backward_, solver_->A_backward_)); CHECK_CVODES_CALL(CVodeSetJacFnB( solver_->cvodes_mem_, index_backward_, &cvodes_integrator_adjoint_vari::cv_jacobian_rhs_adj_states)); // Allocate space for backwards quadrature needed when // parameters vary. if (is_any_var_args_) { CHECK_CVODES_CALL( CVodeQuadInitB(solver_->cvodes_mem_, index_backward_, &cvodes_integrator_adjoint_vari::cv_quad_rhs_adj, solver_->nv_quad_)); CHECK_CVODES_CALL( CVodeQuadSStolerancesB(solver_->cvodes_mem_, index_backward_, relative_tolerance_quadrature_, absolute_tolerance_quadrature_)); CHECK_CVODES_CALL(CVodeSetQuadErrConB(solver_->cvodes_mem_, index_backward_, SUNTRUE)); } backward_is_initialized_ = true; } else { // just re-initialize the solver CHECK_CVODES_CALL(CVodeReInitB(solver_->cvodes_mem_, index_backward_, t_init, solver_->nv_state_backward_)); if (is_any_var_args_) { CHECK_CVODES_CALL(CVodeQuadReInitB( solver_->cvodes_mem_, index_backward_, solver_->nv_quad_)); } } CHECK_CVODES_CALL(CVodeB(solver_->cvodes_mem_, t_final, CV_NORMAL)); // obtain adjoint states and update t_init to time point // reached of t_final CHECK_CVODES_CALL(CVodeGetB(solver_->cvodes_mem_, index_backward_, &t_init, solver_->nv_state_backward_)); if (is_any_var_args_) { CHECK_CVODES_CALL(CVodeGetQuadB(solver_->cvodes_mem_, index_backward_, &t_init, solver_->nv_quad_)); } } } // After integrating all the way back to t0, we finally have the // the adjoints we 
wanted // This is the dlog_density / d(initial_time_point) adjoint if (is_var_t0_) { adjoint_of(solver_->t0_) += -solver_->state_backward_.dot( rhs(t_init, value_of(solver_->y0_), solver_->value_of_args_tuple_)); } // These are the dlog_density / d(initial_conditions[s]) adjoints if (is_var_y0_t0_) { forward_as>(solver_->y0_).adj() += solver_->state_backward_; } // These are the dlog_density / d(parameters[s]) adjoints if (is_any_var_args_) { for (size_t s = 0; s < num_args_vars_; ++s) { args_varis_[s]->adj_ += solver_->quad_.coeff(s); } } } private: /** * Call the ODE RHS with given tuple. */ template constexpr auto rhs(double t, const yT& y, const std::tuple& args_tuple) const { return math::apply( [&](auto&&... args) { return solver_->f_(t, y, msgs_, args...); }, args_tuple); } /** * Utility to cast user memory pointer passed in from CVODES to actual typed * object pointer. */ constexpr static cvodes_integrator_adjoint_vari* cast_to_self(void* mem) { return static_cast(mem); } /** * Calculates the ODE RHS, dy_dt, using the user-supplied functor at * the given time t and state y. */ inline int rhs(double t, const double* y, double*& dy_dt) const { const Eigen::VectorXd y_vec = Eigen::Map(y, N_); const Eigen::VectorXd dy_dt_vec = rhs(t, y_vec, solver_->value_of_args_tuple_); check_size_match(solver_->function_name_str_.c_str(), "dy_dt", dy_dt_vec.size(), "states", N_); Eigen::Map(dy_dt, N_) = dy_dt_vec; return 0; } /** * Implements the function of type CVRhsFn which is the user-defined * ODE RHS passed to CVODES. */ constexpr static int cv_rhs(realtype t, N_Vector y, N_Vector ydot, void* user_data) { return cast_to_self(user_data)->rhs(t, NV_DATA_S(y), NV_DATA_S(ydot)); } /* * Calculate the adjoint sensitivity RHS for varying initial conditions * and parameters * * Equation 2.23 in the cvs_guide. 
* * @param[in] t time * @param[in] y state of the base ODE system * @param[in] yB state of the adjoint ODE system * @param[out] yBdot evaluation of adjoint ODE RHS */ inline int rhs_adj(double t, N_Vector y, N_Vector yB, N_Vector yBdot) const { const nested_rev_autodiff nested; Eigen::Matrix y_vars( Eigen::Map(NV_DATA_S(y), N_)); Eigen::Matrix f_y_t_vars = rhs(t, y_vars, solver_->value_of_args_tuple_); check_size_match(solver_->function_name_str_.c_str(), "dy_dt", f_y_t_vars.size(), "states", N_); f_y_t_vars.adj() = -Eigen::Map(NV_DATA_S(yB), N_); grad(); Eigen::Map(NV_DATA_S(yBdot), N_) = y_vars.adj(); return 0; } /** * Implements the function of type CVRhsFnB which is the * RHS of the backward ODE system. */ constexpr static int cv_rhs_adj(realtype t, N_Vector y, N_Vector yB, N_Vector yBdot, void* user_data) { return cast_to_self(user_data)->rhs_adj(t, y, yB, yBdot); } /* * Calculate the RHS for the quadrature part of the adjoint ODE * problem. * * This is the integrand of equation 2.22 in the cvs_guide. * * @param[in] t time * @param[in] y state of the base ODE system * @param[in] yB state of the adjoint ODE system * @param[out] qBdot evaluation of adjoint ODE quadrature RHS */ inline int quad_rhs_adj(double t, N_Vector y, N_Vector yB, N_Vector qBdot) { Eigen::Map y_vec(NV_DATA_S(y), N_); const nested_rev_autodiff nested; // The vars here do not live on the nested stack so must be zero'd // separately stan::math::for_each([](auto&& arg) { zero_adjoints(arg); }, solver_->local_args_tuple_); Eigen::Matrix f_y_t_vars = rhs(t, y_vec, solver_->local_args_tuple_); check_size_match(solver_->function_name_str_.c_str(), "dy_dt", f_y_t_vars.size(), "states", N_); f_y_t_vars.adj() = -Eigen::Map(NV_DATA_S(yB), N_); grad(); math::apply( [&qBdot](auto&&... args) { accumulate_adjoints(NV_DATA_S(qBdot), args...); }, solver_->local_args_tuple_); return 0; } /** * Implements the function of type CVQuadRhsFnB which is the * RHS of the backward ODE system's quadrature. 
*/ constexpr static int cv_quad_rhs_adj(realtype t, N_Vector y, N_Vector yB, N_Vector qBdot, void* user_data) { return cast_to_self(user_data)->quad_rhs_adj(t, y, yB, qBdot); } /** * Calculates the jacobian of the ODE RHS wrt to its states y at the * given time-point t and state y. */ inline int jacobian_rhs_states(double t, N_Vector y, SUNMatrix J) const { Eigen::Map Jfy(SM_DATA_D(J), N_, N_); nested_rev_autodiff nested; Eigen::Matrix y_var( Eigen::Map(NV_DATA_S(y), N_)); Eigen::Matrix fy_var = rhs(t, y_var, solver_->value_of_args_tuple_); check_size_match(solver_->function_name_str_.c_str(), "dy_dt", fy_var.size(), "states", N_); grad(fy_var.coeffRef(0).vi_); Jfy.col(0) = y_var.adj(); for (int i = 1; i < fy_var.size(); ++i) { nested.set_zero_all_adjoints(); grad(fy_var.coeffRef(i).vi_); Jfy.col(i) = y_var.adj(); } Jfy.transposeInPlace(); return 0; } /** * Implements the function of type CVDlsJacFn which is the * user-defined callback for CVODES to calculate the jacobian of the * ode_rhs wrt to the states y. The jacobian is stored in column * major format. */ constexpr static int cv_jacobian_rhs_states(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, void* user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { return cast_to_self(user_data)->jacobian_rhs_states(t, y, J); } /* * Calculate the Jacobian of the RHS of the adjoint ODE (see rhs_adj * below for citation for how this is done) * * @param[in] t Time * @param[in] y State of system * @param[out] J CVode structure where output is to be stored */ inline int jacobian_rhs_adj_states(double t, N_Vector y, SUNMatrix J) const { // J_adj_y = -1 * transpose(J_y) int error_code = jacobian_rhs_states(t, y, J); Eigen::Map J_adj_y(SM_DATA_D(J), N_, N_); J_adj_y.transposeInPlace(); J_adj_y.array() *= -1.0; return error_code; } /** * Implements the CVLsJacFnB function for evaluating the jacobian of * the adjoint problem wrt to the backward states. 
*/ constexpr static int cv_jacobian_rhs_adj_states(realtype t, N_Vector y, N_Vector yB, N_Vector fyB, SUNMatrix J, void* user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { return cast_to_self(user_data)->jacobian_rhs_adj_states(t, y, J); } }; // cvodes integrator adjoint vari } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/partials_propagator.hpp0000644000176200001440000000754014645137106026030 0ustar liggesusers#ifndef STAN_MATH_REV_META_PARTIALS_PROPOGATOR_HPP #define STAN_MATH_REV_META_PARTIALS_PROPOGATOR_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** \ingroup type_trait * \callergraph * This class builds partial derivatives with respect to a set of * operands. There are two reason for the generality of this * class. The first is to handle vector and scalar arguments * without needing to write additional code. The second is to use * this class for writing probability distributions that handle * primitives, reverse mode, and forward mode variables * seamlessly. * * Conceptually, this class is used when we want to manually calculate * the derivative of a function and store this manual result on the * autodiff stack in a sort of "compressed" form. Think of it like an * easy-to-use interface to rev/core/precomputed_gradients. * * This class now supports multivariate use-cases as well by * exposing edge#_.partials_vec * * This is the specialization for when the return type is var, * which should be for all of the reverse mode cases. * * NB: since ops_partials_edge.partials_ and ops_partials_edge.partials_vec * are sometimes represented internally as a broadcast_array, we need to take * care with assignments to them. Indeed, we can assign any right hand side * which allows for indexing to a broadcast_array. 
The resulting behaviour is * that the entry for the first index is what gets assigned. The most common * use-case should be where the rhs is some container of length 1. * * @tparam Ops Type of the operands placed into the edges * @tparam ReturnType The type returned from the `build()` method. */ template class partials_propagator, Ops...> { public: std::tuple< internal::ops_partials_edge>>...> edges_; template explicit partials_propagator(Types&&... ops) : edges_( internal::ops_partials_edge>>( std::forward(ops))...) {} /** \ingroup type_trait * Build the node to be stored on the autodiff graph. * This should contain both the value and the tangent. * * For scalars, we don't calculate any tangents. * For reverse mode, we end up returning a type of var that will calculate * the appropriate adjoint using the stored operands and partials. * Forward mode just calculates the tangent on the spot and returns it in * a vanilla fvar. * * @param value the return value of the function we are compressing * @return the node to be stored in the expression graph for autodiff */ inline var build(double value) { var ret(value); stan::math::for_each( [ret](auto&& edge) mutable { reverse_pass_callback( [operand = edge.operand(), partial = edge.partial(), ret]() mutable { update_adjoints(operand, partial, ret); }); }, edges_); return ret; } }; } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/jacobian.hpp0000644000176200001440000000203114645137106023507 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_JACOBIAN_HPP #define STAN_MATH_REV_FUNCTOR_JACOBIAN_HPP #include #include #include #include #include namespace stan { namespace math { template void jacobian(const F& f, const Eigen::Matrix& x, Eigen::Matrix& fx, Eigen::Matrix& J) { using Eigen::Dynamic; using Eigen::Matrix; // Run nested autodiff in this scope nested_rev_autodiff nested; Matrix x_var(x); Matrix fx_var = f(x_var); fx.resize(fx_var.size()); 
J.resize(x.size(), fx_var.size()); fx = fx_var.val(); grad(fx_var(0).vi_); J.col(0) = x_var.adj(); for (int i = 1; i < fx_var.size(); ++i) { nested.set_zero_all_adjoints(); grad(fx_var(i).vi_); J.col(i) = x_var.adj(); } J.transposeInPlace(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/apply_scalar_unary.hpp0000644000176200001440000000253114645137106025636 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_APPLY_SCALAR_UNARY_HPP #define STAN_MATH_REV_FUNCTOR_APPLY_SCALAR_UNARY_HPP #include #include namespace stan { namespace math { /** * Template specialization to var for vectorizing a unary scalar * function. This is a base scalar specialization. It applies * the function specified by the template parameter to the * argument. * * @tparam F Type of function to apply. */ template struct apply_scalar_unary { /** * Function return type, which is var. */ using return_t = var; /** * Apply the function specified by F to the specified argument. * * @param x Argument variable. * @return Function applied to the variable. */ static inline return_t apply(const var& x) { return F::fun(x); } }; template struct apply_scalar_unary> { /** * Function return type, which is a `var_value` with plain value type. */ using return_t = plain_type_t; /** * Apply the function specified by F to the specified argument. * * @param x Argument variable. * @return Function applied to the variable. */ static inline return_t apply(const T& x) { return F::fun(x); } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/apply_vector_unary.hpp0000644000176200001440000000405214645137106025673 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_APPLY_VECTOR_UNARY_HPP #define STAN_MATH_REV_FUNCTOR_APPLY_VECTOR_UNARY_HPP #include #include #include namespace stan { namespace math { /** * Specialization for use with `var_value` types where T inherits from * EigenBase. Inputs are passed through unmodified. 
*/ template struct apply_vector_unary> { /** * Member function for applying a functor to a `var_value` and * subsequently returning a `var_value`. * * @tparam T Type of argument to which functor is applied. * @tparam F Type of functor to apply. * @param x input to which operation is applied. * @param f functor to apply to Eigen input. * @return object with result of applying functor to input */ template static inline plain_type_t apply(const T& x, const F& f) { return f(x); } /** * Member function for applying a functor to a `var_value` and * subsequently returning a `var_value`. * * @tparam T Type of argument to which functor is applied. * @tparam F Type of functor to apply. * @param x input to which operation is applied. * @param f functor to apply to Eigen input. * @return object with result of applying functor to input */ template static inline plain_type_t apply_no_holder(const T& x, const F& f) { return f(x); } /** * Member function for applying a functor to a `var_value` and * subsequently returning a var. The reduction to a var needs * to be implemented in the definition of the functor. * * @tparam T Type of argument to which functor is applied. * @tparam F Type of functor to apply. * @param x input to which operation is applied. * @param f functor to apply to input. * @return scalar result of applying functor to input. 
*/ template static inline var reduce(const T& x, const F& f) { return f(x); } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/operands_and_partials.hpp0000644000176200001440000003701314645137106026305 0ustar liggesusers#ifndef STAN_MATH_REV_META_OPERANDS_AND_PARTIALS_HPP #define STAN_MATH_REV_META_OPERANDS_AND_PARTIALS_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { // OpenCL template * = nullptr> inline void update_adjoints(var_value& x, const T2& y, const vari& z) { x.adj() += z.adj() * y; } template * = nullptr> inline void update_adjoints(var_value& x, const T2& y, const var& z) { x.adj() += z.adj() * y; } // Scalars template * = nullptr, require_not_var_matrix_t* = nullptr, require_arithmetic_t* = nullptr> inline void update_adjoints(Scalar1 x, Scalar2 y, const vari& z) noexcept { x.adj() += z.adj() * y; } template * = nullptr, require_not_var_matrix_t* = nullptr, require_arithmetic_t* = nullptr> inline void update_adjoints(Scalar1 x, Scalar2 y, const var& z) noexcept { x.adj() += z.adj() * y; } // Matrix template * = nullptr, require_st_arithmetic* = nullptr> inline void update_adjoints(Matrix1& x, const Matrix2& y, const vari& z) { x.adj().array() += z.adj() * y.array(); } template * = nullptr, require_st_arithmetic* = nullptr> inline void update_adjoints(Matrix1& x, const Matrix2& y, const var& z) { x.adj().array() += z.adj() * y.array(); } template * = nullptr> inline constexpr void update_adjoints(Arith&& /* x */, Alt&& /* y */, const vari& /* z */) noexcept {} template * = nullptr> inline constexpr void update_adjoints(Arith&& /* x */, Alt&& /* y */, const var& /* z */) noexcept {} // Vectors template * = nullptr, require_st_arithmetic* = nullptr> inline void update_adjoints(StdVec1& x, const Vec2& y, const vari& z) { for (size_t i = 0; i < 
x.size(); ++i) { update_adjoints(x[i], y[i], z); } } template * = nullptr, require_st_arithmetic* = nullptr> inline void update_adjoints(StdVec1& x, const Vec2& y, const var& z) { for (size_t i = 0; i < x.size(); ++i) { update_adjoints(x[i], y[i], z); } } /** \ingroup type_trait * \callergraph */ template <> class ops_partials_edge { public: double partial_{0}; broadcast_array partials_{partial_}; explicit ops_partials_edge(const var& op) noexcept : operands_(op) {} explicit ops_partials_edge(const ops_partials_edge& other) : partial_(other.partial_), partials_(partial_), operands_(other.operands_) {} inline auto& partial() { return partial_; } inline auto& operand() noexcept { return operands_; } var operands_; static constexpr int size() { return 1; } }; // Vectorized Univariate // Vectorized Univariate template <> class ops_partials_edge> { public: using Op = std::vector>; using partials_t = arena_t; partials_t partials_; // For univariate use-cases broadcast_array partials_vec_; // For multivariate explicit ops_partials_edge(const std::vector& op) : partials_(partials_t::Zero(op.size())), partials_vec_(partials_), operands_(op.begin(), op.end()) {} Op operands_; inline int size() const noexcept { return this->operands_.size(); } inline auto&& operand() noexcept { return std::move(this->operands_); } inline auto& partial() noexcept { return this->partials_; } }; template class ops_partials_edge> { public: using partials_t = arena_t>; partials_t partials_; // For univariate use-cases broadcast_array partials_vec_; // For multivariate explicit ops_partials_edge(const Op& ops) : partials_(partials_t::Zero(ops.rows(), ops.cols())), partials_vec_(partials_), operands_(to_arena(ops)) {} explicit ops_partials_edge( const ops_partials_edge>& other) : partials_(other.partials_), partials_vec_(partials_), operands_(other.operands_) {} inline auto& partial() noexcept { return partials_; } inline auto& operand() noexcept { return operands_; } arena_t operands_; inline auto 
size() const noexcept { return this->operands_.size(); } }; template class ops_partials_edge, require_eigen_t> { public: using partials_t = arena_t; partials_t partials_; // For univariate use-cases broadcast_array partials_vec_; // For multivariate explicit ops_partials_edge(const var_value& ops) : partials_( plain_type_t::Zero(ops.vi_->rows(), ops.vi_->cols())), partials_vec_(partials_), operands_(ops) {} explicit ops_partials_edge( const ops_partials_edge, require_eigen_t>& other) : partials_(other.partials_), partials_vec_(partials_), operands_(other.operands_) {} inline auto& partial() noexcept { return partials_; } inline auto& operand() noexcept { return operands_; } var_value operands_; static constexpr int size() { return 0; } }; // SPECIALIZATIONS FOR MULTIVARIATE VECTORIZATIONS // (i.e. nested containers) template class ops_partials_edge>> { public: using inner_op = arena_t>; using Op = std::vector>; using partial_t = arena_t>; std::vector> partials_vec_; explicit ops_partials_edge(const std::vector>& ops) : partials_vec_(ops.size()), operands_(ops.begin(), ops.end()) { for (size_t i = 0; i < ops.size(); ++i) { partials_vec_[i] = partial_t::Zero(ops[i].rows(), ops[i].cols()); } } Op operands_; inline int size() const noexcept { if (unlikely(this->operands_.size() == 0)) { return 0; } return this->operands_.size() * this->operands_[0].size(); } inline auto&& operand() noexcept { return std::move(this->operands_); } inline auto& partial() noexcept { return this->partials_vec_; } }; template <> class ops_partials_edge>> { public: using inner_vec = std::vector>; using Op = std::vector>; using partial_t = std::vector>; std::vector> partials_vec_; explicit ops_partials_edge(const std::vector>& ops) : partials_vec_(stan::math::size(ops)), operands_(ops.size()) { for (size_t i = 0; i < stan::math::size(ops); ++i) { operands_[i] = inner_vec(ops[i].begin(), ops[i].end()); partials_vec_[i] = partial_t(stan::math::size(ops[i]), 0.0); } } Op operands_; inline int 
size() const noexcept { return this->operands_.size() * this->operands_[0].size(); } inline auto&& operand() noexcept { return std::move(this->operands_); } inline auto&& partial() noexcept { return std::move(this->partials_vec_); } }; template class ops_partials_edge>, require_eigen_t> { public: using partials_t = std::vector, arena_allocator>>; partials_t partials_vec_; explicit ops_partials_edge(const std::vector>& ops) : partials_vec_(ops.size()), operands_(ops.begin(), ops.end()) { for (size_t i = 0; i < ops.size(); ++i) { partials_vec_[i] = plain_type_t::Zero(ops[i].vi_->rows(), ops[i].vi_->cols()); } } std::vector, arena_allocator>> operands_; static constexpr int size() noexcept { return 0; } inline auto&& operand() noexcept { return std::move(this->operands_); } inline auto&& partial() noexcept { return std::move(this->partials_vec_); } }; } // namespace internal /** \ingroup type_trait * \callergraph * This class builds partial derivatives with respect to a set of * operands. There are two reason for the generality of this * class. The first is to handle vector and scalar arguments * without needing to write additional code. The second is to use * this class for writing probability distributions that handle * primitives, reverse mode, and forward mode variables * seamlessly. * * Conceptually, this class is used when we want to manually calculate * the derivative of a function and store this manual result on the * autodiff stack in a sort of "compressed" form. Think of it like an * easy-to-use interface to rev/core/precomputed_gradients. * * This class now supports multivariate use-cases as well by * exposing edge#_.partials_vec * * This is the specialization for when the return type is var, * which should be for all of the reverse mode cases. * * NB: since ops_partials_edge.partials_ and ops_partials_edge.partials_vec * are sometimes represented internally as a broadcast_array, we need to take * care with assignments to them. 
Indeed, we can assign any right hand side * which allows for indexing to a broadcast_array. The resulting behaviour is * that the entry for the first index is what gets assigned. The most common * use-case should be where the rhs is some container of length 1. * * @tparam Op1 type of the first operand * @tparam Op2 type of the second operand * @tparam Op3 type of the third operand * @tparam Op4 type of the fourth operand * @tparam Op5 type of the fifth operand * @tparam Op6 type of the sixth operand * @tparam Op7 type of the seventh operand * @tparam Op8 type of the eighth operand */ template class operands_and_partials { public: internal::ops_partials_edge> edge1_; internal::ops_partials_edge> edge2_; internal::ops_partials_edge> edge3_; internal::ops_partials_edge> edge4_; internal::ops_partials_edge> edge5_; internal::ops_partials_edge> edge6_; internal::ops_partials_edge> edge7_; internal::ops_partials_edge> edge8_; explicit operands_and_partials(const Op1& o1) : edge1_(o1) {} operands_and_partials(const Op1& o1, const Op2& o2) : edge1_(o1), edge2_(o2) {} operands_and_partials(const Op1& o1, const Op2& o2, const Op3& o3) : edge1_(o1), edge2_(o2), edge3_(o3) {} operands_and_partials(const Op1& o1, const Op2& o2, const Op3& o3, const Op4& o4) : edge1_(o1), edge2_(o2), edge3_(o3), edge4_(o4) {} operands_and_partials(const Op1& o1, const Op2& o2, const Op3& o3, const Op4& o4, const Op5& o5) : edge1_(o1), edge2_(o2), edge3_(o3), edge4_(o4), edge5_(o5) {} operands_and_partials(const Op1& o1, const Op2& o2, const Op3& o3, const Op4& o4, const Op5& o5, const Op6& o6) : edge1_(o1), edge2_(o2), edge3_(o3), edge4_(o4), edge5_(o5), edge6_(o6) {} operands_and_partials(const Op1& o1, const Op2& o2, const Op3& o3, const Op4& o4, const Op5& o5, const Op6& o6, const Op7& o7) : edge1_(o1), edge2_(o2), edge3_(o3), edge4_(o4), edge5_(o5), edge6_(o6), edge7_(o7) {} operands_and_partials(const Op1& o1, const Op2& o2, const Op3& o3, const Op4& o4, const Op5& o5, const Op6& o6, const 
Op7& o7, const Op8& o8) : edge1_(o1), edge2_(o2), edge3_(o3), edge4_(o4), edge5_(o5), edge6_(o6), edge7_(o7), edge8_(o8) {} /** \ingroup type_trait * Build the node to be stored on the autodiff graph. * This should contain both the value and the tangent. * * For scalars, we don't calculate any tangents. * For reverse mode, we end up returning a type of var that will calculate * the appropriate adjoint using the stored operands and partials. * Forward mode just calculates the tangent on the spot and returns it in * a vanilla fvar. * * @param value the return value of the function we are compressing * @return the node to be stored in the expression graph for autodiff */ var build(double value) { return make_callback_var( value, [operand1 = edge1_.operand(), partial1 = edge1_.partial(), operand2 = edge2_.operand(), partial2 = edge2_.partial(), operand3 = edge3_.operand(), partial3 = edge3_.partial(), operand4 = edge4_.operand(), partial4 = edge4_.partial(), operand5 = edge5_.operand(), partial5 = edge5_.partial(), operand6 = edge6_.operand(), partial6 = edge6_.partial(), operand7 = edge7_.operand(), partial7 = edge7_.partial(), operand8 = edge8_.operand(), partial8 = edge8_.partial()](const auto& vi) mutable { if (!is_constant::value) { internal::update_adjoints(operand1, partial1, vi); } if (!is_constant::value) { internal::update_adjoints(operand2, partial2, vi); } if (!is_constant::value) { internal::update_adjoints(operand3, partial3, vi); } if (!is_constant::value) { internal::update_adjoints(operand4, partial4, vi); } if (!is_constant::value) { internal::update_adjoints(operand5, partial5, vi); } if (!is_constant::value) { internal::update_adjoints(operand6, partial6, vi); } if (!is_constant::value) { internal::update_adjoints(operand7, partial7, vi); } if (!is_constant::value) { internal::update_adjoints(operand8, partial8, vi); } }); } }; } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/rev/functor/dae.hpp0000644000176200001440000001717414645137106022510 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_DAE_HPP #define STAN_MATH_REV_FUNCTOR_DAE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Solve the DAE initial value problem f(t, y, y')=0, y(t0) = yy0, y'(t0)=yp0 at * a set of times, { t1, t2, t3, ... } using IDAS. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, * 1> operator()(double t, const Eigen::Matrix& yy, * const Eigen::Matrix& yp, * std::ostream* msgs, const T_Args&... args); * * t is the time, yy the vector-valued state, yp the vector-valued * state derivative, msgs a stream for error * messages, and args the optional arguments passed to the DAE solve function * (which are passed through to \p f without modification). * * @tparam F Type of DAE residual functor * @tparam T_yy0 Type of initial state * @tparam T_yp0 Type of initial state derivatives * @tparam T_Args Types of pass-through parameters * * @param func Calling function name (for printing debugging messages) * @param f DAE residual functor * @param yy0 Initial state * @param yp0 Initial state derivatives * @param t0 Initial time * @param ts Times at which to solve the DAE at. All values must be sorted and * not less than t0. 
* @param rtol Relative tolerance passed to IDAS * @param atol Absolute tolerance passed to IDAS * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to DAE right hand side * @return Solution to DAE at times \p ts */ template * = nullptr> std::vector, -1, 1>> dae_tol_impl(const char* func, const F& f, const T_yy& yy0, const T_yp& yp0, double t0, const std::vector& ts, double rtol, double atol, int64_t max_num_steps, std::ostream* msgs, const T_Args&... args) { check_finite(func, "initial state", yy0); check_finite(func, "initial state derivative", yp0); check_nonzero_size(func, "initial state", yy0); check_nonzero_size(func, "initial state derivative", yp0); check_finite(func, "initial time", t0); check_finite(func, "times", ts); check_nonzero_size(func, "times", ts); check_sorted(func, "times", ts); check_less(func, "initial time", t0, ts[0]); check_positive_finite(func, "relative_tolerance", rtol); check_positive_finite(func, "absolute_tolerance", atol); check_positive(func, "max_num_steps", max_num_steps); const auto& args_ref_tuple = std::make_tuple(to_ref(args)...); math::apply( [&](auto&&... args) { std::vector unused_temp{ 0, (check_finite("dae", "DAE parameters and data", args), 0)...}; }, args_ref_tuple); return math::apply( [&](const auto&... args_refs) { dae_system...> dae(f, yy0, yp0, msgs, args_refs...); idas_integrator integ(rtol, atol, max_num_steps); return integ(func, dae, t0, ts); }, args_ref_tuple); } /** * Solve the DAE initial value problem f(t, y, y')=0, y(t0) = yy0, y'(t0)=yp0 at * a set of times, { t1, t2, t3, ... } using IDAS. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, * 1> operator()(double t, const Eigen::Matrix& yy, * const Eigen::Matrix& yp, * std::ostream* msgs, const T_Args&... 
args); * * t is the time, yy the vector-valued state, yp the vector-valued * state derivative, msgs a stream for error * messages, and args the optional arguments passed to the DAE solve function * (which are passed through to \p f without modification). * * @tparam F Type of DAE residual functor * @tparam T_yy0 Type of initial state * @tparam T_yp0 Type of initial state derivatives * @tparam T_Args Types of pass-through parameters * * @param f DAE residual functor * @param yy0 Initial state * @param yp0 Initial state derivatives * @param t0 Initial time * @param ts Times at which to solve the DAE at. All values must be sorted and * not less than t0. * @param rtol Relative tolerance passed to IDAS * @param atol Absolute tolerance passed to IDAS * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to DAE right hand side * @return Solution to DAE at times \p ts */ template * = nullptr> std::vector, -1, 1>> dae_tol(const F& f, const T_yy& yy0, const T_yp& yp0, double t0, const std::vector& ts, double rtol, double atol, int64_t max_num_steps, std::ostream* msgs, const T_Args&... args) { return dae_tol_impl("dae_tol", f, yy0, yp0, t0, ts, rtol, atol, max_num_steps, msgs, args...); } /** * Solve the DAE initial value problem f(t, y, y')=0, y(t0) = yy0, y'(t0)=yp0 at * a set of times, { t1, t2, t3, ... } using IDAS, assuming default controls * (relative tol, absolute tol, max number of steps) = (1.e-10, 1.e-10, 1e8). * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, * 1> operator()(double t, const Eigen::Matrix& yy, * const Eigen::Matrix& yp, * std::ostream* msgs, const T_Args&... 
args); * * t is the time, yy the vector-valued state, yp the vector-valued * state derivative, msgs a stream for error * messages, and args the optional arguments passed to the DAE solve function * (which are passed through to \p f without modification). * * @tparam F Type of DAE residual functor * @tparam T_yy0 Type of initial state * @tparam T_yp0 Type of initial state derivatives * @tparam T_Args Types of pass-through parameters * * @param f DAE residual functor * @param yy0 Initial state * @param yp0 Initial state derivatives * @param t0 Initial time * @param ts Times at which to solve the DAE at. All values must be sorted and * not less than t0. * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to DAE right hand side * @return Solution to DAE at times \p ts */ template * = nullptr> std::vector, -1, 1>> dae(const F& f, const T_yy& yy0, const T_yp& yp0, double t0, const std::vector& ts, std::ostream* msgs, const T_Args&... args) { return dae_tol_impl("dae", f, yy0, yp0, t0, ts, 1.e-10, 1.e-10, 1e8, msgs, args...); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/apply_scalar_binary.hpp0000644000176200001440000001006114645137106025761 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_APPLY_SCALAR_BINARY_HPP #define STAN_MATH_REV_FUNCTOR_APPLY_SCALAR_BINARY_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Specialization for use with combinations of * `Eigen::Matrix` and `var_value` inputs. * Eigen's binaryExpr framework is used for more efficient indexing of both row- * and column-major inputs without separate loops. * * @tparam T1 Type of first argument to which functor is applied. * @tparam T2 Type of second argument to which functor is applied. * @tparam F Type of functor to apply. * @param x First Matrix input to which operation is applied. * @param y Second Matrix input to which operation is applied. 
* @param f functor to apply to Matrix inputs. * @return `var_value` with result of applying functor to inputs. */ template * = nullptr, require_all_matrix_t* = nullptr> inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { check_matching_dims("Binary function", "x", x, "y", y); return f(x, y); } /** * Specialization for use with one `var_value` (row or column) and * a one-dimensional std::vector of integer types * * @tparam T1 Type of first argument to which functor is applied. * @tparam T2 Type of second argument to which functor is applied. * @tparam F Type of functor to apply. * @param x Matrix input to which operation is applied. * @param y Integer std::vector input to which operation is applied. * @param f functor to apply to inputs. * @return var_value object with result of applying functor to inputs. */ template * = nullptr, require_any_std_vector_vt* = nullptr> inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { check_matching_sizes("Binary function", "x", x, "y", y); return f(x, y); } /** * Specialization for use with a two-dimensional std::vector of integer types * and one `var_value`. * * @tparam T1 Type of first argument to which functor is applied. * @tparam T2 Type of second argument to which functor is applied. * @tparam F Type of functor to apply. * @param x Either a var matrix or nested integer std::vector input to which * operation is applied. * @param x Either a var matrix or nested integer std::vector input to which * operation is applied. * @param f functor to apply to inputs. * @return Eigen object with result of applying functor to inputs. */ template * = nullptr, require_any_std_vector_st* = nullptr, require_any_var_matrix_t* = nullptr> inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { return f(x, y); } /** * Specialization for use when the one input is an `var_value type and * the other is a scalar. 
* * @tparam T1 Type of either `var_value` or scalar object to which * functor is applied. * @tparam T2 Type of either `var_value` or scalar object to which * functor is applied. * @tparam F Type of functor to apply. * @param x Matrix or Scalar input to which operation is applied. * @param x Matrix or Scalar input to which operation is applied. * @param f functor to apply to var matrix and scalar inputs. * @return `var_value object with result of applying functor to inputs. * */ template * = nullptr, require_any_var_matrix_t* = nullptr> inline auto apply_scalar_binary(const T1& x, const T2& y, const F& f) { return f(x, y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/algebra_solver_fp.hpp0000644000176200001440000003404114645137106025423 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_FP_SOLVER_HPP #define STAN_MATH_REV_FUNCTOR_FP_SOLVER_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * KINSOL algebraic system data holder that handles * construction & destruction of SUNDIALS data, as well as * auxiliary data that will be used for functor evaluation. * * @tparam F functor type for system function. */ template struct KinsolFixedPointEnv { /** Sundials context **/ sundials::Context sundials_context_; /** RHS functor. */ const F& f_; /** val of params for @c y_ to refer to when params are @c var type */ const Eigen::VectorXd y_dummy; /** ref to val of params */ const Eigen::VectorXd& y_; /** system size */ const size_t N_; /** nb. 
of params */ const size_t M_; /** real data */ const std::vector& x_r_; /** integer data */ const std::vector& x_i_; /** message stream */ std::ostream* msgs_; /** KINSOL memory block */ void* mem_; /** NVECTOR for unknowns */ N_Vector nv_x_; /** NVECTOR for scaling u */ N_Vector nv_u_scal_; /** NVECTOR for scaling f */ N_Vector nv_f_scal_; /** Constructor when y is data */ template KinsolFixedPointEnv(const F& f, const Eigen::Matrix& x, const Eigen::VectorXd& y, const std::vector& x_r, const std::vector& x_i, std::ostream* msgs, const std::vector& u_scale, const std::vector& f_scale) : sundials_context_(), f_(f), y_dummy(), y_(y), N_(x.size()), M_(y.size()), x_r_(x_r), x_i_(x_i), msgs_(msgs), mem_(KINCreate(sundials_context_)), nv_x_(N_VNew_Serial(N_, sundials_context_)), nv_u_scal_(N_VNew_Serial(N_, sundials_context_)), nv_f_scal_(N_VNew_Serial(N_, sundials_context_)) { for (int i = 0; i < N_; ++i) { NV_Ith_S(nv_x_, i) = stan::math::value_of(x(i)); NV_Ith_S(nv_u_scal_, i) = stan::math::value_of(u_scale[i]); NV_Ith_S(nv_f_scal_, i) = stan::math::value_of(f_scale[i]); } } /** Constructor when y is param */ template KinsolFixedPointEnv(const F& f, const Eigen::Matrix& x, const Eigen::Matrix& y, const std::vector& x_r, const std::vector& x_i, std::ostream* msgs, const std::vector& u_scale, const std::vector& f_scale) : sundials_context_(), f_(f), y_dummy(stan::math::value_of(y)), y_(y_dummy), N_(x.size()), M_(y.size()), x_r_(x_r), x_i_(x_i), msgs_(msgs), mem_(KINCreate(sundials_context_)), nv_x_(N_VNew_Serial(N_, sundials_context_)), nv_u_scal_(N_VNew_Serial(N_, sundials_context_)), nv_f_scal_(N_VNew_Serial(N_, sundials_context_)) { for (int i = 0; i < N_; ++i) { NV_Ith_S(nv_x_, i) = stan::math::value_of(x(i)); NV_Ith_S(nv_u_scal_, i) = stan::math::value_of(u_scale[i]); NV_Ith_S(nv_f_scal_, i) = stan::math::value_of(f_scale[i]); } } ~KinsolFixedPointEnv() { N_VDestroy_Serial(nv_x_); N_VDestroy_Serial(nv_u_scal_); N_VDestroy_Serial(nv_f_scal_); KINFree(&mem_); } /** 
Implements the user-defined function passed to KINSOL. */ static int kinsol_f_system(N_Vector x, N_Vector f, void* user_data) { auto g = static_cast*>(user_data); Eigen::VectorXd x_eigen(Eigen::Map(NV_DATA_S(x), g->N_)); Eigen::Map(N_VGetArrayPointer(f), g->N_) = g->f_(x_eigen, g->y_, g->x_r_, g->x_i_, g->msgs_); return 0; } }; /** * Calculate Jacobian Jxy(Jacobian of unknown x w.r.t. the * param y) * given the solution. Specifically, for * * x - f(x, y) = 0 * * we have (Jpq = Jacobian matrix dq/dq) * * Jxy - Jfx * Jxy = Jfy * * therefore Jxy can be solved from system * * (I - Jfx) * Jxy = Jfy * * Jfx and Jfy are obtained through one AD evaluation of f * w.r.t combined vector [x, y]. */ struct FixedPointADJac { /** * Calculate Jacobian Jxy. * * @tparam F RHS functor type * @param x fixed point solution * @param y RHS parameters * @param env KINSOL working environment, see doc for @c KinsolFixedPointEnv. */ template inline Eigen::Matrix operator()( const Eigen::VectorXd& x, const Eigen::Matrix& y, KinsolFixedPointEnv& env) { using stan::math::precomputed_gradients; using stan::math::to_array_1d; using stan::math::var; auto g = [&env](const Eigen::Matrix& par_) { Eigen::Matrix x_(par_.head(env.N_)); Eigen::Matrix y_(par_.tail(env.M_)); return env.f_(x_, y_, env.x_r_, env.x_i_, env.msgs_); }; Eigen::VectorXd theta(x.size() + y.size()); for (int i = 0; i < env.N_; ++i) { theta(i) = x(i); } for (int i = 0; i < env.M_; ++i) { theta(i + env.N_) = env.y_(i); } Eigen::Matrix fx; Eigen::Matrix J_theta; stan::math::jacobian(g, theta, fx, J_theta); Eigen::MatrixXd A(J_theta.block(0, 0, env.N_, env.N_)); Eigen::MatrixXd b(J_theta.block(0, env.N_, env.N_, env.M_)); A = Eigen::MatrixXd::Identity(env.N_, env.N_) - A; Eigen::MatrixXd Jxy = A.colPivHouseholderQr().solve(b); std::vector gradients(env.M_); Eigen::Matrix x_sol(env.N_); std::vector yv(to_array_1d(y)); for (int i = 0; i < env.N_; ++i) { gradients = to_array_1d(Eigen::VectorXd(Jxy.row(i))); x_sol[i] = 
precomputed_gradients(x(i), yv, gradients); } return x_sol; } }; /** * Fixed point solver for problem of form * * x = F(x; theta) * * with x as unknowns and theta parameters. * * The solution for FP iteration * doesn't involve derivatives but only data types. * * @tparam fp_env_type solver environment setup that handles * workspace & auxiliary data encapsulation & RAII, namely * the work environment. Currently only support KINSOL's * dense matrix. * @tparam fp_jac_type functor type for calculating the * jacobian. Currently only support @c * FixedPointADJac that obtain dense Jacobian * through QR decomposition. */ template struct FixedPointSolver; /** * Specialization for fixed point solver when using KINSOL. * * @tparam F RHS functor for fixed point iteration. * @tparam fp_jac_type functor type for calculating the jacobian */ template struct FixedPointSolver, fp_jac_type> { /** * Solve FP using KINSOL * * @param x initial point and final solution. * @param env KINSOL solution environment * @param f_tol Function tolerance * @param max_num_steps max nb. of iterations. */ void kinsol_solve_fp(Eigen::VectorXd& x, KinsolFixedPointEnv& env, double f_tol, int max_num_steps) { int N = env.N_; void* mem = env.mem_; const int default_anderson_depth = 4; int anderson_depth = std::min(N, default_anderson_depth); CHECK_KINSOL_CALL(KINSetNumMaxIters(mem, max_num_steps)); CHECK_KINSOL_CALL(KINSetMAA(mem, anderson_depth)); CHECK_KINSOL_CALL(KINInit(mem, &env.kinsol_f_system, env.nv_x_)); CHECK_KINSOL_CALL(KINSetFuncNormTol(mem, f_tol)); CHECK_KINSOL_CALL(KINSetUserData(mem, static_cast(&env))); kinsol_check(KINSol(mem, env.nv_x_, KIN_FP, env.nv_u_scal_, env.nv_f_scal_), "KINSol", max_num_steps); for (int i = 0; i < N; ++i) { x(i) = NV_Ith_S(env.nv_x_, i); } } /** * Solve data-only FP problem so no need to calculate jacobian. * * @tparam T1 type of init point of iterations * * @param x initial point and final solution. 
* @param y RHS functor parameters * @param env KINSOL solution environment * @param f_tol Function tolerance * @param max_num_steps max nb. of iterations. */ template Eigen::Matrix solve(const Eigen::Matrix& x, const Eigen::Matrix& y, KinsolFixedPointEnv& env, double f_tol, int max_num_steps) { Eigen::VectorXd xd(stan::math::value_of(x)); kinsol_solve_fp(xd, env, f_tol, max_num_steps); return xd; } /** * Solve FP problem and calculate jacobian. * * @tparam T1 type of init point of iterations * * @param x initial point and final solution. * @param y RHS functor parameters * @param env KINSOL solution environment * @param f_tol Function tolerance * @param max_num_steps max nb. of iterations. */ template Eigen::Matrix solve( const Eigen::Matrix& x, const Eigen::Matrix& y, KinsolFixedPointEnv& env, double f_tol, int max_num_steps) { using stan::math::value_of; using stan::math::var; // FP solution Eigen::VectorXd xd(solve(x, Eigen::VectorXd(), env, f_tol, max_num_steps)); fp_jac_type jac_sol; return jac_sol(xd, y, env); } }; /** * Return a fixed pointer to the specified system of algebraic * equations of form * * x = F(x; theta) * * given an initial guess x, and parameters theta and data. Use the * KINSOL solver from the SUNDIALS suite. * * The user can also specify the scaling controls, the function * tolerance, and the maximum number of steps. * * @tparam F type of equation system function. * @tparam T type of initial guess vector. The final solution * type doesn't depend on initial guess type, * but we allow initial guess to be either data or param. * @tparam T_u type of scaling vector for unknowns. We allow * it to be @c var because scaling could be parameter * dependent. Internally these params are converted to data * because scaling is applied. * @tparam T_f type of scaling vector for residual. We allow * it to be @c var because scaling could be parameter * dependent. Internally these params are converted to data * because scaling is applied. 
* * @param[in] f Functor that evaluated the system of equations. * @param[in] x Vector of starting values. * @param[in] y Parameter vector for the equation system. The function * is overloaded to treat y as a vector of doubles or of a * a template type T. * @param[in] x_r Continuous data vector for the equation system. * @param[in] x_i Integer data vector for the equation system. * @param[in, out] msgs The print stream for warning messages. * @param[in] u_scale diagonal scaling matrix elements Du * such that Du*x has all components roughly the same * magnitude when x is close to a solution. * (ref. KINSOL user guide chap.2 sec. "Scaling") * @param[in] f_scale diagonal scaling matrix elements such * that Df*(x-f(x)) has all components roughly the same * magnitude when x is not too close to a solution. * (ref. KINSOL user guide chap.2 sec. "Scaling") * @param[in] f_tol Function-norm stopping tolerance. * @param[in] max_num_steps maximum number of function evaluations. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * @throw std::invalid_argument if y has non-finite elements. * @throw std::invalid_argument if dat has non-finite elements. * @throw std::invalid_argument if dat_int has non-finite elements. * @throw std::invalid_argument if scaled_step_size is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw boost::math::evaluation_error (which is a subclass of * std::runtime_error) if solver exceeds max_num_steps. 
*/ template Eigen::Matrix algebra_solver_fp( const F& f, const Eigen::Matrix& x, const Eigen::Matrix& y, const std::vector& x_r, const std::vector& x_i, const std::vector& u_scale, const std::vector& f_scale, std::ostream* msgs = nullptr, double f_tol = 1e-8, int max_num_steps = 200) { // NOLINT(runtime/int) algebra_solver_check(x, y, x_r, x_i, f_tol, max_num_steps); check_nonnegative("algebra_solver", "u_scale", u_scale); check_nonnegative("algebra_solver", "f_scale", f_scale); check_matching_sizes("algebra_solver", "the algebraic system's output", value_of(f(x, y, x_r, x_i, msgs)), "the vector of unknowns, x,", x); KinsolFixedPointEnv env(f, x, y, x_r, x_i, msgs, u_scale, f_scale); // NOLINT FixedPointSolver, FixedPointADJac> fp; return fp.solve(x, y, env, f_tol, max_num_steps); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/integrate_ode_adams.hpp0000644000176200001440000000310314645137106025720 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_INTEGRATE_ODE_ADAMS_HPP #define STAN_MATH_REV_FUNCTOR_INTEGRATE_ODE_ADAMS_HPP #include #include #include #include #include namespace stan { namespace math { /** * @deprecated use ode_adams */ template std::vector>> integrate_ode_adams(const F& f, const std::vector& y0, const T_t0& t0, const std::vector& ts, const std::vector& theta, const std::vector& x, const std::vector& x_int, std::ostream* msgs = nullptr, double relative_tolerance = 1e-10, double absolute_tolerance = 1e-10, long int max_num_steps = 1e8) { // NOLINT(runtime/int) internal::integrate_ode_std_vector_interface_adapter f_adapted(f); auto y = ode_adams_tol_impl("integrate_ode_adams", f_adapted, to_vector(y0), t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, theta, x, x_int); std::vector>> y_converted; for (size_t i = 0; i < y.size(); ++i) y_converted.push_back(to_array_1d(y[i])); return y_converted; } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/rev/functor/reduce_sum.hpp0000644000176200001440000002571714645137106024114 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_REDUCE_SUM_HPP #define STAN_MATH_REV_FUNCTOR_REDUCE_SUM_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Var specialization of reduce_sum_impl * * @tparam ReduceFunction Type of reducer function * @tparam ReturnType Must be var * @tparam Vec Type of sliced argument * @tparam Args Types of shared arguments */ template struct reduce_sum_impl, ReturnType, Vec, Args...> { struct scoped_args_tuple { ScopedChainableStack stack_; using args_tuple_t = std::tuple()))...>; std::unique_ptr args_tuple_holder_; scoped_args_tuple() : stack_(), args_tuple_holder_(nullptr) {} }; /** * This struct is used by the TBB to accumulate partial * sums over consecutive ranges of the input. To distribute the workload, * the TBB can split larger partial sums into smaller ones in which * case the splitting copy constructor is used. It is designed to * meet the Imperative form requirements of `tbb::parallel_reduce`. * * @note see link [here](https://tinyurl.com/vp7xw2t) for requirements. */ struct recursive_reducer { const size_t num_vars_per_term_; const size_t num_vars_shared_terms_; // Number of vars in shared arguments double* sliced_partials_; // Points to adjoints of the partial calculations Vec vmapped_; std::stringstream msgs_; std::tuple args_tuple_; scoped_args_tuple local_args_tuple_scope_; double sum_{0.0}; Eigen::VectorXd args_adjoints_{0}; template recursive_reducer(size_t num_vars_per_term, size_t num_vars_shared_terms, double* sliced_partials, VecT&& vmapped, ArgsT&&... args) : num_vars_per_term_(num_vars_per_term), num_vars_shared_terms_(num_vars_shared_terms), sliced_partials_(sliced_partials), vmapped_(std::forward(vmapped)), args_tuple_(std::forward(args)...) 
{} /* * This is the copy operator as required for tbb::parallel_reduce * Imperative form. This requires sum_ and arg_adjoints_ be reset * to zero since the newly created reducer is used to accumulate * an independent partial sum. */ recursive_reducer(recursive_reducer& other, tbb::split) : num_vars_per_term_(other.num_vars_per_term_), num_vars_shared_terms_(other.num_vars_shared_terms_), sliced_partials_(other.sliced_partials_), vmapped_(other.vmapped_), args_tuple_(other.args_tuple_) {} /** * Compute, using nested autodiff, the value and Jacobian of * `ReduceFunction` called over the range defined by r and accumulate those * in member variable sum_ (for the value) and args_adjoints_ (for the * Jacobian). The nested autodiff uses deep copies of the involved operands * ensuring that no side effects are implied to the adjoints of the input * operands which reside potentially on a autodiff tape stored in a * different thread other than the current thread of execution. This * function may be called multiple times per object instantiation (so the * sum_ and args_adjoints_ must be accumulated, not just assigned). * * @param r Range over which to compute reduce_sum */ inline void operator()(const tbb::blocked_range& r) { if (r.empty()) { return; } if (args_adjoints_.size() == 0) { args_adjoints_ = Eigen::VectorXd::Zero(num_vars_shared_terms_); } // Obtain reference to a local copy of all shared arguments that do // not point // back to main autodiff stack if (!local_args_tuple_scope_.args_tuple_holder_) { // shared arguments need to be copied to reducer-specific // scope. In this case no need for zeroing adjoints, since the // fresh copy has all adjoints set to zero. local_args_tuple_scope_.stack_.execute([&]() { math::apply( [&](auto&&... 
args) { local_args_tuple_scope_.args_tuple_holder_ = std::make_unique< typename scoped_args_tuple::args_tuple_t>( deep_copy_vars(args)...); }, args_tuple_); }); } else { // set adjoints of shared arguments to zero local_args_tuple_scope_.stack_.execute([] { set_zero_all_adjoints(); }); } auto& args_tuple_local = *(local_args_tuple_scope_.args_tuple_holder_); // Initialize nested autodiff stack const nested_rev_autodiff begin_nest; // Create nested autodiff copies of sliced argument that do not point // back to main autodiff stack std::decay_t local_sub_slice; local_sub_slice.reserve(r.size()); for (size_t i = r.begin(); i < r.end(); ++i) { local_sub_slice.emplace_back(deep_copy_vars(vmapped_[i])); } // Perform calculation var sub_sum_v = math::apply( [&](auto&&... args) { return ReduceFunction()(local_sub_slice, r.begin(), r.end() - 1, &msgs_, args...); }, args_tuple_local); // Compute Jacobian sub_sum_v.grad(); // Accumulate value of reduce_sum sum_ += sub_sum_v.val(); // Accumulate adjoints of sliced_arguments accumulate_adjoints(sliced_partials_ + r.begin() * num_vars_per_term_, std::move(local_sub_slice)); // Accumulate adjoints of shared_arguments math::apply( [&](auto&&... args) { accumulate_adjoints(args_adjoints_.data(), args...); }, args_tuple_local); } /** * Join reducers. Accumuluate the value (sum_) and Jacobian (arg_adoints_) * of the other reducer. * * @param rhs Another partial sum */ inline void join(const recursive_reducer& rhs) { sum_ += rhs.sum_; if (args_adjoints_.size() != 0 && rhs.args_adjoints_.size() != 0) { args_adjoints_ += rhs.args_adjoints_; } else if (args_adjoints_.size() == 0 && rhs.args_adjoints_.size() != 0) { args_adjoints_ = rhs.args_adjoints_; } msgs_ << rhs.msgs_.str(); } }; /** * Call an instance of the function `ReduceFunction` on every element * of an input sequence and sum these terms. * * This specialization is parallelized using tbb and works for reverse * mode autodiff. 
* * ReduceFunction must define an operator() with the same signature as: * var f(Vec&& vmapped_subset, int start, int end, std::ostream* msgs, * Args&&... args) * * `ReduceFunction` must be default constructible without any arguments * * Each call to `ReduceFunction` is responsible for computing the * start through end (inclusive) terms of the overall sum. All args are * passed from this function through to the `ReduceFunction` instances. * However, only the start through end (inclusive) elements of the vmapped * argument are passed to the `ReduceFunction` instances (as the * `vmapped_subset` argument). * * This function distributes computation of the desired sum and the Jacobian * of that sum over multiple threads by coordinating calls to `ReduceFunction` * instances. Results are stored as precomputed varis in the autodiff tree. * * If auto partitioning is true, break work into pieces automatically, * taking grainsize as a recommended work size. The partitioning is * not deterministic nor is the order guaranteed in which partial * sums are accumulated. Due to floating point imprecisions this will likely * lead to slight differences in the accumulated results between * multiple runs. If false, break work deterministically into pieces smaller * than or equal to grainsize and accumulate all the partial sums * in the same order. This still may not achieve bitwise reproducibility. * * @param vmapped Vector containing one element per term of sum * @param auto_partitioning Work partitioning style * @param grainsize Suggested grainsize for tbb * @param[in, out] msgs The print stream for warning messages * @param args Shared arguments used in every sum term * @return Summation of all terms */ inline var operator()(Vec&& vmapped, bool auto_partitioning, int grainsize, std::ostream* msgs, Args&&... 
args) const { if (vmapped.empty()) { return var(0.0); } const std::size_t num_terms = vmapped.size(); const std::size_t num_vars_per_term = count_vars(vmapped[0]); const std::size_t num_vars_sliced_terms = num_terms * num_vars_per_term; const std::size_t num_vars_shared_terms = count_vars(args...); vari** varis = ChainableStack::instance_->memalloc_.alloc_array( num_vars_sliced_terms + num_vars_shared_terms); double* partials = ChainableStack::instance_->memalloc_.alloc_array( num_vars_sliced_terms + num_vars_shared_terms); save_varis(varis, vmapped); save_varis(varis + num_vars_sliced_terms, args...); for (size_t i = 0; i < num_vars_sliced_terms; ++i) { partials[i] = 0.0; } recursive_reducer worker(num_vars_per_term, num_vars_shared_terms, partials, std::forward(vmapped), std::forward(args)...); // we must use task isolation as described here: // https://software.intel.com/content/www/us/en/develop/documentation/tbb-documentation/top/intel-threading-building-blocks-developer-guide/task-isolation.html // this is to ensure that the thread local AD tape ressource is // not being modified from a different task which may happen // whenever this function is being used itself in a parallel // context (like running multiple chains for Stan) tbb::this_task_arena::isolate([&] { if (auto_partitioning) { tbb::parallel_reduce( tbb::blocked_range(0, num_terms, grainsize), worker); } else { tbb::simple_partitioner partitioner; tbb::parallel_deterministic_reduce( tbb::blocked_range(0, num_terms, grainsize), worker, partitioner); } }); for (size_t i = 0; i < num_vars_shared_terms; ++i) { partials[num_vars_sliced_terms + i] = worker.args_adjoints_.coeff(i); } if (msgs) { *msgs << worker.msgs_.str(); } return var(new precomputed_gradients_vari( worker.sum_, num_vars_sliced_terms + num_vars_shared_terms, varis, partials)); } }; } // namespace internal } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/rev/functor/coupled_ode_system.hpp0000644000176200001440000001774414645137106025650 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_COUPLED_ODE_SYSTEM_HPP #define STAN_MATH_REV_FUNCTOR_COUPLED_ODE_SYSTEM_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * The coupled_ode_system_impl template specialization when * the state or parameters are autodiff types. * *

This coupled ode system has N + (N + M) * N states where N is * the size of the base ode system and M is the number of parameters. * *

For the coupled ode system, the first N states are the base * system's states: \f$ \frac{d x_n}{dt} \f$. * *

The next N + M states correspond to the sensitivities of the * initial conditions, then to the sensitivities of the parameters * with respect to the to the first base system equation. Calling * initial conditions \f$ y0 \f$ and parameters \f$ \theta \f$: * * \f[ * \frac{d x_{N + n}}{dt} * = \frac{d}{dt} \frac{\partial x_1}{\partial y0_n} * \f] * * \f[ * \frac{d x_{N + N + m}}{dt} * = \frac{d}{dt} \frac{\partial x_1}{\partial \theta_m} * \f] * *

The next N + M states correspond to the sensitivities * of the initial conditions followed by the sensitivites of the * parameters with respect to the second base system equation, and * so on through the last base system equation. * *

Note: Calculating the sensitivity system requires the Jacobian * of the base ODE RHS wrt to the parameters. The parameters are constant * for successive calls to the exposed operator(). For this reason, * the parameter vector theta is copied upon construction onto the nochain * var autodiff tape which is used in the the nested autodiff performed * in the operator() of this adaptor. Doing so reduces the size of the * nested autodiff and speeds up autodiff. As a side effect, the parameters * will remain on the nochain autodiff part of the autodiff tape being * in use even after destruction of the given instance. Moreover, the * adjoint zeroing for the nested system does not cover the theta * parameter vector part of the nochain autodiff tape and is therefore * set to zero separately. * * @tparam F base ode system functor. Must provide * * template * operator()(double t, const Eigen::Matrix& y, * std::ostream* msgs, const T_args&... args) */ template struct coupled_ode_system_impl { const F& f_; const Eigen::Matrix& y0_; std::tuple()))...> local_args_tuple_; const size_t num_y0_vars_; const size_t num_args_vars; const size_t N_; Eigen::VectorXd args_adjoints_; Eigen::VectorXd y_adjoints_; std::ostream* msgs_; /** * Construct a coupled ode system from the base system function, * initial state of the base system, parameters, and a stream for * messages. * * @param[in] f the base ODE system functor * @param[in] y0 the initial state of the base ode * @param[in, out] msgs stream for messages * @param[in] args other additional arguments */ coupled_ode_system_impl(const F& f, const Eigen::Matrix& y0, std::ostream* msgs, const Args&... args) : f_(f), y0_(y0), local_args_tuple_(deep_copy_vars(args)...), num_y0_vars_(count_vars(y0_)), num_args_vars(count_vars(args...)), N_(y0.size()), args_adjoints_(num_args_vars), y_adjoints_(N_), msgs_(msgs) {} /** * Calculates the right hand side of the coupled ode system (the regular * ode system with forward sensitivities). 
* * @param[in] z state of the coupled ode system; this must be size * size() * @param[out] dz_dt a vector of size size() with the * derivatives of the coupled system with respect to time * @param[in] t time * @throw exception if the base ode function does not return the * expected number of derivatives, N. */ void operator()(const std::vector& z, std::vector& dz_dt, double t) { using std::vector; dz_dt.resize(size()); // Run nested autodiff in this scope nested_rev_autodiff nested; Eigen::Matrix y_vars(N_); for (size_t n = 0; n < N_; ++n) y_vars.coeffRef(n) = z[n]; Eigen::Matrix f_y_t_vars = math::apply( [&](auto&&... args) { return f_(t, y_vars, msgs_, args...); }, local_args_tuple_); check_size_match("coupled_ode_system", "dy_dt", f_y_t_vars.size(), "states", N_); for (size_t i = 0; i < N_; ++i) { dz_dt[i] = f_y_t_vars.coeffRef(i).val(); f_y_t_vars.coeffRef(i).grad(); y_adjoints_ = y_vars.adj(); if (args_adjoints_.size() > 0) { memset(args_adjoints_.data(), 0, sizeof(double) * args_adjoints_.size()); } math::apply( [&](auto&&... 
args) { accumulate_adjoints(args_adjoints_.data(), args...); }, local_args_tuple_); // The vars here do not live on the nested stack so must be zero'd // separately stan::math::for_each([](auto&& arg) { zero_adjoints(arg); }, local_args_tuple_); // No need to zero adjoints after last sweep if (i + 1 < N_) { nested.set_zero_all_adjoints(); } // Compute the right hand side for the sensitivities with respect to the // initial conditions for (size_t j = 0; j < num_y0_vars_; ++j) { double temp_deriv = 0.0; for (size_t k = 0; k < N_; ++k) { temp_deriv += z[N_ + N_ * j + k] * y_adjoints_.coeffRef(k); } dz_dt[N_ + N_ * j + i] = temp_deriv; } // Compute the right hand size for the sensitivities with respect to the // parameters for (size_t j = 0; j < num_args_vars; ++j) { double temp_deriv = args_adjoints_.coeffRef(j); for (size_t k = 0; k < N_; ++k) { temp_deriv += z[N_ + N_ * num_y0_vars_ + N_ * j + k] * y_adjoints_.coeffRef(k); } dz_dt[N_ + N_ * num_y0_vars_ + N_ * j + i] = temp_deriv; } } } /** * Returns the size of the coupled system. * * @return size of the coupled system. */ size_t size() const { return N_ + N_ * num_y0_vars_ + N_ * num_args_vars; } /** * Returns the initial state of the coupled system. * *

Because the starting state is unknown, the coupled system * incorporates the initial conditions as parameters. At the initial * time the Jacobian of the integrated function is the identity * matrix. In addition the coupled system includes the Jacobian of * the integrated function wrt to the parameters. This Jacobian is * zero at the initial time-point. * * @return the initial condition of the coupled system. This is a * vector of length size() where the first N values are the * initial condition of the base ODE and the next N*N elements * correspond to the identity matrix which is the Jacobian of the * integrated function at the initial time-point. The last N*M * elements are all zero as these are the Jacobian wrt to the * parameters at the initial time-point, which is zero. */ std::vector initial_state() const { std::vector initial(size(), 0.0); for (size_t i = 0; i < N_; i++) { initial[i] = value_of(y0_(i)); } for (size_t i = 0; i < num_y0_vars_; i++) { initial[N_ + i * N_ + i] = 1.0; } return initial; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/gradient.hpp0000644000176200001440000001002414645137106023537 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_GRADIENT_HPP #define STAN_MATH_REV_FUNCTOR_GRADIENT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Calculate the value and the gradient of the specified function * at the specified argument. * *

The functor must implement * * * var * operator()(const * Eigen::Matrix&) * * * using only operations that are defined for * var. This latter constraint usually * requires the functions to be defined in terms of the libraries * defined in Stan or in terms of functions with appropriately * general namespace imports that eventually depend on functions * defined in Stan. * * The evaluated gradient is stored into a * Eigen::VectorXd named grad_fx. * *

Time and memory usage is on the order of the size of the * fully unfolded expression for the function applied to the * argument, independently of dimension. * * @tparam F Type of function * @param[in] f Function * @param[in] x Argument to function * @param[out] fx Function applied to argument * @param[out] grad_fx Gradient of function at argument */ template void gradient(const F& f, const Eigen::Matrix& x, double& fx, Eigen::Matrix& grad_fx) { nested_rev_autodiff nested; Eigen::Matrix x_var(x); var fx_var = f(x_var); fx = fx_var.val(); grad_fx.resize(x.size()); grad(fx_var.vi_); grad_fx = x_var.adj(); } /** * Calculate the value and the gradient of the specified function * at the specified argument. * *

The functor must implement * * * var * operator()(const * Eigen::Matrix&) * * * using only operations that are defined for * var. This latter constraint usually * requires the functions to be defined in terms of the libraries * defined in Stan or in terms of functions with appropriately * general namespace imports that eventually depend on functions * defined in Stan. * * The evaluated gradient is stored into the object whose data * begins at *first_grad_fx and ends at * *last_grad_fx. The caller is responsible for * ensuring the size of the object pointed to by * first_grad_fx matches the size of the argument * x. * *

Time and memory usage is on the order of the size of the * fully unfolded expression for the function applied to the * argument, independently of dimension. * * @tparam F Type of function * @tparam EigVec Type of Eigen vector * @tparam InputIt must meet the requirements of * [LegacyInputIterator](https://en.cppreference.com/w/cpp/named_req/InputIterator). * @param[in] f Function * @param[in] x Argument to function * @param[out] fx Function applied to argument * @param[out] first_grad_fx First element of gradient of function at argument * @param[out] last_grad_fx Last element of gradient of function at argument * @throw std::invalid_argument if the iterator isn't the right size * to hold the gradients */ template * = nullptr> void gradient(const F& f, const EigVec& x, double& fx, InputIt first_grad_fx, InputIt last_grad_fx) { nested_rev_autodiff nested; if (last_grad_fx - first_grad_fx != x.size()) { std::stringstream s; s << "gradient(): iterator and gradient different sizes; iterator size = " << last_grad_fx - first_grad_fx << "; grad size = " << x.size() << std::endl; throw std::invalid_argument(s.str()); } Eigen::Matrix x_var(x); var fx_var = f(x_var); fx = fx_var.val(); grad(fx_var.vi_); for (Eigen::VectorXd::Index i = 0; i < x_var.size(); ++i) { *first_grad_fx++ = x_var.coeff(i).adj(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/finite_diff_hessian_times_vector_auto.hpp0000644000176200001440000000402714645137106031543 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_HESSIAN_TIMES_VECTOR_HPP #define STAN_MATH_REV_FUNCTOR_HESSIAN_TIMES_VECTOR_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Calculate the value and the product of the Hessian and the specified * vector of the specified function at the specified argument using * central finite difference of gradients, automatically setting * the stepsize between the function evaluations along a 
dimension. * *

The functor must implement * * * double operator()(const Eigen::Matrix&) * * *

For details of the algorithm, see * https://justindomke.wordpress.com/2009/01/17/hessian-vector-products/ * *

Step size is set automatically using * `sqrt(epsilon) * (1 + ||x||) / ||v||`, * as suggested in https://doi.org/10.1016/j.cam.2008.12.024 * * 2 gradient calls are needed for the algorithm. * * @tparam F Type of function * @param[in] f Function * @param[in] x Argument to function * @param[in] v Vector to multiply Hessian with * @param[out] fx Function applied to argument * @param[out] hvp Product of Hessian and vector at argument */ template void finite_diff_hessian_times_vector_auto(const F& f, const Eigen::VectorXd& x, const Eigen::VectorXd& v, double& fx, Eigen::VectorXd& hvp) { fx = f(x); double epsilon = std::sqrt(EPSILON) * (1 + x.norm()) / v.norm(); Eigen::VectorXd v_eps = epsilon * v; int d = x.size(); double tmp; Eigen::VectorXd grad_forward(d); gradient(f, x + v_eps, tmp, grad_forward); Eigen::VectorXd grad_backward(d); gradient(f, x - v_eps, tmp, grad_backward); hvp.resize(d); hvp = (grad_forward - grad_backward) / (2 * epsilon); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/integrate_1d.hpp0000644000176200001440000002053014645137106024313 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_integrate_1d_HPP #define STAN_MATH_REV_FUNCTOR_integrate_1d_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the integral of f from a to b to the given relative tolerance * * @tparam F Type of f * @tparam T_a type of first limit * @tparam T_b type of second limit * @tparam Args types of parameter pack arguments * * @param f the functor to integrate * @param a lower limit of integration * @param b upper limit of integration * @param relative_tolerance relative tolerance passed to Boost quadrature * @param[in, out] msgs the print stream for warning messages * @param args additional arguments to pass to f * @return numeric integral of function f */ template * = 
nullptr> inline return_type_t integrate_1d_impl( const F &f, const T_a &a, const T_b &b, double relative_tolerance, std::ostream *msgs, const Args &... args) { static constexpr const char *function = "integrate_1d"; check_less_or_equal(function, "lower limit", a, b); double a_val = value_of(a); double b_val = value_of(b); if (unlikely(a_val == b_val)) { if (is_inf(a_val)) { throw_domain_error(function, "Integration endpoints are both", a_val, "", ""); } return var(0.0); } else { auto args_val_tuple = std::make_tuple(value_of(args)...); double integral = integrate( [&](const auto &x, const auto &xc) { return math::apply( [&](auto &&... val_args) { return f(x, xc, msgs, val_args...); }, args_val_tuple); }, a_val, b_val, relative_tolerance); constexpr size_t num_vars_ab = is_var::value + is_var::value; size_t num_vars_args = count_vars(args...); vari **varis = ChainableStack::instance_->memalloc_.alloc_array( num_vars_ab + num_vars_args); double *partials = ChainableStack::instance_->memalloc_.alloc_array( num_vars_ab + num_vars_args); // We move this pointer up based on whether we a or b is a var type. double *partials_ptr = partials; save_varis(varis, a, b, args...); for (size_t i = 0; i < num_vars_ab + num_vars_args; ++i) { partials[i] = 0.0; } if (is_var::value && !is_inf(a)) { *partials_ptr = math::apply( [&f, a_val, msgs](auto &&... val_args) { return -f(a_val, 0.0, msgs, val_args...); }, args_val_tuple); partials_ptr++; } if (!is_inf(b) && is_var::value) { *partials_ptr = math::apply( [&f, b_val, msgs](auto &&... 
val_args) { return f(b_val, 0.0, msgs, val_args...); }, args_val_tuple); partials_ptr++; } { nested_rev_autodiff argument_nest; // The arguments copy is used multiple times in the following nests, so // do it once in a separate nest for efficiency auto args_tuple_local_copy = std::make_tuple(deep_copy_vars(args)...); // Save the varis so it's easy to efficiently access the nth adjoint std::vector local_varis(num_vars_args); math::apply( [&](const auto &... args) { save_varis(local_varis.data(), args...); }, args_tuple_local_copy); for (size_t n = 0; n < num_vars_args; ++n) { // This computes the integral of the gradient of f with respect to the // nth parameter in args using a nested nested reverse mode autodiff *partials_ptr = integrate( [&](const auto &x, const auto &xc) { argument_nest.set_zero_all_adjoints(); nested_rev_autodiff gradient_nest; var fx = math::apply( [&f, &x, &xc, msgs](auto &&... local_args) { return f(x, xc, msgs, local_args...); }, args_tuple_local_copy); fx.grad(); double gradient = local_varis[n]->adj(); // Gradients that evaluate to NaN are set to zero if the function // itself evaluates to zero. If the function is not zero and the // gradient evaluates to NaN, a std::domain_error is thrown if (is_nan(gradient)) { if (fx.val() == 0) { gradient = 0; } else { throw_domain_error("gradient_of_f", "The gradient of f", n, "is nan for parameter ", ""); } } return gradient; }, a_val, b_val, relative_tolerance); partials_ptr++; } } return make_callback_var( integral, [total_vars = num_vars_ab + num_vars_args, varis, partials](auto &vi) { for (size_t i = 0; i < total_vars; ++i) { varis[i]->adj_ += partials[i] * vi.adj(); } }); } } /** * Compute the integral of the single variable function f from a to b to within * a specified relative tolerance. a and b can be finite or infinite. 
* * f should be compatible with reverse mode autodiff and have the signature: * var f(double x, double xc, const std::vector& theta, * const std::vector& x_r, const std::vector &x_i, * std::ostream* msgs) * * It should return the value of the function evaluated at x. Any errors * should be printed to the msgs stream. * * Integrals that cross zero are broken into two, and the separate integrals are * each integrated to the given relative tolerance. * * For integrals with finite limits, the xc argument is the distance to the * nearest boundary. So for a > 0, b > 0, it will be a - x for x closer to a, * and b - x for x closer to b. xc is computed in a way that avoids the * precision loss of computing a - x or b - x manually. For integrals that cross * zero, xc can take values a - x, -x, or b - x depending on which integration * limit it is nearest. * * If either limit is infinite, xc is set to NaN * * The integration algorithm terminates when * \f[ * \frac{{|I_{n + 1} - I_n|}}{{|I|_{n + 1}}} < \text{relative tolerance} * \f] * where \f$I_{n}\f$ is the nth estimate of the integral and \f$|I|_{n}\f$ is * the nth estimate of the norm of the integral. * * Integrals that cross zero are * split into two. In this case, each integral is separately integrated to the * given relative_tolerance. * * Gradients of f that evaluate to NaN when the function evaluates to zero are * set to zero themselves. 
This is due to the autodiff easily overflowing to NaN * when evaluating gradients near the maximum and minimum floating point values * (where the function should be zero anyway for the integral to exist) * * @tparam T_a type of first limit * @tparam T_b type of second limit * @tparam T_theta type of parameters * @tparam T Type of f * * @param f the functor to integrate * @param a lower limit of integration * @param b upper limit of integration * @param theta additional parameters to be passed to f * @param x_r additional data to be passed to f * @param x_i additional integer data to be passed to f * @param[in, out] msgs the print stream for warning messages * @param relative_tolerance relative tolerance passed to Boost quadrature * @return numeric integral of function f */ template > inline return_type_t integrate_1d( const F &f, const T_a &a, const T_b &b, const std::vector &theta, const std::vector &x_r, const std::vector &x_i, std::ostream *msgs, const double relative_tolerance = std::sqrt(EPSILON)) { return integrate_1d_impl(integrate_1d_adapter(f), a, b, relative_tolerance, msgs, theta, x_r, x_i); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/idas_integrator.hpp0000644000176200001440000000730714645137106025132 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_IDAS_INTEGRATOR_HPP #define STAN_MATH_REV_FUNCTOR_IDAS_INTEGRATOR_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * IDAS DAE integrator. */ class idas_integrator { sundials::Context sundials_context_; const double rtol_; const double atol_; const int64_t max_num_steps_; public: /** * constructor * @param[in] rtol relative tolerance * @param[in] atol absolute tolerance * @param[in] max_num_steps max nb. 
of times steps */ idas_integrator(const double rtol, const double atol, const int64_t max_num_steps) : rtol_(rtol), atol_(atol), max_num_steps_(max_num_steps) {} /** * Return the solutions for the specified DAE * given the specified initial state, * initial times, times of desired solution, and parameters and * data, writing error and warning messages to the specified * stream contained in the DAE system. * * @tparam DAE type of DAE system * @param[in] func Calling function name (for printing debugging messages) * @param[in] dae DAE system * @param[in] t0 initial time. * @param[in] ts times of the desired solutions, in strictly * increasing order, all greater than the initial time. * @return a vector of states, each state being a vector of the * same size as the state variable, corresponding to a time in ts. */ template typename dae_type::return_t operator()(const char* func, dae_type& dae, double t0, const std::vector& ts) { idas_service serv(t0, dae); void* mem = serv.mem; N_Vector& yy = serv.nv_yy; N_Vector& yp = serv.nv_yp; N_Vector* yys = serv.nv_yys; N_Vector* yps = serv.nv_yps; const size_t n = dae.N; CHECK_IDAS_CALL(IDASStolerances(mem, rtol_, atol_)); CHECK_IDAS_CALL(IDASetMaxNumSteps(mem, max_num_steps_)); if (dae_type::use_fwd_sens) { CHECK_IDAS_CALL(IDASensEEtolerances(mem)); CHECK_IDAS_CALL(IDAGetSensConsistentIC(mem, yys, yps)); } size_t nt = ts.size(); typename dae_type::return_t res_yy( nt, Eigen::Matrix::Zero(n)); double t1 = t0; for (size_t i = 0; i < nt; ++i) { CHECK_IDAS_CALL(IDASolve(mem, ts[i], &t1, yy, yp, IDA_NORMAL)); if (dae_type::use_fwd_sens) { CHECK_IDAS_CALL(IDAGetSens(mem, &t1, yys)); } collect(yy, yys, dae, res_yy[i]); } return res_yy; } /** * Solve DAE system, no sensitivity * * @tparam dae_type DAE functor type * @param yy * @param yys * @param[out] dae DAE system * @param[out] y DAE solutions */ template void collect(N_Vector const& yy, N_Vector const* yys, dae_type& dae, Eigen::Matrix& y) { for (int i = 0; i < dae.N; ++i) { 
y.coeffRef(i) = NV_Ith_S(yy, i); } } template void collect(N_Vector const& yy, N_Vector const* yys, dae_type& dae, Eigen::Matrix& y) { std::vector g(dae.ns); for (size_t i = 0; i < dae.N; ++i) { for (size_t j = 0; j < dae.ns; ++j) { g[j] = NV_Ith_S(yys[j], i); } y[i] = precomputed_gradients(NV_Ith_S(yy, i), dae.all_vars, g); } } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/ode_adams.hpp0000644000176200001440000001617514645137106023673 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_ODE_ADAMS_HPP #define STAN_MATH_REV_FUNCTOR_ODE_ADAMS_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the non-stiff Adams-Moulton solver from * CVODES. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... args); * * t is the time, y is the vector-valued state, msgs is a stream for error * messages, and args are optional arguments passed to the ODE solve function * (which are passed through to \p f without modification). * * @tparam F Type of ODE right hand side * @tparam T_0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param function_name Calling function name (for printing debugging messages) * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. 
* @param relative_tolerance Relative tolerance passed to CVODES * @param absolute_tolerance Absolute tolerance passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return Solution to ODE at times \p ts */ template * = nullptr> std::vector, Eigen::Dynamic, 1>> ode_adams_tol_impl(const char* function_name, const F& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance, double absolute_tolerance, long int max_num_steps, // NOLINT(runtime/int) std::ostream* msgs, const T_Args&... args) { const auto& args_ref_tuple = std::make_tuple(to_ref(args)...); return math::apply( [&](const auto&... args_refs) { cvodes_integrator...> integrator(function_name, f, y0, t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, args_refs...); return integrator(); }, args_ref_tuple); } /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the non-stiff Adams-Moulton solver from * CVODES. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... args); * * t is the time, y is the vector-valued state, msgs is a stream for error * messages, and args are optional arguments passed to the ODE solve function * (which are passed through to \p f without modification). * * @tparam F Type of ODE right hand side * @tparam T_0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. 
* @param relative_tolerance Relative tolerance passed to CVODES * @param absolute_tolerance Absolute tolerance passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return Solution to ODE at times \p ts */ template * = nullptr> std::vector, Eigen::Dynamic, 1>> ode_adams_tol(const F& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance, double absolute_tolerance, long int max_num_steps, // NOLINT(runtime/int) std::ostream* msgs, const T_Args&... args) { return ode_adams_tol_impl("ode_adams_tol", f, y0, t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, args...); } /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the non-stiff Adams-Moulton * solver in CVODES with defaults for relative_tolerance, absolute_tolerance, * and max_num_steps. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... args); * * t is the time, y is the vector-valued state, msgs is a stream for error * messages, and args are optional arguments passed to the ODE solve function * (which are passed through to \p f without modification). * * @tparam F Type of ODE right hand side * @tparam T_0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. 
* @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return Solution to ODE at times \p ts */ template * = nullptr> std::vector, Eigen::Dynamic, 1>> ode_adams(const F& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, std::ostream* msgs, const T_Args&... args) { double relative_tolerance = 1e-10; double absolute_tolerance = 1e-10; long int max_num_steps = 1e8; // NOLINT(runtime/int) return ode_adams_tol_impl("ode_adams", f, y0, t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, args...); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/cvodes_utils.hpp0000644000176200001440000000165714645137106024461 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_CVODES_UTILS_HPP #define STAN_MATH_REV_FUNCTOR_CVODES_UTILS_HPP #include #include #include #include #include namespace stan { namespace math { inline void cvodes_set_options(void* cvodes_mem, // NOLINTNEXTLINE(runtime/int) long int max_num_steps) { // Initialize solver parameters CHECK_CVODES_CALL(CVodeSetMaxNumSteps(cvodes_mem, max_num_steps)); double init_step = 0; CHECK_CVODES_CALL(CVodeSetInitStep(cvodes_mem, init_step)); long int max_err_test_fails = 20; // NOLINT(runtime/int) CHECK_CVODES_CALL(CVodeSetMaxErrTestFails(cvodes_mem, max_err_test_fails)); long int max_conv_fails = 50; // NOLINT(runtime/int) CHECK_CVODES_CALL(CVodeSetMaxConvFails(cvodes_mem, max_conv_fails)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/map_rect_concurrent.hpp0000644000176200001440000000577014645137106026012 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_MAP_RECT_CONCURRENT_HPP #define STAN_MATH_REV_FUNCTOR_MAP_RECT_CONCURRENT_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template *> Eigen::Matrix, Eigen::Dynamic, 1> 
map_rect_concurrent( const T_shared_param& shared_params, const std::vector>& job_params, const std::vector>& x_r, const std::vector>& x_i, std::ostream* msgs) { using ReduceF = map_rect_reduce, T_job_param>; using CombineF = map_rect_combine; const int num_jobs = job_params.size(); const vector_d shared_params_dbl = value_of(shared_params); std::vector job_output(num_jobs); std::vector world_f_out(num_jobs, 0); auto execute_chunk = [&](std::size_t start, std::size_t end) -> void { for (std::size_t i = start; i != end; ++i) { job_output[i] = ReduceF()(shared_params_dbl, value_of(job_params[i]), x_r[i], x_i[i], msgs); world_f_out[i] = job_output[i].cols(); } }; #ifdef STAN_THREADS // we must use task isolation as described here: // https://software.intel.com/content/www/us/en/develop/documentation/tbb-documentation/top/intel-threading-building-blocks-developer-guide/task-isolation.html // this is to ensure that the thread local AD tape ressource is // not being modified from a different task which may happen // whenever this function is being used itself in a parallel // context (like running multiple chains for Stan) tbb::this_task_arena::isolate([&] { tbb::parallel_for(tbb::blocked_range(0, num_jobs), [&](const tbb::blocked_range& r) { execute_chunk(r.begin(), r.end()); }); }); #else execute_chunk(0, num_jobs); #endif // collect results const int num_world_output = std::accumulate(world_f_out.begin(), world_f_out.end(), 0); matrix_d world_output(job_output[0].rows(), num_world_output); int offset = 0; for (const auto& job : job_output) { const int num_job_outputs = job.cols(); world_output.block(0, offset, world_output.rows(), num_job_outputs) = job; offset += num_job_outputs; } CombineF combine(shared_params, job_params); return combine(world_output, world_f_out); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/ode_store_sensitivities.hpp0000644000176200001440000000716714645137106026725 
0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_ODE_STORE_SENSITIVITIES_HPP #define STAN_MATH_REV_FUNCTOR_ODE_STORE_SENSITIVITIES_HPP #include #include #include #include #include namespace stan { namespace math { /** * Build output vars for a state of the ODE solve, storing the sensitivities * precomputed using the forward sensitivity problem in precomputed varis. * * @tparam F Type of ODE right hand side * @tparam T_y0_t0 Type of initial state * @tparam T_t0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param f Right hand side of the ODE * @param coupled_state Current state of the coupled_ode_system * @param y0 Initial state * @param t0 Initial time * @param t Times at which to solve the ODE at * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return ODE state with scalar type var */ template ...>* = nullptr> Eigen::Matrix ode_store_sensitivities( const F& f, const std::vector& coupled_state, const Eigen::Matrix& y0, const T_t0& t0, const T_t& t, std::ostream* msgs, const Args&... 
args) { const size_t N = y0.size(); const size_t num_y0_vars = count_vars(y0); const size_t num_args_vars = count_vars(args...); const size_t num_t0_vars = count_vars(t0); const size_t num_t_vars = count_vars(t); Eigen::Matrix yt(N); Eigen::VectorXd y(N); for (size_t n = 0; n < N; ++n) { y.coeffRef(n) = coupled_state[n]; } Eigen::VectorXd f_y_t; if (is_var::value) f_y_t = f(value_of(t), y, msgs, eval(value_of(args))...); Eigen::VectorXd f_y0_t0; if (is_var::value) f_y0_t0 = f(value_of(t0), eval(value_of(y0)), msgs, eval(value_of(args))...); const size_t total_vars = num_y0_vars + num_args_vars + num_t0_vars + num_t_vars; vari** varis = ChainableStack::instance_->memalloc_.alloc_array(total_vars); save_varis(varis, y0, args..., t0, t); // memory for a column major jacobian double* jacobian_mem = ChainableStack::instance_->memalloc_.alloc_array(N * total_vars); Eigen::Map jacobian(jacobian_mem, total_vars, N); for (size_t j = 0; j < N; ++j) { for (size_t k = 0; k < num_y0_vars; ++k) { jacobian.coeffRef(k, j) = coupled_state[N + num_y0_vars * k + j]; } for (size_t k = 0; k < num_args_vars; ++k) { jacobian.coeffRef(num_y0_vars + k, j) = coupled_state[N + N * num_y0_vars + N * k + j]; } if (is_var::value) { double dyt_dt0 = 0.0; for (size_t k = 0; k < num_y0_vars; ++k) { dyt_dt0 -= f_y0_t0.coeffRef(k) * coupled_state[N + num_y0_vars * k + j]; } jacobian.coeffRef(num_y0_vars + num_args_vars, j) = dyt_dt0; } if (is_var::value) { jacobian.coeffRef(num_y0_vars + num_args_vars + num_t0_vars, j) = f_y_t.coeffRef(j); } // jacobian_mem + j * total_vars points to the jth column of jacobian yt(j) = new precomputed_gradients_vari(y(j), total_vars, varis, jacobian_mem + j * total_vars); } return yt; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/solve_newton.hpp0000644000176200001440000003230514645137106024472 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_SOLVE_NEWTON_HPP #define STAN_MATH_REV_FUNCTOR_SOLVE_NEWTON_HPP #include 
#include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. Use the * KINSOL solver from the SUNDIALS suite. * * The user can also specify the scaled step size, the function * tolerance, and the maximum number of steps. * * This overload handles non-autodiff parameters. * * @tparam F type of equation system function. * @tparam T type of initial guess vector. * @tparam Args types of additional parameters to the equation system functor * * @param[in] f Functor that evaluated the system of equations. * @param[in] x Vector of starting values. * @param[in, out] msgs The print stream for warning messages. * @param[in] scaling_step_size Scaled-step stopping tolerance. If * a Newton step is smaller than the scaling step * tolerance, the code breaks, assuming the solver is no * longer making significant progress (i.e. is stuck) * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps maximum number of function evaluations. * @param[in] args Additional parameters to the equation system functor. * @return theta Vector of solutions to the system of equations. * @pre f returns finite values when passed any value of x and the given args. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * @throw std::invalid_argument if scaled_step_size is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw std::domain_error if solver exceeds max_num_steps. 
*/ template * = nullptr, require_all_st_arithmetic* = nullptr> Eigen::VectorXd solve_newton_tol(const F& f, const T& x, const double scaling_step_size, const double function_tolerance, const int64_t max_num_steps, std::ostream* const msgs, const Args&... args) { const auto& x_ref = to_ref(value_of(x)); check_nonzero_size("solve_newton", "initial guess", x_ref); check_finite("solve_newton", "initial guess", x_ref); check_nonnegative("solve_newton", "scaling_step_size", scaling_step_size); check_nonnegative("solve_newton", "function_tolerance", function_tolerance); check_positive("solve_newton", "max_num_steps", max_num_steps); return kinsol_solve(f, x_ref, scaling_step_size, function_tolerance, max_num_steps, 1, 10, KIN_LINESEARCH, msgs, args...); } /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. Use the * KINSOL solver from the SUNDIALS suite. * * The user can also specify the scaled step size, the function * tolerance, and the maximum number of steps. * * This overload handles var parameters. * * The Jacobian \(J_{xy}\) (i.e., Jacobian of unknown \(x\) w.r.t. the parameter * \(y\)) is calculated given the solution as follows. Since * \[ * f(x, y) = 0, * \] * we have (\(J_{pq}\) being the Jacobian matrix \(\tfrac {dq} {dq}\)) * \[ * - J_{fx} J_{xy} = J_{fy}, * \] * and therefore \(J_{xy}\) can be solved from system * \[ * - J_{fx} J_{xy} = J_{fy}. * \] * Let \(eta\) be the adjoint with respect to \(x\); then to calculate * \[ * \eta J_{xy}, * \] * we solve * \[ * - (\eta J_{fx}^{-1}) J_{fy}. * \] * * @tparam F type of equation system function. * @tparam T type of initial guess vector. * @tparam Args types of additional parameters to the equation system functor * * @param[in] f Functor that evaluated the system of equations. * @param[in] x Vector of starting values. * @param[in, out] msgs The print stream for warning messages. 
* @param[in] scaling_step_size Scaled-step stopping tolerance. If * a Newton step is smaller than the scaling step * tolerance, the code breaks, assuming the solver is no * longer making significant progress (i.e. is stuck) * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps maximum number of function evaluations. * @param[in] args Additional parameters to the equation system functor. * @return theta Vector of solutions to the system of equations. * @pre f returns finite values when passed any value of x and the given args. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * @throw std::invalid_argument if scaled_step_size is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw std::domain_error if solver exceeds max_num_steps. */ template * = nullptr, require_any_st_var* = nullptr> Eigen::Matrix solve_newton_tol( const F& f, const T& x, const double scaling_step_size, const double function_tolerance, const int64_t max_num_steps, std::ostream* const msgs, const T_Args&... args) { const auto& x_ref = to_ref(value_of(x)); auto arena_args_tuple = make_chainable_ptr(std::make_tuple(eval(args)...)); auto args_vals_tuple = math::apply( [&](const auto&... args) { return std::make_tuple(to_ref(value_of(args))...); }, *arena_args_tuple); check_nonzero_size("solve_newton", "initial guess", x_ref); check_finite("solve_newton", "initial guess", x_ref); check_nonnegative("solve_newton", "scaling_step_size", scaling_step_size); check_nonnegative("solve_newton", "function_tolerance", function_tolerance); check_positive("solve_newton", "max_num_steps", max_num_steps); // Solve the system Eigen::VectorXd theta_dbl = math::apply( [&](const auto&... 
vals) { return kinsol_solve(f, x_ref, scaling_step_size, function_tolerance, max_num_steps, 1, 10, KIN_LINESEARCH, msgs, vals...); }, args_vals_tuple); auto f_wrt_x = [&](const auto& x) { return math::apply([&](const auto&... args) { return f(x, msgs, args...); }, args_vals_tuple); }; Eigen::MatrixXd Jf_x; Eigen::VectorXd f_x; jacobian(f_wrt_x, theta_dbl, f_x, Jf_x); using ret_type = Eigen::Matrix; arena_t ret = theta_dbl; auto Jf_x_T_lu_ptr = make_unsafe_chainable_ptr(Jf_x.transpose().partialPivLu()); // Lu reverse_pass_callback( [f, ret, arena_args_tuple, Jf_x_T_lu_ptr, msgs]() mutable { Eigen::VectorXd eta = -Jf_x_T_lu_ptr->solve(ret.adj().eval()); // Contract with Jacobian of f with respect to y using a nested reverse // autodiff pass. { nested_rev_autodiff rev; Eigen::VectorXd ret_val = ret.val(); auto x_nrad_ = math::apply( [&ret_val, &f, msgs](const auto&... args) { return eval(f(ret_val, msgs, args...)); }, *arena_args_tuple); x_nrad_.adj() = eta; grad(); } }); return ret_type(ret); } /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. Use the * KINSOL solver from the SUNDIALS suite. * * This signature does not give users control over the tuning parameters * and instead relies on default values. * * @tparam F type of equation system function * @tparam T type of elements in the x vector * @tparam Args types of additional input to the equation system functor * * @param[in] f Functor that evaluates the system of equations. * @param[in] x Vector of starting values (initial guess). * @param[in, out] msgs The print stream for warning messages. * @param[in] scaling_step_size Scaled-step stopping tolerance. If * a Newton step is smaller than the scaling step * tolerance, the code breaks, assuming the solver is no * longer making significant progress (i.e. is stuck) * @param[in] function_tolerance determines whether roots are acceptable. 
* @param[in] max_num_steps maximum number of function evaluations. * @param[in, out] msgs the print stream for warning messages. * @param[in] args Additional parameters to the equation system functor. * @return theta Vector of solutions to the system of equations. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * @throw std::invalid_argument if scaled_step_size is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw std::domain_error if solver exceeds max_num_steps. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> solve_newton( const F& f, const T& x, std::ostream* const msgs, const T_Args&... args) { double scaling_step_size = 1e-3; double function_tolerance = 1e-6; int64_t max_num_steps = 200; const auto& args_ref_tuple = std::make_tuple(to_ref(args)...); return math::apply( [&](const auto&... args_refs) { return solve_newton_tol(f, x, scaling_step_size, function_tolerance, max_num_steps, msgs, args_refs...); }, args_ref_tuple); } /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. Use the * KINSOL solver from the SUNDIALS suite. * * The user can also specify the scaled step size, the function * tolerance, and the maximum number of steps. * * Signature to maintain backward compatibility, will be removed * in the future. * * @tparam F type of equation system function. * @tparam T type of initial guess vector. * * @param[in] f Functor that evaluated the system of equations. * @param[in] x Vector of starting values. * @param[in] y Parameter vector for the equation system. * @param[in] dat Continuous data vector for the equation system. * @param[in] dat_int Integer data vector for the equation system. * @param[in, out] msgs The print stream for warning messages. 
* @param[in] scaling_step_size Scaled-step stopping tolerance. If * a Newton step is smaller than the scaling step * tolerance, the code breaks, assuming the solver is no * longer making significant progress (i.e. is stuck) * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps maximum number of function evaluations. * @return theta Vector of solutions to the system of equations. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * @throw std::invalid_argument if y has non-finite elements. * @throw std::invalid_argument if dat has non-finite elements. * @throw std::invalid_argument if dat_int has non-finite elements. * @throw std::invalid_argument if scaled_step_size is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw std::domain_error if solver exceeds max_num_steps. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> algebra_solver_newton( const F& f, const T1& x, const T2& y, const std::vector& dat, const std::vector& dat_int, std::ostream* const msgs = nullptr, const double scaling_step_size = 1e-3, const double function_tolerance = 1e-6, const long int max_num_steps = 200) { // NOLINT(runtime/int) return solve_newton_tol(algebra_solver_adapter(f), x, scaling_step_size, function_tolerance, max_num_steps, msgs, y, dat, dat_int); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/kinsol_solve.hpp0000644000176200001440000001332514645137106024460 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_KINSOL_SOLVE_HPP #define STAN_MATH_REV_FUNCTOR_KINSOL_SOLVE_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the solution to the specified algebraic system, * given an initial guess. 
Invokes the Kinsol solver from Sundials. * * @tparam F type of equation system function. * @tparam T type of initial guess vector. * * @param[in] f Functor that evaluated the system of equations. * @param[in] x Vector of starting values. * @param[in] y Parameter vector for the equation system. The function * is overloaded to treat y as a vector of doubles or of a * a template type T. * @param[in] dat Continuous data vector for the equation system. * @param[in] dat_int Integer data vector for the equation system. * @param[in, out] msgs The print stream for warning messages. * @param[in] scaling_step_tol Scaled-step stopping tolerance. If * a Newton step is smaller than the scaling step * tolerance, the code breaks, assuming the solver is no * longer making significant progress (i.e. is stuck) * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps Maximum number of function evaluations. * @param[in] custom_jacobian If 0, use Kinsol's default to compute the * jacobian for the Newton step, namely Quotient Difference * (finite difference). If 1, use reverse-mode AD, unless * the user specifies their own method. * @param[in] J_f A functor that computes the Jacobian for the Newton step. * Defaults to reverse-mode AD. * @param[in] steps_eval_jacobian Maximum number of steps before the * Jacobian gets recomputed. Note that Kinsol's default is 10. * If equal to 1, the algorithm computes exact Newton steps. * @param[in] global_line_search does the solver use a global line search? * If equal to KIN_NONE, no, if KIN_LINESEARCH, yes. * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to DAE right hand side * @throw std::invalid_argument if Kinsol returns a negative * flag when setting up the solver. * @throw std::domain_error if Kinsol fails to solve * equation in max_num_steps iterations. 
* @throw std::runtime_error if Kinsol returns a * negative flag that is not due to hitting max_num_steps. */ template Eigen::VectorXd kinsol_solve(const F1& f, const Eigen::VectorXd& x, const double scaling_step_tol, // = 1e-3 const double function_tolerance, // = 1e-6 const int64_t max_num_steps, // = 200 const bool custom_jacobian, // = 1 const int steps_eval_jacobian, // = 10 const int global_line_search, // = KIN_LINESEARCH std::ostream* const msgs, const Args&... args) { int N = x.size(); typedef kinsol_system_data system_data; system_data kinsol_data(f, x, msgs, args...); CHECK_KINSOL_CALL(KINInit(kinsol_data.kinsol_memory_, &system_data::kinsol_f_system, kinsol_data.nv_x_)); N_Vector scaling = N_VNew_Serial(N, kinsol_data.sundials_context_); N_Vector nv_x = N_VNew_Serial(N, kinsol_data.sundials_context_); Eigen::VectorXd x_solution(N); try { N_VConst_Serial(1.0, scaling); // no scaling CHECK_KINSOL_CALL( KINSetNumMaxIters(kinsol_data.kinsol_memory_, max_num_steps)); CHECK_KINSOL_CALL( KINSetFuncNormTol(kinsol_data.kinsol_memory_, function_tolerance)); CHECK_KINSOL_CALL( KINSetScaledStepTol(kinsol_data.kinsol_memory_, scaling_step_tol)); CHECK_KINSOL_CALL( KINSetMaxSetupCalls(kinsol_data.kinsol_memory_, steps_eval_jacobian)); // CHECK // The default value is 1000 * ||u_0||_D where ||u_0|| is the initial guess. // So we run into issues if ||u_0|| = 0. // If the norm is non-zero, use kinsol's default (accessed with 0), // else use the dimension of x -- CHECK - find optimal length. double max_newton_step = (x.norm() == 0) ? 
x.size() : 0; CHECK_KINSOL_CALL( KINSetMaxNewtonStep(kinsol_data.kinsol_memory_, max_newton_step)); CHECK_KINSOL_CALL(KINSetUserData(kinsol_data.kinsol_memory_, static_cast(&kinsol_data))); // construct Linear solver CHECK_KINSOL_CALL(KINSetLinearSolver(kinsol_data.kinsol_memory_, kinsol_data.LS_, kinsol_data.J_)); if (custom_jacobian) CHECK_KINSOL_CALL(KINSetJacFn(kinsol_data.kinsol_memory_, &system_data::kinsol_jacobian)); for (int i = 0; i < N; i++) NV_Ith_S(nv_x, i) = x(i); kinsol_check(KINSol(kinsol_data.kinsol_memory_, nv_x, global_line_search, scaling, scaling), "KINSol", max_num_steps); for (int i = 0; i < N; i++) x_solution(i) = NV_Ith_S(nv_x, i); } catch (const std::exception& e) { N_VDestroy(nv_x); N_VDestroy(scaling); throw; } N_VDestroy(nv_x); N_VDestroy(scaling); return x_solution; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/integrate_ode_bdf.hpp0000644000176200001440000000304414645137106025372 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_INTEGRATE_ODE_BDF_HPP #define STAN_MATH_REV_FUNCTOR_INTEGRATE_ODE_BDF_HPP #include #include #include #include #include namespace stan { namespace math { /** * @deprecated use ode_bdf */ template std::vector>> integrate_ode_bdf(const F& f, const std::vector& y0, const T_t0& t0, const std::vector& ts, const std::vector& theta, const std::vector& x, const std::vector& x_int, std::ostream* msgs = nullptr, double relative_tolerance = 1e-10, double absolute_tolerance = 1e-10, long int max_num_steps = 1e8) { // NOLINT(runtime/int) internal::integrate_ode_std_vector_interface_adapter f_adapted(f); auto y = ode_bdf_tol_impl("integrate_ode_bdf", f_adapted, to_vector(y0), t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, theta, x, x_int); std::vector>> y_converted; for (size_t i = 0; i < y.size(); ++i) y_converted.push_back(to_array_1d(y[i])); return y_converted; } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/rev/functor/finite_diff_hessian_auto.hpp0000644000176200001440000000521114645137106026754 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_HESSIAN_HPP #define STAN_MATH_REV_FUNCTOR_HESSIAN_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Calculate the value and the Hessian of the specified function at * the specified argument using first-order finite difference of gradients, * automatically setting the stepsize between the function evaluations * along a dimension. * *

The functor must implement * * * double operator()(const Eigen::VectorXd&) * * *

For details of the algorithm, see * "Central difference approximation", under "Second-order derivatives based on * gradient", in: https://v8doc.sas.com/sashtml/ormp/chap5/sect28.htm * *

Step size for dimension `i` is set automatically using * `stan::math::finite_diff_stepsize(x(i))`. * * 2n gradient calls are needed for the algorithm. * * @tparam F Type of function * @param[in] f Function * @param[in] x Argument to function * @param[out] fx Function applied to argument * @param[out] grad_fx Gradient of function at argument * @param[out] hess_fx Hessian of function at argument */ template void finite_diff_hessian_auto(const F& f, const Eigen::VectorXd& x, double& fx, Eigen::VectorXd& grad_fx, Eigen::MatrixXd& hess_fx) { int d = x.size(); Eigen::VectorXd x_temp(x); hess_fx.resize(d, d); gradient(f, x, fx, grad_fx); std::vector g_plus(d); std::vector g_minus(d); std::vector epsilons(d); double tmp; // compute the gradient at x+eps_i*e_i // such that eps_i is the step size and e_i is the unit vector // in the i-th direction for (size_t i = 0; i < d; ++i) { Eigen::VectorXd x_temp(x); epsilons[i] = finite_diff_stepsize(x(i)); x_temp(i) += epsilons[i]; gradient(f, x_temp, tmp, g_plus[i]); } // similarly, compute the gradient at x-eps_i*e_i for (size_t i = 0; i < d; ++i) { Eigen::VectorXd x_temp(x); x_temp(i) -= epsilons[i]; gradient(f, x_temp, tmp, g_minus[i]); } // approximate the hessian as a finite difference of gradients for (int i = 0; i < d; ++i) { for (int j = i; j < d; ++j) { hess_fx(j, i) = (g_plus[j](i) - g_minus[j](i)) / (4 * epsilons[j]) + (g_plus[i](j) - g_minus[i](j)) / (4 * epsilons[i]); hess_fx(i, j) = hess_fx(j, i); } } } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/solve_powell.hpp0000644000176200001440000004043114645137106024461 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_SOLVE_POWELL_HPP #define STAN_MATH_REV_FUNCTOR_SOLVE_POWELL_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Solve algebraic equations using Powell solver * * @tparam F type of equation 
system function * @tparam T type of elements in the x vector * @tparam Args types of additional parameters to the equation system functor * * @param[in] f Functor that evaluates the system of equations * @param[in] x Vector of starting values (initial guess). * @param[in, out] msgs the print stream for warning messages. * @param[in] relative_tolerance determines the convergence criteria * for the solution. * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps maximum number of function evaluations. * @param[in] args additional parameters to the equation system functor. * @return theta_dbl Double vector of solutions to the system of equations. * @pre x has size greater than zero. * @pre x has only finite elements. * @pre f returns finite values when passed any value of x and the given args. * @pre relative_tolerance is non-negative. * @pre function_tolerance is non-negative. * @pre max_num_steps is positive. * @throw std::domain_error solver exceeds max_num_steps. * @throw std::domain_error if the norm of the solution exceeds * the function tolerance. */ template * = nullptr> T& solve_powell_call_solver(const F& f, T& x, std::ostream* const msgs, const double relative_tolerance, const double function_tolerance, const int64_t max_num_steps, const Args&... 
args) { // Construct the solver hybrj_functor_solver hfs(f); Eigen::HybridNonLinearSolver> solver(hfs); // Compute theta_dbl solver.parameters.xtol = relative_tolerance; solver.parameters.maxfev = max_num_steps; solver.solve(x); // Check if the max number of steps has been exceeded if (solver.nfev >= max_num_steps) { [max_num_steps]() STAN_COLD_PATH { throw_domain_error("algebra_solver", "maximum number of iterations", max_num_steps, "(", ") was exceeded in the solve."); }(); } // Check solution is a root double system_norm = f(x).stableNorm(); if (system_norm > function_tolerance) { [function_tolerance, system_norm]() STAN_COLD_PATH { std::ostringstream message; message << "the norm of the algebraic function is " << system_norm << " but should be lower than the function " << "tolerance:"; throw_domain_error("algebra_solver", message.str().c_str(), function_tolerance, "", ". Consider decreasing the relative tolerance and " "increasing max_num_steps."); }(); } return x; } /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. * Use Powell's dogleg solver. * * The user can also specify the relative tolerance * (xtol in Eigen's code), the function tolerance, * and the maximum number of steps (maxfev in Eigen's code). * * This overload handles non-autodiff parameters. * * @tparam F type of equation system function * @tparam T type of elements in the x vector * @tparam Args types of additional parameters to the equation system functor * * @param[in] f Functor that evaluates the system of equations. * @param[in] x Vector of starting values (initial guess). * @param[in] relative_tolerance determines the convergence criteria * for the solution. * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps maximum number of function evaluations. * @param[in, out] msgs the print stream for warning messages. 
* @param[in] args additional parameters to the equation system functor. * @return theta Vector of solutions to the system of equations. * @pre f returns finite values when passed any value of x and the given args. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * elements. * @throw std::invalid_argument if relative_tolerance is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw std::domain_error solver exceeds max_num_steps. * @throw std::domain_error if the norm of the solution exceeds * the function tolerance. */ template * = nullptr, require_all_st_arithmetic* = nullptr> Eigen::VectorXd solve_powell_tol(const F& f, const T& x, const double relative_tolerance, const double function_tolerance, const int64_t max_num_steps, std::ostream* const msgs, const Args&... args) { auto x_ref = eval(value_of(x)); auto args_vals_tuple = std::make_tuple(to_ref(args)...); auto f_wrt_x = [&args_vals_tuple, &f, msgs](const auto& x) { return math::apply( [&x, &f, msgs](const auto&... args) { return f(x, msgs, args...); }, args_vals_tuple); }; check_nonzero_size("solve_powell", "initial guess", x_ref); check_finite("solve_powell", "initial guess", x_ref); check_nonnegative("alegbra_solver_powell", "relative_tolerance", relative_tolerance); check_nonnegative("solve_powell", "function_tolerance", function_tolerance); check_positive("solve_powell", "max_num_steps", max_num_steps); check_matching_sizes("solve_powell", "the algebraic system's output", f_wrt_x(x_ref), "the vector of unknowns, x,", x_ref); // Solve the system return solve_powell_call_solver(f_wrt_x, x_ref, msgs, relative_tolerance, function_tolerance, max_num_steps); } /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. 
* Use Powell's dogleg solver. * * This signature does not let the user specify the tuning parameters of the * solver (instead default values are used). * * @tparam F type of equation system function * @tparam T type of elements in the x vector * @tparam Args types of additional input to the equation system functor * * @param[in] f Functor that evaluates the system of equations. * @param[in] x Vector of starting values (initial guess). * @param[in, out] msgs the print stream for warning messages. * @param[in] args Additional parameters to the equation system functor. * @return theta Vector of solutions to the system of equations. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * @throw std::invalid_argument if relative_tolerance is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw std::domain_error solver exceeds max_num_steps. * @throw std::domain_error if the norm of the solution exceeds * the function tolerance. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> solve_powell( const F& f, const T& x, std::ostream* const msgs, const T_Args&... args) { double relative_tolerance = 1e-10; double function_tolerance = 1e-6; int64_t max_num_steps = 200; const auto& args_ref_tuple = std::make_tuple(to_ref(args)...); return math::apply( [&](const auto&... args_refs) { return solve_powell_tol(f, x, relative_tolerance, function_tolerance, max_num_steps, msgs, args_refs...); }, args_ref_tuple); } /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. * Use Powell's dogleg solver. * * The user can also specify the relative tolerance * (xtol in Eigen's code), the function tolerance, * and the maximum number of steps (maxfev in Eigen's code). 
* * Signature to maintain backward compatibility, will be removed * in the future. * * @tparam F type of equation system function * @tparam T1 type of elements in the x vector * @tparam T2 type of elements in the y vector * * @param[in] f Functor that evaluates the system of equations. * @param[in] x Vector of starting values (initial guess). * @param[in] y parameter vector for the equation system. * @param[in] dat continuous data vector for the equation system. * @param[in] dat_int integer data vector for the equation system. * @param[in, out] msgs the print stream for warning messages. * @param[in] relative_tolerance determines the convergence criteria * for the solution. * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps maximum number of function evaluations. * @return theta Vector of solutions to the system of equations. * @pre f returns finite values when passed any value of x and the given y, dat, * and dat_int. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * @throw std::invalid_argument if y has non-finite elements. * @throw std::invalid_argument if dat has non-finite elements. * @throw std::invalid_argument if dat_int has non-finite * elements. * @throw std::invalid_argument if relative_tolerance is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw boost::math::evaluation_error (which is a subclass of * std::domain_error) if solver exceeds max_num_steps. * @throw boost::math::evaluation_error (which is a subclass of * std::domain_error) if the norm of the solution exceeds the * function tolerance. 
*/ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> algebra_solver( const F& f, const T1& x, const T2& y, const std::vector& dat, const std::vector& dat_int, std::ostream* msgs = nullptr, const double relative_tolerance = 1e-10, const double function_tolerance = 1e-6, const int64_t max_num_steps = 1e+3) { return solve_powell_tol(algebra_solver_adapter(f), x, relative_tolerance, function_tolerance, max_num_steps, msgs, y, dat, dat_int); } /** * Return the solution to the specified system of algebraic * equations given an initial guess, and parameters and data, * which get passed into the algebraic system. * Use Powell's dogleg solver. * * The user can also specify the relative tolerance * (xtol in Eigen's code), the function tolerance, * and the maximum number of steps (maxfev in Eigen's code). * * This overload handles var parameters. * * The Jacobian \(J_{xy}\) (i.e., Jacobian of unknown \(x\) w.r.t. the parameter * \(y\)) is calculated given the solution as follows. Since * \[ * f(x, y) = 0, * \] * we have (\(J_{pq}\) being the Jacobian matrix \(\tfrac {dq} {dq}\)) * \[ * - J_{fx} J_{xy} = J_{fy}, * \] * and therefore \(J_{xy}\) can be solved from system * \[ * - J_{fx} J_{xy} = J_{fy}. * \] * Let \(eta\) be the adjoint with respect to \(x\); then to calculate * \[ * \eta J_{xy}, * \] * we solve * \[ * - (\eta J_{fx}^{-1}) J_{fy}. * \] * * @tparam F type of equation system function * @tparam T type of elements in the x vector * @tparam Args types of additional parameters to the equation system functor * * @param[in] f Functor that evaluates the system of equations. * @param[in] x Vector of starting values (initial guess). * @param[in] relative_tolerance determines the convergence criteria * for the solution. * @param[in] function_tolerance determines whether roots are acceptable. * @param[in] max_num_steps maximum number of function evaluations. * @param[in, out] msgs the print stream for warning messages. 
* @param[in] args Additional parameters to the equation system functor. * @return theta Vector of solutions to the system of equations. * @pre f returns finite values when passed any value of x and the given args. * @throw std::invalid_argument if x has size zero. * @throw std::invalid_argument if x has non-finite elements. * elements. * @throw std::invalid_argument if relative_tolerance is strictly * negative. * @throw std::invalid_argument if function_tolerance is strictly * negative. * @throw std::invalid_argument if max_num_steps is not positive. * @throw std::domain_error solver exceeds max_num_steps. * @throw std::domain_error if the norm of the solution exceeds * the function tolerance. */ template * = nullptr, require_any_st_var* = nullptr> Eigen::Matrix solve_powell_tol( const F& f, const T& x, const double relative_tolerance, const double function_tolerance, const int64_t max_num_steps, std::ostream* const msgs, const T_Args&... args) { auto x_ref = eval(value_of(x)); auto arena_args_tuple = make_chainable_ptr(std::make_tuple(eval(args)...)); auto args_vals_tuple = math::apply( [&](const auto&... args) { return std::make_tuple(to_ref(value_of(args))...); }, *arena_args_tuple); auto f_wrt_x = [&args_vals_tuple, &f, msgs](const auto& x) { return math::apply( [&x, &f, msgs](const auto&... 
args) { return f(x, msgs, args...); }, args_vals_tuple); }; check_nonzero_size("solve_powell", "initial guess", x_ref); check_finite("solve_powell", "initial guess", x_ref); check_nonnegative("alegbra_solver_powell", "relative_tolerance", relative_tolerance); check_nonnegative("solve_powell", "function_tolerance", function_tolerance); check_positive("solve_powell", "max_num_steps", max_num_steps); check_matching_sizes("solve_powell", "the algebraic system's output", f_wrt_x(x_ref), "the vector of unknowns, x,", x_ref); // Solve the system solve_powell_call_solver(f_wrt_x, x_ref, msgs, relative_tolerance, function_tolerance, max_num_steps); Eigen::MatrixXd Jf_x; Eigen::VectorXd f_x; jacobian(f_wrt_x, x_ref, f_x, Jf_x); using ret_type = Eigen::Matrix; auto Jf_x_T_lu_ptr = make_unsafe_chainable_ptr(Jf_x.transpose().partialPivLu()); // Lu arena_t ret = x_ref; reverse_pass_callback([f, ret, arena_args_tuple, Jf_x_T_lu_ptr, msgs]() mutable { Eigen::VectorXd eta = -Jf_x_T_lu_ptr->solve(ret.adj().eval()); // Contract with Jacobian of f with respect to y using a nested reverse // autodiff pass. { nested_rev_autodiff rev; Eigen::VectorXd ret_val = ret.val(); auto x_nrad_ = math::apply( [&](const auto&... args) { return eval(f(ret_val, msgs, args...)); }, *arena_args_tuple); x_nrad_.adj() = eta; grad(); } }); return ret_type(ret); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/algebra_system.hpp0000644000176200001440000000726614645137106024761 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_ALGEBRA_SYSTEM_HPP #define STAN_MATH_REV_FUNCTOR_ALGEBRA_SYSTEM_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * A structure which gets passed to Eigen's dogleg * algebraic solver. * * @tparam T scalar type of independent variable. 
* @tparam NX number of rows * @tparam NY number of columns */ template struct nlo_functor { const int m_inputs, m_values; nlo_functor() : m_inputs(NX), m_values(NY) {} nlo_functor(int inputs, int values) : m_inputs(inputs), m_values(values) {} int inputs() const { return m_inputs; } int values() const { return m_values; } }; /** * A functor with the required operators to call Eigen's algebraic solver. * * @tparam S wrapper around the algebraic system functor. Has the * signature required for jacobian (i.e takes only one argument). * @tparam F algebraic system functor * @tparam T0 scalar type for unknowns * @tparam T1 scalar type for auxiliary parameters */ template struct hybrj_functor_solver : nlo_functor { /** Wrapper around algebraic system */ S fs_; /** Jacobian of algebraic function wrt unknowns */ Eigen::MatrixXd J_; explicit hybrj_functor_solver(const S& fs) : fs_(fs) {} /** * Computes the value the algebraic function, f, when pluging in the * independent variables, and the Jacobian w.r.t unknowns. * * @param [in] iv independent variables * @param [in, out] fvec value of algebraic function when plugging in iv. */ int operator()(const Eigen::VectorXd& iv, Eigen::VectorXd& fvec) { jacobian(fs_, iv, fvec, J_); return 0; } /** * Assign the Jacobian to fjac. * * @param [in] iv independent variables. * @param [in, out] fjac matrix container for jacobian */ int df(const Eigen::VectorXd& iv, Eigen::MatrixXd& fjac) const { fjac = J_; return 0; } /** * Performs the same task as the operator(), but returns the * Jacobian, instead of saving it inside an argument * passed by reference. * @param [in] iv independent variable. */ Eigen::MatrixXd get_jacobian(const Eigen::VectorXd& iv) { Eigen::VectorXd fvec; jacobian(fs_, iv, fvec, J_); return J_; } /** * Performs the same task as df(), but returns the value of * algebraic function, instead of saving it inside an * argument passed by reference. * @tparam [in] iv independent variable. 
*/ Eigen::VectorXd get_value(const Eigen::VectorXd& iv) const { return fs_(iv); } }; // TODO(jgaeb): Remove this when the chain method of the fixed point solver is // updated. template void algebra_solver_check(const Eigen::Matrix& x, const Eigen::Matrix y, const std::vector& dat, const std::vector& dat_int, double function_tolerance, long int max_num_steps) { // NOLINT(runtime/int) check_nonzero_size("algebra_solver", "initial guess", x); check_finite("algebra_solver", "initial guess", x); check_finite("algebra_solver", "parameter vector", y); check_finite("algebra_solver", "continuous data", dat); check_finite("algebra_solver", "integer data", dat_int); check_nonnegative("algebra_solver", "function_tolerance", function_tolerance); check_positive("algebra_solver", "max_num_steps", max_num_steps); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/ode_bdf.hpp0000644000176200001440000001625314645137106023336 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_ODE_BDF_HPP #define STAN_MATH_REV_FUNCTOR_ODE_BDF_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the stiff backward differentiation formula * BDF solver from CVODES. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... args); * * t is the time, y is the vector-valued state, msgs is a stream for error * messages, and args are optional arguments passed to the ODE solve function * (which are passed through to \p f without modification). 
* * @tparam F Type of ODE right hand side * @tparam T_y0 Type of initial state * @tparam T_t0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param function_name Calling function name (for printing debugging messages) * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param relative_tolerance Relative tolerance passed to CVODES * @param absolute_tolerance Absolute tolerance passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return Solution to ODE at times \p ts */ template * = nullptr> std::vector, Eigen::Dynamic, 1>> ode_bdf_tol_impl(const char* function_name, const F& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance, double absolute_tolerance, long int max_num_steps, // NOLINT(runtime/int) std::ostream* msgs, const T_Args&... args) { const auto& args_ref_tuple = std::make_tuple(to_ref(args)...); return math::apply( [&](const auto&... args_refs) { cvodes_integrator...> integrator(function_name, f, y0, t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, args_refs...); return integrator(); }, args_ref_tuple); } /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the stiff backward differentiation formula * BDF solver from CVODES. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... 
args); * * t is the time, y is the vector-valued state, msgs is a stream for error * messages, and args are optional arguments passed to the ODE solve function * (which are passed through to \p f without modification). * * @tparam F Type of ODE right hand side * @tparam T_0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param relative_tolerance Relative tolerance passed to CVODES * @param absolute_tolerance Absolute tolerance passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return Solution to ODE at times \p ts */ template * = nullptr> std::vector, Eigen::Dynamic, 1>> ode_bdf_tol(const F& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance, double absolute_tolerance, long int max_num_steps, // NOLINT(runtime/int) std::ostream* msgs, const T_Args&... args) { return ode_bdf_tol_impl("ode_bdf_tol", f, y0, t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, args...); } /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the stiff backward differentiation formula * (BDF) solver in CVODES with defaults for relative_tolerance, * absolute_tolerance, and max_num_steps. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... 
args); * * t is the time, y is the vector-valued state, msgs is a stream for error * messages, and args are optional arguments passed to the ODE solve function * (which are passed through to \p f without modification). * * @tparam F Type of ODE right hand side * @tparam T_0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return Solution to ODE at times \p ts */ template * = nullptr> std::vector, Eigen::Dynamic, 1>> ode_bdf(const F& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, std::ostream* msgs, const T_Args&... args) { double relative_tolerance = 1e-10; double absolute_tolerance = 1e-10; long int max_num_steps = 1e8; // NOLINT(runtime/int) return ode_bdf_tol_impl("ode_bdf", f, y0, t0, ts, relative_tolerance, absolute_tolerance, max_num_steps, msgs, args...); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/idas_service.hpp0000644000176200001440000001013614645137106024406 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_IDAS_SERVICE_HPP #define STAN_MATH_REV_FUNCTOR_IDAS_SERVICE_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * For each type of Ode(with different rhs functor F and * senstivity parameters), we allocate mem and workspace for * idas. This service manages the * allocation/deallocation, so ODE systems only request * service by injection. 
* @tparam ode ode type * @tparam lmm_type IDAS solver type (BDF & ADAMS) * @tparam butcher_tab AKRODE Butcher table */ template struct idas_service { sundials::Context sundials_context_; int ns; N_Vector nv_yy; N_Vector nv_yp; N_Vector* nv_yys; N_Vector* nv_yps; void* mem; SUNMatrix A; SUNLinearSolver LS; /** * Construct IDAS ODE mem & workspace * * @param t0 initial time * @param dae differential-algebraic system of equations */ idas_service(double t0, dae_type& dae) : sundials_context_(), ns(dae.ns), nv_yy(N_VNew_Serial(dae.N, sundials_context_)), nv_yp(N_VNew_Serial(dae.N, sundials_context_)), nv_yys(nullptr), nv_yps(nullptr), mem(IDACreate(sundials_context_)), A(SUNDenseMatrix(dae.N, dae.N, sundials_context_)), LS(SUNLinSol_Dense(nv_yy, A, sundials_context_)) { const int n = dae.N; for (auto i = 0; i < n; ++i) { NV_Ith_S(nv_yy, i) = dae.dbl_yy[i]; NV_Ith_S(nv_yp, i) = dae.dbl_yp[i]; } CHECK_IDAS_CALL(IDAInit(mem, dae_type::idas_res, t0, nv_yy, nv_yp)); CHECK_IDAS_CALL(IDASetUserData(mem, static_cast(&dae))); CHECK_IDAS_CALL(IDASetLinearSolver(mem, LS, A)); idas_sens_init(nv_yys, nv_yps, ns, n); } ~idas_service() { SUNLinSolFree(LS); SUNMatDestroy(A); IDAFree(&mem); N_VDestroy(nv_yy); N_VDestroy(nv_yp); if (dae_type::use_fwd_sens) { N_VDestroyVectorArray(nv_yys, ns); N_VDestroyVectorArray(nv_yps, ns); } } template * = nullptr> void idas_sens_init(N_Vector* yys, N_Vector* yps, int ns, int n) {} template * = nullptr> void idas_sens_init(N_Vector*& yys, N_Vector*& yps, int ns, int n) { yys = N_VCloneVectorArray(ns, nv_yy); yps = N_VCloneVectorArray(ns, nv_yp); for (size_t is = 0; is < ns; ++is) { N_VConst(RCONST(0.0), yys[is]); N_VConst(RCONST(0.0), yps[is]); } set_init_sens(yys, yps, n); CHECK_IDAS_CALL( IDASensInit(mem, ns, IDA_STAGGERED, dae_type::idas_sens_res, yys, yps)); } template * = nullptr> void set_init_sens(N_Vector*& yys, N_Vector*& yps, int n) { for (size_t i = 0; i < n; ++i) { NV_Ith_S(yys[i], i) = 1.0; } for (size_t i = 0; i < n; ++i) { 
NV_Ith_S(yps[i + n], i) = 1.0; } } template < typename dae_t = dae_type, std::enable_if_t* = nullptr> void set_init_sens(N_Vector*& yys, N_Vector*& yps, int n) { for (size_t i = 0; i < n; ++i) { NV_Ith_S(yys[i], i) = 1.0; } } template < typename dae_t = dae_type, std::enable_if_t<(!dae_t::is_var_yy0) && dae_t::is_var_yp0>* = nullptr> void set_init_sens(N_Vector*& yys, N_Vector*& yps, int n) { for (size_t i = 0; i < n; ++i) { NV_Ith_S(yps[i], i) = 1.0; } } template < typename dae_t = dae_type, std::enable_if_t<(!dae_t::is_var_yy0) && (!dae_t::is_var_yp0)>* = nullptr> void set_init_sens(N_Vector*& yys, N_Vector*& yps, int n) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/kinsol_data.hpp0000644000176200001440000001051014645137106024232 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_KINSOL_DATA_HPP #define STAN_MATH_REV_FUNCTOR_KINSOL_DATA_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * KINSOL algebraic system data holder. * Based on cvodes_ode_data. * * @tparam F1 functor type for system function. * @tparam F2 functor type for jacobian function. Default is 0. * If 0, use rev mode autodiff to compute the Jacobian. */ template class kinsol_system_data { const F1& f_; const Eigen::VectorXd& x_; const size_t N_; std::ostream* const msgs_; const std::tuple args_tuple_; typedef kinsol_system_data system_data; public: sundials::Context sundials_context_; N_Vector nv_x_; SUNMatrix J_; SUNLinearSolver LS_; void* kinsol_memory_; /* Constructor */ kinsol_system_data(const F1& f, const Eigen::VectorXd& x, std::ostream* const msgs, const Args&... 
args) : f_(f), x_(x), N_(x.size()), msgs_(msgs), args_tuple_(args...), sundials_context_(), nv_x_(N_VMake_Serial(N_, &to_array_1d(x_)[0], sundials_context_)), J_(SUNDenseMatrix(N_, N_, sundials_context_)), LS_(SUNLinSol_Dense(nv_x_, J_, sundials_context_)), kinsol_memory_(KINCreate(sundials_context_)) {} ~kinsol_system_data() { N_VDestroy_Serial(nv_x_); SUNLinSolFree(LS_); SUNMatDestroy(J_); KINFree(&kinsol_memory_); } /* Implements the user-defined function passed to KINSOL. */ static int kinsol_f_system(const N_Vector x, const N_Vector f_eval, void* const user_data) { const system_data* explicit_system = static_cast(user_data); Eigen::VectorXd x_eigen( Eigen::Map(NV_DATA_S(x), explicit_system->N_)); Eigen::Map f_eval_map(N_VGetArrayPointer(f_eval), explicit_system->N_); auto result = math::apply( [&](const auto&... args) { return explicit_system->f_(x_eigen, explicit_system->msgs_, args...); }, explicit_system->args_tuple_); check_matching_sizes("", "the algebraic system's output", result, "the vector of unknowns, x,", f_eval_map); f_eval_map = result; return 0; } /** * Implements the function of type CVDlsJacFn which is the user-defined * callbacks for KINSOL to calculate the jacobian of the system. * The Jacobian is stored in column major format. * * REMARK - tmp1 and tmp2 are pointers to memory allocated for variables * of type N_Vector which can be used by KINJacFN (the function which * computes the Jacobian) as temporary storage or work space. * See * https://computation.llnl.gov/sites/default/files/public/kin_guide-dev.pdf, * page 55. 
*/ static int kinsol_jacobian(const N_Vector x, const N_Vector f_eval, const SUNMatrix J, void* const user_data, const N_Vector tmp1, const N_Vector tmp2) { const system_data* explicit_system = static_cast(user_data); Eigen::VectorXd x_eigen( Eigen::Map(NV_DATA_S(x), explicit_system->N_)); Eigen::Map f_eval_map(N_VGetArrayPointer(f_eval), explicit_system->N_); auto f_wrt_x = [&](const auto& x) { return math::apply( [&](const auto&... args) { return explicit_system->f_(x, explicit_system->msgs_, args...); }, explicit_system->args_tuple_); }; Eigen::MatrixXd Jf_x; Eigen::VectorXd f_x; jacobian(f_wrt_x, x_eigen, f_x, Jf_x); f_eval_map = f_x; for (int j = 0; j < Jf_x.cols(); j++) for (int i = 0; i < Jf_x.rows(); i++) SM_ELEMENT_D(J, i, j) = Jf_x(i, j); return 0; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/ode_adjoint.hpp0000644000176200001440000003203314645137106024225 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_ODE_ADJOINT_HPP #define STAN_MATH_REV_FUNCTOR_ODE_ADJOINT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the stiff backward differentiation formula * BDF solver or the non-stiff Adams solver from CVODES. The ODE system is * integrated using the adjoint sensitivity approach of CVODES. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... args); * * t is the time, y is the state, msgs is a stream for error messages, and args * are optional arguments passed to the ODE solve function (which are passed * through to \p f without modification). 
* * @tparam F Type of ODE right hand side * @tparam T_y0 Type of initial state * @tparam T_t0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param function_name Calling function name (for printing debugging messages) * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param relative_tolerance_forward Relative tolerance for forward problem * passed to CVODES * @param absolute_tolerance_forward Absolute tolerance per ODE state for * forward problem passed to CVODES * @param relative_tolerance_backward Relative tolerance for backward problem * passed to CVODES * @param absolute_tolerance_backward Absolute tolerance per ODE state for * backward problem passed to CVODES * @param relative_tolerance_quadrature Relative tolerance for quadrature * problem passed to CVODES * @param absolute_tolerance_quadrature Absolute tolerance for quadrature * problem passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param num_steps_between_checkpoints Number of integrator steps after which a * checkpoint is stored for the backward pass * @param interpolation_polynomial type of polynomial used for interpolation * @param solver_forward solver used for forward pass * @param solver_backward solver used for backward pass * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return An `std::vector` of Eigen column vectors with scalars equal to * the least upper bound of `T_y0`, `T_t0`, `T_ts`, and the lambda's arguments. 
* This represents the solution to ODE at times \p ts */ template * = nullptr, require_any_not_st_arithmetic* = nullptr> auto ode_adjoint_impl( const char* function_name, F&& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance_forward, const T_abs_tol_fwd& absolute_tolerance_forward, double relative_tolerance_backward, const T_abs_tol_bwd& absolute_tolerance_backward, double relative_tolerance_quadrature, double absolute_tolerance_quadrature, long int max_num_steps, // NOLINT(runtime/int) long int num_steps_between_checkpoints, // NOLINT(runtime/int) int interpolation_polynomial, int solver_forward, int solver_backward, std::ostream* msgs, const T_Args&... args) { using integrator_vari = cvodes_integrator_adjoint_vari, T_t0, T_ts, plain_type_t...>; auto integrator = new integrator_vari( function_name, std::forward(f), eval(y0), t0, ts, relative_tolerance_forward, absolute_tolerance_forward, relative_tolerance_backward, absolute_tolerance_backward, relative_tolerance_quadrature, absolute_tolerance_quadrature, max_num_steps, num_steps_between_checkpoints, interpolation_polynomial, solver_forward, solver_backward, msgs, eval(args)...); return integrator->solution(); } /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the stiff backward differentiation formula * BDF solver or the non-stiff Adams solver from CVODES. The ODE system is * integrated using the adjoint sensitivity approach of CVODES. This * implementation handles the case of a double return type which ensures that no * resources are left on the AD stack. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... 
args); * * t is the time, y is the state, msgs is a stream for error messages, and args * are optional arguments passed to the ODE solve function (which are passed * through to \p f without modification). * * @tparam F Type of ODE right hand side * @tparam T_y0 Type of initial state * @tparam T_t0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param function_name Calling function name (for printing debugging messages) * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param relative_tolerance_forward Relative tolerance for forward problem * passed to CVODES * @param absolute_tolerance_forward Absolute tolerance per ODE state for * forward problem passed to CVODES * @param relative_tolerance_backward Relative tolerance for backward problem * passed to CVODES * @param absolute_tolerance_backward Absolute tolerance per ODE state for * backward problem passed to CVODES * @param relative_tolerance_quadrature Relative tolerance for quadrature * problem passed to CVODES * @param absolute_tolerance_quadrature Absolute tolerance for quadrature * problem passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param num_steps_between_checkpoints Number of integrator steps after which a * checkpoint is stored for the backward pass * @param interpolation_polynomial type of polynomial used for interpolation * @param solver_forward solver used for forward pass * @param solver_backward solver used for backward pass * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return An `std::vector` of Eigen column vectors with scalars equal to * the least upper bound of `T_y0`, `T_t0`, `T_ts`, and the lambda's 
arguments. * This represents the solution to ODE at times \p ts */ template * = nullptr, require_all_st_arithmetic* = nullptr> std::vector ode_adjoint_impl( const char* function_name, F&& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance_forward, const T_abs_tol_fwd& absolute_tolerance_forward, double relative_tolerance_backward, const T_abs_tol_bwd& absolute_tolerance_backward, double relative_tolerance_quadrature, double absolute_tolerance_quadrature, long int max_num_steps, // NOLINT(runtime/int) long int num_steps_between_checkpoints, // NOLINT(runtime/int) int interpolation_polynomial, int solver_forward, int solver_backward, std::ostream* msgs, const T_Args&... args) { std::vector ode_solution; { nested_rev_autodiff nested; using integrator_vari = cvodes_integrator_adjoint_vari, T_t0, T_ts, plain_type_t...>; auto integrator = new integrator_vari( function_name, std::forward(f), eval(y0), t0, ts, relative_tolerance_forward, absolute_tolerance_forward, relative_tolerance_backward, absolute_tolerance_backward, relative_tolerance_quadrature, absolute_tolerance_quadrature, max_num_steps, num_steps_between_checkpoints, interpolation_polynomial, solver_forward, solver_backward, msgs, eval(args)...); ode_solution = integrator->solution(); } return ode_solution; } /** * Solve the ODE initial value problem y' = f(t, y), y(t0) = y0 at a set of * times, { t1, t2, t3, ... } using the stiff backward differentiation formula * BDF solver or the non-stiff Adams solver from CVODES. The ODE system is * integrated using the adjoint sensitivity approach of CVODES. * * \p f must define an operator() with the signature as: * template * Eigen::Matrix, Eigen::Dynamic, 1> * operator()(const T_t& t, const Eigen::Matrix& y, * std::ostream* msgs, const T_Args&... 
args); * * t is the time, y is the state, msgs is a stream for error messages, and args * are optional arguments passed to the ODE solve function (which are passed * through to \p f without modification). * * @tparam F Type of ODE right hand side * @tparam T_y0 Type of initial state * @tparam T_t0 Type of initial time * @tparam T_ts Type of output times * @tparam T_Args Types of pass-through parameters * * @param f Right hand side of the ODE * @param y0 Initial state * @param t0 Initial time * @param ts Times at which to solve the ODE at. All values must be sorted and * not less than t0. * @param relative_tolerance_forward Relative tolerance for forward problem * passed to CVODES * @param absolute_tolerance_forward Absolute tolerance per ODE state for * forward problem passed to CVODES * @param relative_tolerance_backward Relative tolerance for backward problem * passed to CVODES * @param absolute_tolerance_backward Absolute tolerance per ODE state for * backward problem passed to CVODES * @param relative_tolerance_quadrature Relative tolerance for quadrature * problem passed to CVODES * @param absolute_tolerance_quadrature Absolute tolerance for quadrature * problem passed to CVODES * @param max_num_steps Upper limit on the number of integration steps to * take between each output (error if exceeded) * @param num_steps_between_checkpoints Number of integrator steps after which a * checkpoint is stored for the backward pass * @param interpolation_polynomial type of polynomial used for interpolation * @param solver_forward solver used for forward pass * @param solver_backward solver used for backward pass * @param[in, out] msgs the print stream for warning messages * @param args Extra arguments passed unmodified through to ODE right hand side * @return An `std::vector` of Eigen column vectors with scalars equal to * the least upper bound of `T_y0`, `T_t0`, `T_ts`, and the lambda's arguments. 
* This represents the solution to ODE at times \p ts */ template * = nullptr> auto ode_adjoint_tol_ctl( F&& f, const T_y0& y0, const T_t0& t0, const std::vector& ts, double relative_tolerance_forward, const T_abs_tol_fwd& absolute_tolerance_forward, double relative_tolerance_backward, const T_abs_tol_bwd& absolute_tolerance_backward, double relative_tolerance_quadrature, double absolute_tolerance_quadrature, long int max_num_steps, // NOLINT(runtime/int) long int num_steps_between_checkpoints, // NOLINT(runtime/int) int interpolation_polynomial, int solver_forward, int solver_backward, std::ostream* msgs, const T_Args&... args) { return ode_adjoint_impl( "ode_adjoint_tol_ctl", std::forward(f), y0, t0, ts, relative_tolerance_forward, absolute_tolerance_forward, relative_tolerance_backward, absolute_tolerance_backward, relative_tolerance_quadrature, absolute_tolerance_quadrature, max_num_steps, num_steps_between_checkpoints, interpolation_polynomial, solver_forward, solver_backward, msgs, args...); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/functor/map_rect_reduce.hpp0000644000176200001440000001033314645137106025066 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_MAP_RECT_REDUCE_HPP #define STAN_MATH_REV_FUNCTOR_MAP_RECT_REDUCE_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template struct map_rect_reduce { matrix_d operator()(const vector_d& shared_params, const vector_d& job_specific_params, const std::vector& x_r, const std::vector& x_i, std::ostream* msgs = nullptr) const { const size_type num_shared_params = shared_params.rows(); const size_type num_job_specific_params = job_specific_params.rows(); matrix_d out(1 + num_shared_params + num_job_specific_params, 0); // Run nested autodiff in this scope nested_rev_autodiff nested; vector_v shared_params_v = to_var(shared_params); vector_v job_specific_params_v = to_var(job_specific_params); vector_v fx_v = 
F()(shared_params_v, job_specific_params_v, x_r, x_i, msgs); const size_type size_f = fx_v.rows(); out.resize(Eigen::NoChange, size_f); for (size_type i = 0; i < size_f; ++i) { out(0, i) = fx_v(i).val(); nested.set_zero_all_adjoints(); fx_v(i).grad(); for (size_type j = 0; j < num_shared_params; ++j) { out(1 + j, i) = shared_params_v(j).vi_->adj_; } for (size_type j = 0; j < num_job_specific_params; ++j) { out(1 + num_shared_params + j, i) = job_specific_params_v(j).vi_->adj_; } } return out; } }; template struct map_rect_reduce { matrix_d operator()(const vector_d& shared_params, const vector_d& job_specific_params, const std::vector& x_r, const std::vector& x_i, std::ostream* msgs = nullptr) const { const size_type num_job_specific_params = job_specific_params.rows(); matrix_d out(1 + num_job_specific_params, 0); // Run nested autodiff in this scope nested_rev_autodiff nested; vector_v job_specific_params_v = to_var(job_specific_params); vector_v fx_v = F()(shared_params, job_specific_params_v, x_r, x_i, msgs); const size_type size_f = fx_v.rows(); out.resize(Eigen::NoChange, size_f); for (size_type i = 0; i < size_f; ++i) { out(0, i) = fx_v(i).val(); nested.set_zero_all_adjoints(); fx_v(i).grad(); for (size_type j = 0; j < num_job_specific_params; ++j) { out(1 + j, i) = job_specific_params_v(j).vi_->adj_; } } return out; } }; template struct map_rect_reduce { matrix_d operator()(const vector_d& shared_params, const vector_d& job_specific_params, const std::vector& x_r, const std::vector& x_i, std::ostream* msgs = nullptr) const { const size_type num_shared_params = shared_params.rows(); matrix_d out(1 + num_shared_params, 0); // Run nested autodiff in this scope nested_rev_autodiff nested; vector_v shared_params_v = to_var(shared_params); vector_v fx_v = F()(shared_params_v, job_specific_params, x_r, x_i, msgs); const size_type size_f = fx_v.rows(); out.resize(Eigen::NoChange, size_f); for (size_type i = 0; i < size_f; ++i) { out(0, i) = fx_v(i).val(); 
nested.set_zero_all_adjoints(); fx_v(i).grad(); for (size_type j = 0; j < num_shared_params; ++j) { out(1 + j, i) = shared_params_v(j).vi_->adj_; } } return out; } }; } // namespace internal } // namespace math } // namespace stan #ifdef STAN_REGISTER_MPI_MAP_RECT_ALL #undef STAN_REGISTER_MPI_MAP_RECT_ALL #define STAN_REGISTER_MPI_MAP_RECT_ALL(CALLID, FUNCTOR) \ STAN_REGISTER_MPI_MAP_RECT(CALLID, FUNCTOR, double, double) \ STAN_REGISTER_MPI_MAP_RECT(CALLID, FUNCTOR, double, var) \ STAN_REGISTER_MPI_MAP_RECT(CALLID, FUNCTOR, var, double) \ STAN_REGISTER_MPI_MAP_RECT(CALLID, FUNCTOR, var, var) #endif #endif StanHeaders/inst/include/stan/math/rev/core/0000755000176200001440000000000014645161272020505 5ustar liggesusersStanHeaders/inst/include/stan/math/rev/core/operator_unary_negative.hpp0000644000176200001440000000300714645137106026150 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_UNARY_NEGATIVE_HPP #define STAN_MATH_REV_CORE_OPERATOR_UNARY_NEGATIVE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Unary negation operator for variables (C++). * * \f$\frac{d}{dx} -x = -1\f$. * \f[ \mbox{operator-}(x) = \begin{cases} -x & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator-}(x)}{\partial x} = \begin{cases} -1 & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Argument variable. * @return Negation of variable. 
*/ inline var operator-(const var& a) { return make_callback_var( -a.val(), [a](const auto& vi) mutable { a.adj() -= vi.adj(); }); } /** * Compute additive inverse of input * * @tparam T type of input * @param a input * @return negative of input */ template * = nullptr> inline auto operator-(const T& a) { return make_callback_var(-a.val(), [a](const auto& vi) mutable { for (Eigen::Index j = 0; j < a.cols(); ++j) { for (Eigen::Index i = 0; i < a.rows(); ++i) { a.adj().coeffRef(i, j) -= vi.adj().coeff(i, j); } } }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/chainablestack.hpp0000644000176200001440000000053314645137106024152 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_CHAINABLESTACK_HPP #define STAN_MATH_REV_CORE_CHAINABLESTACK_HPP #include namespace stan { namespace math { class chainable_alloc; class vari_base; using ChainableStack = AutodiffStackSingleton; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_less_than.hpp0000644000176200001440000000272414645137106025115 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_LESS_THAN_HPP #define STAN_MATH_REV_CORE_OPERATOR_LESS_THAN_HPP #include #include namespace stan { namespace math { /** * Less than operator comparing variables' values (C++). * \f[ \mbox{operator\textless}(x, y) = \begin{cases} 0 & \mbox{if } x \geq y \\ 1 & \mbox{if } x < y \\[6pt] 0 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * @param a First variable. * @param b Second variable. * @return True if first variable's value is less than second's. */ inline bool operator<(const var& a, const var& b) { return a.val() < b.val(); } /** * Less than operator comparing variable's value and a double * (C++). * * @tparam Arith An arithmetic type * @param a First variable. * @param b Second value. * @return True if first variable's value is less than second value. 
*/ template * = nullptr> inline bool operator<(const var& a, Arith b) { return a.val() < b; } /** * Less than operator comparing a double and variable's value * (C++). * * @tparam Arith An arithmetic type * @param a First value. * @param b Second variable. * @return True if first value is less than second variable's value. */ template * = nullptr> inline bool operator<(Arith a, const var& b) { return a < b.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_greater_than_or_equal.hpp0000644000176200001440000000316514645137106027467 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_GREATER_THAN_OR_EQUAL_HPP #define STAN_MATH_REV_CORE_OPERATOR_GREATER_THAN_OR_EQUAL_HPP #include #include namespace stan { namespace math { /** * Greater than or equal operator comparing two variables' values * (C++). * \f[ \mbox{operator\textgreater=}(x, y) = \begin{cases} 0 & \mbox{if } x < y\\ 1 & \mbox{if } x \geq y \\[6pt] 0 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return True if first variable's value is greater than or equal * to the second's. */ inline bool operator>=(const var& a, const var& b) { return a.val() >= b.val(); } /** * Greater than or equal operator comparing variable's value and * double (C++). * * @tparam Arith An arithmetic type * @param a First variable. * @param b Second value. * @return True if first variable's value is greater than or equal * to second value. */ template * = nullptr> inline bool operator>=(const var& a, Arith b) { return a.val() >= b; } /** * Greater than or equal operator comparing double and variable's * value (C++). * * @tparam Arith An arithmetic type * @param a First value. * @param b Second variable. * @return True if the first value is greater than or equal to the * second variable's value. 
*/ template * = nullptr> inline bool operator>=(Arith a, const var& b) { return a >= b.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_subtraction.hpp0000644000176200001440000002540014645137106025466 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_SUBTRACTION_HPP #define STAN_MATH_REV_CORE_OPERATOR_SUBTRACTION_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Subtraction operator for variables. * * The partial derivatives are defined by * * \f$\frac{\partial}{\partial x} (x-y) = 1\f$, and * * \f$\frac{\partial}{\partial y} (x-y) = -1\f$. * \f[ \mbox{operator-}(x, y) = \begin{cases} x-y & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator-}(x, y)}{\partial x} = \begin{cases} 1 & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator-}(x, y)}{\partial y} = \begin{cases} -1 & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @tparam Var1 value type of a var * @tparam Var2 value type of a var * @param a First variable operand. * @param b Second variable operand. * @return Variable result of subtracting the second variable from * the first. */ inline var operator-(const var& a, const var& b) { return make_callback_vari(a.vi_->val_ - b.vi_->val_, [avi = a.vi_, bvi = b.vi_](const auto& vi) mutable { avi->adj_ += vi.adj_; bvi->adj_ -= vi.adj_; }); } /** * Subtraction operator for variable and scalar. * * The derivative for the variable is * * \f$\frac{\partial}{\partial x} (x-c) = 1\f$, and * * @tparam Var value type of a var * @tparam Arith An arithmetic type * @param a First variable operand. * @param b Second scalar operand. 
* @return Result of subtracting the scalar from the variable. */ template * = nullptr> inline var operator-(const var& a, Arith b) { if (unlikely(b == 0.0)) { return a; } return make_callback_vari( a.vi_->val_ - b, [avi = a.vi_](const auto& vi) mutable { avi->adj_ += vi.adj_; }); } /** * Subtraction operator for scalar and variable. * * The derivative for the variable is * * \f$\frac{\partial}{\partial y} (c-y) = -1\f$, and * * @tparam Var value type of a var * @tparam Arith An arithmetic type * @param a First scalar operand. * @param b Second variable operand. * @return Result of subtracting a variable from a scalar. */ template * = nullptr> inline var operator-(Arith a, const var& b) { return make_callback_vari( a - b.vi_->val_, [bvi = b.vi_](const auto& vi) mutable { bvi->adj_ -= vi.adj_; }); } /** * Subtraction operator for matrix variables. * * @tparam VarMat1 A matrix of vars or a var with an underlying matrix type. * @tparam VarMat2 A matrix of vars or a var with an underlying matrix type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of subtracting two variables. */ template * = nullptr> inline auto subtract(const VarMat1& a, const VarMat2& b) { check_matching_dims("subtract", "a", a, "b", b); using op_ret_type = decltype(a.val() - b.val()); using ret_type = return_var_matrix_t; arena_t arena_a = a; arena_t arena_b = b; arena_t ret((arena_a.val() - arena_b.val())); reverse_pass_callback([ret, arena_a, arena_b]() mutable { for (Eigen::Index j = 0; j < ret.cols(); ++j) { for (Eigen::Index i = 0; i < ret.rows(); ++i) { const auto ref_adj = ret.adj().coeffRef(i, j); arena_a.adj().coeffRef(i, j) += ref_adj; arena_b.adj().coeffRef(i, j) -= ref_adj; } } }); return ret_type(ret); } /** * Subtraction operator for a matrix variable and arithmetic. * * @tparam VarMat A matrix of vars or a var with an underlying matrix type. * @tparam Arith A type with an arithmetic Scalar type. * @param a First variable operand. 
* @param b Second variable operand. * @return Variable result of subtracting two variables. */ template * = nullptr, require_rev_matrix_t* = nullptr> inline auto subtract(const VarMat& a, const Arith& b) { if (is_eigen::value) { check_matching_dims("subtract", "a", a, "b", b); } using op_ret_type = plain_type_t; using ret_type = return_var_matrix_t; arena_t arena_a = a; arena_t ret(arena_a.val().array() - as_array_or_scalar(b)); reverse_pass_callback( [ret, arena_a]() mutable { arena_a.adj() += ret.adj(); }); return ret_type(ret); } /** * Subtraction operator for an arithmetic type and matrix variable. * * @tparam VarMat A matrix of vars or a var with an underlying matrix type. * @tparam Arith A type with an arithmetic Scalar type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of subtracting two variables. */ template * = nullptr, require_rev_matrix_t* = nullptr> inline auto subtract(const Arith& a, const VarMat& b) { if (is_eigen::value) { check_matching_dims("subtract", "a", a, "b", b); } using op_ret_type = plain_type_t; using ret_type = return_var_matrix_t; arena_t arena_b = b; arena_t ret(as_array_or_scalar(a) - arena_b.val().array()); reverse_pass_callback( [ret, arena_b]() mutable { arena_b.adj() -= ret.adj_op(); }); return ret_type(ret); } /** * Subtraction operator for an arithmetic matrix and variable. * * @tparam Var A `var_value` with an underlying arithmetic type. * @tparam EigMat An Eigen Matrix type with an arithmetic Scalar type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of subtracting two variables. */ template * = nullptr, require_eigen_vt* = nullptr> inline auto subtract(const Var& a, const EigMat& b) { using ret_type = return_var_matrix_t; arena_t ret(a.val() - b.array()); reverse_pass_callback([ret, a]() mutable { a.adj() += ret.adj().sum(); }); return ret_type(ret); } /** * Subtraction operator for a variable and arithmetic matrix. 
* * @tparam EigMat An Eigen Matrix type with an arithmetic Scalar type. * @tparam Var A `var_value` with an underlying arithmetic type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of subtracting two variables. */ template * = nullptr, require_var_vt* = nullptr> inline auto subtract(const EigMat& a, const Var& b) { using ret_type = return_var_matrix_t; arena_t ret(a.array() - b.val()); reverse_pass_callback([ret, b]() mutable { b.adj() -= ret.adj().sum(); }); return ret_type(ret); } /** * Subtraction operator for a variable and variable matrix. * * @tparam VarMat An Eigen Matrix type with a variable Scalar type or a * `var_value` with an underlying matrix type. * @tparam Var A `var_value` with an underlying arithmetic type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of subtracting two variables. */ template * = nullptr, require_rev_matrix_t* = nullptr> inline auto subtract(const Var& a, const VarMat& b) { using ret_type = return_var_matrix_t; arena_t arena_b(b); arena_t ret(a.val() - arena_b.val().array()); reverse_pass_callback([ret, a, arena_b]() mutable { for (Eigen::Index j = 0; j < ret.cols(); ++j) { for (Eigen::Index i = 0; i < ret.rows(); ++i) { auto ret_adj = ret.adj().coeff(i, j); a.adj() += ret_adj; arena_b.adj().coeffRef(i, j) -= ret_adj; } } }); return ret_type(ret); } /** * Subtraction operator for a variable matrix and variable. * * @tparam VarMat An Eigen Matrix type with a variable Scalar type or a * `var_value` with an underlying matrix type. * @tparam Var A `var_value` with an underlying arithmetic type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of subtracting two variables. 
*/ template * = nullptr, require_var_vt* = nullptr> inline auto subtract(const VarMat& a, const Var& b) { using ret_type = return_var_matrix_t; arena_t arena_a(a); arena_t ret(arena_a.val().array() - b.val()); reverse_pass_callback([ret, b, arena_a]() mutable { for (Eigen::Index j = 0; j < ret.cols(); ++j) { for (Eigen::Index i = 0; i < ret.rows(); ++i) { const auto ret_adj = ret.adj().coeff(i, j); arena_a.adj().coeffRef(i, j) += ret_adj; b.adj() -= ret_adj; } } }); return ret_type(ret); } template * = nullptr, require_any_arithmetic_t* = nullptr> inline auto subtract(const T1& a, const T2& b) { return a - b; } template * = nullptr> inline auto subtract(const T1& a, const T2& b) { return a - b; } /** * Addition operator for matrix variables. * * @tparam VarMat1 A matrix of vars or a var with an underlying matrix type. * @tparam VarMat2 A matrix of vars or a var with an underlying matrix type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. */ template * = nullptr> inline auto operator-(const VarMat1& a, const VarMat2& b) { return subtract(a, b); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_logical_or.hpp0000644000176200001440000000262114645137106025243 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_LOGICAL_OR_HPP #define STAN_MATH_REV_CORE_OPERATOR_LOGICAL_OR_HPP #include #include namespace stan { namespace math { /** * Return the logical disjunction of the values of the two * arguments as defined by ||. * * @param[in] x first argument * @param[in] y second argument * @return disjunction of the arguments' values */ inline bool operator||(const var& x, const var& y) { return x.val() || y.val(); } /** * Return the logical disjunction of the values of the two * arguments as defined by ||. 
* * @tparam Arith An arithmetic type * @param[in] x first argument * @param[in] y second argument * @return disjunction of first argument's value and second * argument */ template * = nullptr> inline bool operator||(const var& x, Arith y) { return x.val() || y; } /** * Return the logical disjunction of the values of the two * arguments as defined by ||. * * @tparam Arith An arithmetic type * @param[in] x first argument * @param[in] y second argument * @return disjunction of first argument and the second * argument's value */ template * = nullptr> inline bool operator||(Arith x, const var& y) { return x || y.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/var.hpp0000644000176200001440000011352014645137106022007 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VAR_HPP #define STAN_MATH_REV_CORE_VAR_HPP #ifdef STAN_OPENCL #include #include #endif #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // forward declare template static void grad(Vari* vi); /** * Independent (input) and dependent (output) variables for gradients. * * This class acts as a smart pointer, with resources managed by * an arena-based memory manager scoped to a single gradient * calculation. * * A var is constructed with a type `T` and used like any * other scalar. Arithmetical functions like negation, addition, * and subtraction, as well as a range of mathematical functions * like exponentiation and powers are overridden to operate on * var values objects. * @tparam T An Floating point type. */ template class var_value> { public: using value_type = std::decay_t; // type in vari_value. using vari_type = vari_value; /** * Pointer to the implementation of this variable. * * This value should not be modified, but may be accessed in * var operators to construct `vari_value` * instances. */ vari_type* vi_; /** * Return `true` if this variable has been * declared, but not been defined. 
Any attempt to use an * undefined variable's value or adjoint will result in a * segmentation fault. * * @return true if this variable does not yet have * a defined variable. */ inline bool is_uninitialized() { return (vi_ == nullptr); } /** * Construct a variable for later assignment. * * This is implemented as a no-op, leaving the underlying implementation * dangling. Before an assignment, the behavior is thus undefined just * as for a basic double. */ var_value() : vi_(nullptr) {} /** * Construct a variable from the specified floating point argument * by constructing a new `vari_value`. This constructor is only * valid when `S` is convertible to this `vari_value`'s `value_type`. * @tparam S A type that is convertible to `value_type`. * @param x Value of the variable. */ template * = nullptr> var_value(S x) : vi_(new vari_type(x, false)) {} // NOLINT /** * Construct a variable from a pointer to a variable implementation. * @param vi A vari_value pointer. */ var_value(vari_type* vi) : vi_(vi) {} // NOLINT /** * Return a constant reference to the value of this variable. * * @return The value of this variable. */ inline const auto& val() const noexcept { return vi_->val(); } /** * Return a reference of the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. * * @return Adjoint for this variable. */ inline auto& adj() const noexcept { return vi_->adj(); } /** * Return a reference to the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. * * @return Adjoint for this variable. */ inline auto& adj() noexcept { return vi_->adj_; } /** * Compute the gradient of this (dependent) variable with respect to * the specified vector of (independent) variables, assigning the * specified vector to the gradient. * * The grad() function does not recover memory. 
In Stan * 2.4 and earlier, this function did recover memory. * * @tparam CheckContainer Not set by user. The default value of value_type * is used to require that grad is only available for scalar `var_value` * types. * @param x Vector of independent variables. * @param g Gradient vector of partial derivatives of this * variable with respect to x. */ inline void grad(std::vector>& x, std::vector& g) { stan::math::grad(vi_); g.resize(x.size()); for (size_t i = 0; i < x.size(); ++i) { g[i] = x[i].vi_->adj_; } } /** * Compute the gradient of this (dependent) variable with respect * to all (independent) variables. * * @tparam CheckContainer Not set by user. The default value of value_type * is used to require that grad is only available for scalar `var_value` * types. * The grad() function does not recover memory. */ void grad() { stan::math::grad(vi_); } // POINTER OVERRIDES /** * Return a reference to underlying implementation of this variable. * * If x is of type var, then applying * this operator, *x, has the same behavior as * *(x.vi_). * * Warning: The returned reference does not track changes to * this variable. * * @return variable */ inline vari_type& operator*() { return *vi_; } /** * Return a pointer to the underlying implementation of this variable. * * If x is of type var, then applying * this operator, x->, behaves the same way as * x.vi_->. * * Warning: The returned result does not track changes to * this variable. */ inline vari_type* operator->() { return vi_; } // COMPOUND ASSIGNMENT OPERATORS /** * The compound add/assignment operator for variables (C++). * * If this variable is a and the argument is the variable b, * then (a += b) behaves exactly the same way as (a = a + b), * creating an intermediate variable representing (a + b). * * @param b The variable to add to this variable. * @return The result of adding the specified variable to this variable. 
*/ inline var_value& operator+=(const var_value& b); /** * The compound add/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a += b) behaves exactly the same way as (a = a + b). Note * that the result is an assignable lvalue. * * @param b The scalar to add to this variable. * @return The result of adding the specified variable to this variable. */ inline var_value& operator+=(T b); /** * The compound subtract/assignment operator for variables (C++). * * If this variable is a and the argument is the variable b, * then (a -= b) behaves exactly the same way as (a = a - b). * Note that the result is an assignable lvalue. * * @param b The variable to subtract from this variable. * @return The result of subtracting the specified variable from * this variable. */ inline var_value& operator-=(const var_value& b); /** * The compound subtract/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a -= b) behaves exactly the same way as (a = a - b). Note * that the result is an assignable lvalue. * * @param b The scalar to subtract from this variable. * @return The result of subtracting the specified variable from this * variable. */ inline var_value& operator-=(T b); /** * The compound multiply/assignment operator for variables (C++). * * If this variable is a and the argument is the variable b, * then (a *= b) behaves exactly the same way as (a = a * b). * Note that the result is an assignable lvalue. * * @param b The variable to multiply this variable by. * @return The result of multiplying this variable by the * specified variable. */ inline var_value& operator*=(const var_value& b); /** * The compound multiply/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a *= b) behaves exactly the same way as (a = a * b). Note * that the result is an assignable lvalue. 
* * @param b The scalar to multiply this variable by. * @return The result of multiplying this variable by the specified * variable. */ inline var_value& operator*=(T b); /** * The compound divide/assignment operator for variables (C++). If this * variable is a and the argument is the variable b, then (a /= b) * behaves exactly the same way as (a = a / b). Note that the * result is an assignable lvalue. * * @param b The variable to divide this variable by. * @return The result of dividing this variable by the * specified variable. */ inline var_value& operator/=(const var_value& b); /** * The compound divide/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a /= b) behaves exactly the same way as (a = a / b). Note * that the result is an assignable lvalue. * * @param b The scalar to divide this variable by. * @return The result of dividing this variable by the specified * variable. */ inline var_value& operator/=(T b); /** * Write the value of this autodiff variable and its adjoint to * the specified output stream. * * @param os Output stream to which to write. * @param v Variable to write. * @return Reference to the specified output stream. */ friend std::ostream& operator<<(std::ostream& os, const var_value& v) { if (v.vi_ == nullptr) { return os << "uninitialized"; } return os << v.val(); } }; namespace internal { template using require_matrix_var_value = require_t::value || is_kernel_expression_and_not_scalar::value) && std::is_floating_point>::value>>; } /** * Independent (input) and dependent (output) variables for gradients. * * This class acts as a smart pointer, with resources managed by * an arena-based memory manager scoped to a single gradient * calculation. * * A var is constructed with a type `T` and used like any * other scalar. 
Arithmetical functions like negation, addition, * and subtraction, as well as a range of mathematical functions * like exponentiation and powers are overridden to operate on * var values objects. * @tparam T An Floating point type. */ template class var_value> { public: using value_type = T; // type in vari_value. using vari_type = std::conditional_t::value, vari_value, vari_view>; static constexpr int RowsAtCompileTime{vari_type::RowsAtCompileTime}; static constexpr int ColsAtCompileTime{vari_type::ColsAtCompileTime}; /** * Pointer to the implementation of this variable. * * This value should not be modified, but may be accessed in * var operators to construct `vari_value` * instances. */ vari_type* vi_; /** * Return `true` if this variable has been * declared, but not been defined. Any attempt to use an * undefined variable's value or adjoint will result in a * segmentation fault. * * @return true if this variable does not yet have * a defined variable. */ inline bool is_uninitialized() noexcept { return (vi_ == nullptr); } /** * Construct a variable for later assignment. * * This is implemented as a no-op, leaving the underlying implementation * dangling. Before an assignment, the behavior is thus undefined just * as for a basic double. */ var_value() : vi_(nullptr) {} /** * Construct a variable from the specified floating point argument * by constructing a new `vari_value`. This constructor is only * valid when `S` is convertible to this `vari_value`'s `value_type`. * @tparam S A type that is convertible to `value_type`. * @param x Value of the variable. */ template * = nullptr> var_value(S&& x) : vi_(new vari_type(std::forward(x), false)) {} // NOLINT /** * Copy constructor for var_val. 
* @tparam S type of the value in the `var_value` to assing * @param other the value to assign * @return this */ template * = nullptr, require_all_plain_type_t* = nullptr> var_value(const var_value& other) : vi_(other.vi_) {} /** * Construct a `var_value` with a plain type * from another `var_value` containing an expression. * @tparam S type of the value in the `var_value` to assing * @param other the value to assign * @return this */ template * = nullptr, require_not_plain_type_t* = nullptr, require_plain_type_t* = nullptr> var_value(const var_value& other) : vi_(new vari_type(other.vi_->val_)) { reverse_pass_callback( [this_vi = this->vi_, other_vi = other.vi_]() mutable { other_vi->adj_ += this_vi->adj_; }); } /** * Construct a `var_value` with premade @ref arena_matrix types. * The values and adjoint matrices passed here will be shallow copied. * @tparam S type of the value in the `var_value` to assing * @param val The value matrix to go into the vari * @param adj the adjoint matrix to go into the vari */ template * = nullptr, require_arena_matrix_t* = nullptr> var_value(const S& val, const S& adj) : vi_(new vari_type(val, adj)) {} /** * Construct a variable from a pointer to a variable implementation. * @param vi A vari_value pointer. */ var_value(vari_type* vi) : vi_(vi) {} // NOLINT /** * Return a constant reference to the value of this variable. * * @return The value of this variable. */ inline const auto& val() const noexcept { return vi_->val(); } inline auto& val_op() noexcept { return vi_->val_op(); } /** * Return a reference to the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. * * @return Adjoint for this variable. 
*/ inline auto& adj() noexcept { return vi_->adj(); } inline auto& adj() const noexcept { return vi_->adj(); } inline auto& adj_op() noexcept { return vi_->adj(); } inline Eigen::Index rows() const noexcept { return vi_->rows(); } inline Eigen::Index cols() const noexcept { return vi_->cols(); } inline Eigen::Index size() const noexcept { return vi_->size(); } // POINTER OVERRIDES /** * Return a reference to underlying implementation of this variable. * * If x is of type var, then applying * this operator, *x, has the same behavior as * *(x.vi_). * * Warning: The returned reference does not track changes to * this variable. * * @return variable */ inline vari_type& operator*() { return *vi_; } /** * Return a pointer to the underlying implementation of this variable. * * If x is of type var, then applying * this operator, x->, behaves the same way as * x.vi_->. * * Warning: The returned result does not track changes to * this variable. */ inline vari_type* operator->() { return vi_; } // COMPOUND ASSIGNMENT OPERATORS /** * The compound add/assignment operator for variables (C++). * * If this variable is a and the argument is the variable b, * then (a += b) behaves exactly the same way as (a = a + b), * creating an intermediate variable representing (a + b). * * @param b The variable to add to this variable. * @return The result of adding the specified variable to this variable. */ inline var_value& operator+=(const var_value& b); /** * The compound add/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a += b) behaves exactly the same way as (a = a + b). Note * that the result is an assignable lvalue. * * @param b The scalar to add to this variable. * @return The result of adding the specified variable to this variable. */ inline var_value& operator+=(T b); /** * The compound subtract/assignment operator for variables (C++). 
* * If this variable is a and the argument is the variable b, * then (a -= b) behaves exactly the same way as (a = a - b). * Note that the result is an assignable lvalue. * * @param b The variable to subtract from this variable. * @return The result of subtracting the specified variable from * this variable. */ template * = nullptr> inline var_value& operator-=(const S& b); /** * The compound subtract/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a -= b) behaves exactly the same way as (a = a - b). Note * that the result is an assignable lvalue. * * @param b The scalar to subtract from this variable. * @return The result of subtracting the specified variable from this * variable. */ template * = nullptr> inline var_value& operator-=(const S& b); /** * The compound multiply/assignment operator for variables (C++). * * If this variable is a and the argument is the variable b, * then (a *= b) behaves exactly the same way as (a = a * b). * Note that the result is an assignable lvalue. * * @param b The variable to multiply this variable by. * @return The result of multiplying this variable by the * specified variable. */ inline var_value& operator*=(const var_value& b); /** * The compound multiply/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a *= b) behaves exactly the same way as (a = a * b). Note * that the result is an assignable lvalue. * * @param b The scalar to multiply this variable by. * @return The result of multiplying this variable by the specified * variable. */ inline var_value& operator*=(T b); /** * The compound divide/assignment operator for variables (C++). If this * variable is a and the argument is the variable b, then (a /= b) * behaves exactly the same way as (a = a / b). Note that the * result is an assignable lvalue. * * @param b The variable to divide this variable by. 
* @return The result of dividing this variable by the * specified variable. */ inline var_value& operator/=(const var_value& b); /** * The compound divide/assignment operator for scalars (C++). * * If this variable is a and the argument is the scalar b, then * (a /= b) behaves exactly the same way as (a = a / b). Note * that the result is an assignable lvalue. * * @param b The scalar to divide this variable by. * @return The result of dividing this variable by the specified * variable. */ inline var_value& operator/=(T b); /** * A block view of the underlying Eigen matrices. * @param start_row Starting row of block. * @param start_col Starting columns of block. * @param num_rows Number of rows to return. * @param num_cols Number of columns to return. */ inline auto block(Eigen::Index start_row, Eigen::Index start_col, Eigen::Index num_rows, Eigen::Index num_cols) const { using vari_sub = decltype(vi_->block(start_row, start_col, num_rows, num_cols)); using var_sub = var_value>; return var_sub( new vari_sub(vi_->block(start_row, start_col, num_rows, num_cols))); } inline auto block(Eigen::Index start_row, Eigen::Index start_col, Eigen::Index num_rows, Eigen::Index num_cols) { using vari_sub = decltype(vi_->block(start_row, start_col, num_rows, num_cols)); using var_sub = var_value>; return var_sub( new vari_sub(vi_->block(start_row, start_col, num_rows, num_cols))); } /** * View transpose of eigen matrix. */ inline auto transpose() const { using vari_sub = decltype(vi_->transpose()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->transpose())); } inline auto transpose() { using vari_sub = decltype(vi_->transpose()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->transpose())); } /** * View of the head of Eigen vector types. * @param n Number of elements to return from top of vector. 
*/ inline auto head(Eigen::Index n) const { using vari_sub = decltype(vi_->head(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->head(n))); } inline auto head(Eigen::Index n) { using vari_sub = decltype(vi_->head(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->head(n))); } /** * View of the tail of the Eigen vector types. * @param n Number of elements to return from bottom of vector. */ inline auto tail(Eigen::Index n) const { using vari_sub = decltype(vi_->tail(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->tail(n))); } inline auto tail(Eigen::Index n) { using vari_sub = decltype(vi_->tail(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->tail(n))); } /** * View block of N elements starting at position `i` * @param i Starting position of block. * @param n Number of elements in block */ inline auto segment(Eigen::Index i, Eigen::Index n) const { using vari_sub = decltype(vi_->segment(i, n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->segment(i, n))); } inline auto segment(Eigen::Index i, Eigen::Index n) { using vari_sub = decltype(vi_->segment(i, n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->segment(i, n))); } /** * View row of eigen matrices. * @param i Row index to slice. 
*/ inline auto row(Eigen::Index i) const { using vari_sub = decltype(vi_->row(i)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->row(i))); } inline auto row(Eigen::Index i) { using vari_sub = decltype(vi_->row(i)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->row(i))); } /** * View column of eigen matrices * @param i Column index to slice */ inline auto col(Eigen::Index i) const { using vari_sub = decltype(vi_->col(i)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->col(i))); } inline auto col(Eigen::Index i) { using vari_sub = decltype(vi_->col(i)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->col(i))); } /** * View diagonal of eigen matrices * @param i Column index to slice */ inline auto diagonal() const { using vari_sub = decltype(vi_->diagonal()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->diagonal())); } inline auto diagonal() { using vari_sub = decltype(vi_->diagonal()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->diagonal())); } /** * View a `matrix_cl` as a column vector. */ inline auto as_column_vector_or_scalar() const { using vari_sub = decltype(vi_->as_column_vector_or_scalar()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->as_column_vector_or_scalar())); } inline auto as_column_vector_or_scalar() { using vari_sub = decltype(vi_->as_column_vector_or_scalar()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->as_column_vector_or_scalar())); } /** * View element of eigen matrices. This creates a new * vari_value so unlike the other views this subset will not * have the same adjoints as the original matrix and must be propogated * back. 
* @param i Element to access */ inline auto coeff(Eigen::Index i) const { using vari_sub = decltype(vi_->coeff(i)); vari_sub* vari_coeff = new vari_sub(vi_->coeff(i)); reverse_pass_callback([this_vi = this->vi_, vari_coeff, i]() { this_vi->adj_.coeffRef(i) += vari_coeff->adj_; }); return var_value>(vari_coeff); } inline auto coeff(Eigen::Index i) { using vari_sub = decltype(vi_->coeff(i)); vari_sub* vari_coeff = new vari_sub(vi_->coeff(i)); reverse_pass_callback([this_vi = this->vi_, vari_coeff, i]() { this_vi->adj_.coeffRef(i) += vari_coeff->adj_; }); return var_value>(vari_coeff); } /** * View element of eigen matrices. This creates a new * vari_value so unlike the other views this subset will not * have the same adjoints as the original matrix and must be propogated * back. * @param i Row to access * @param j Column to access */ inline auto coeff(Eigen::Index i, Eigen::Index j) const { using vari_sub = decltype(vi_->coeff(i, j)); vari_sub* vari_coeff = new vari_sub(vi_->coeff(i, j)); reverse_pass_callback([this_vi = this->vi_, vari_coeff, i, j]() { this_vi->adj_.coeffRef(i, j) += vari_coeff->adj_; }); return var_value>(vari_coeff); } inline auto coeff(Eigen::Index i, Eigen::Index j) { using vari_sub = decltype(vi_->coeff(i, j)); vari_sub* vari_coeff = new vari_sub(vi_->coeff(i, j)); reverse_pass_callback([this_vi = this->vi_, vari_coeff, i, j]() { this_vi->adj_.coeffRef(i, j) += vari_coeff->adj_; }); return var_value>(vari_coeff); } /** * View element of eigen matrices. This creates a new * vari_value so unlike the other views this subset will not * have the same adjoints as the original matrix and must be propogated * back. * @param i Element to access */ inline auto operator()(Eigen::Index i) const { return this->coeff(i); } inline auto operator()(Eigen::Index i) { return this->coeff(i); } /** * View element of eigen matrices. 
This creates a new * vari_value so unlike the other views this subset will not * have the same adjoints as the original matrix and must be propogated * back. * @param i Row to access * @param j Column to access */ inline auto operator()(Eigen::Index i, Eigen::Index j) const { return this->coeff(i, j); } inline auto operator()(Eigen::Index i, Eigen::Index j) { return this->coeff(i, j); } /** * View element of eigen matrices. This creates a new * vari_value so unlike the other views this subset will not * have the same adjoints as the original matrix and must be propogated * back. * @param i Element to access */ inline auto coeffRef(Eigen::Index i) const { return this->coeff(i); } inline auto coeffRef(Eigen::Index i) { return this->coeff(i); } /** * View element of eigen matrices. This creates a new * vari_value so unlike the other views this subset will not * have the same adjoints as the original matrix and must be propogated * back. * @param i Row to access * @param j Column to access */ inline auto coeffRef(Eigen::Index i, Eigen::Index j) const { return this->coeff(i, j); } inline auto coeffRef(Eigen::Index i, Eigen::Index j) { return this->coeff(i, j); } /** * Return an expression that operates on the rows of the matrix `vari` */ inline auto rowwise_reverse() const { using vari_sub = decltype(vi_->rowwise_reverse()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->rowwise_reverse())); } inline auto rowwise_reverse() { using vari_sub = decltype(vi_->rowwise_reverse()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->rowwise_reverse())); } /** * Return an expression that operates on the columns of the matrix `vari` */ inline auto colwise_reverse() const { using vari_sub = decltype(vi_->colwise_reverse()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->colwise_reverse())); } inline auto colwise_reverse() { using vari_sub = decltype(vi_->colwise_reverse()); using var_sub = var_value>; return var_sub(new 
vari_sub(vi_->colwise_reverse())); } /** * Return an expression an expression to reverse the order of the coefficients * inside of a `vari` matrix */ inline auto reverse() const { using vari_sub = decltype(vi_->reverse()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->reverse())); } inline auto reverse() { using vari_sub = decltype(vi_->reverse()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->reverse())); } /** * Return a block consisting of the top rows * @param n Number of rows */ inline auto topRows(Eigen::Index n) const { using vari_sub = decltype(vi_->topRows(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->topRows(n))); } inline auto topRows(Eigen::Index n) { using vari_sub = decltype(vi_->topRows(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->topRows(n))); } /** * Return a block consisting of the bottom rows * @param n Number of rows */ inline auto bottomRows(Eigen::Index n) const { using vari_sub = decltype(vi_->bottomRows(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->bottomRows(n))); } inline auto bottomRows(Eigen::Index n) { using vari_sub = decltype(vi_->bottomRows(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->bottomRows(n))); } /** * Return a block consisting of rows in the middle. 
* @param start_row Starting row index * @param n Number of rows */ inline auto middleRows(Eigen::Index start_row, Eigen::Index n) const { using vari_sub = decltype(vi_->middleRows(start_row, n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->middleRows(start_row, n))); } inline auto middleRows(Eigen::Index start_row, Eigen::Index n) { using vari_sub = decltype(vi_->middleRows(start_row, n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->middleRows(start_row, n))); } /** * Return a block consisting of the left-most columns * @param n Number of columns */ inline auto leftCols(Eigen::Index n) const { using vari_sub = decltype(vi_->leftCols(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->leftCols(n))); } inline auto leftCols(Eigen::Index n) { using vari_sub = decltype(vi_->leftCols(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->leftCols(n))); } /** * Return a block consisting of the right-most columns * @param n Number of columns */ inline auto rightCols(Eigen::Index n) const { using vari_sub = decltype(vi_->rightCols(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->rightCols(n))); } inline auto rightCols(Eigen::Index n) { using vari_sub = decltype(vi_->rightCols(n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->rightCols(n))); } /** * Return a block consisting of columns in the middle. * @param start_col Starting column index * @param n Number of columns */ inline auto middleCols(Eigen::Index start_col, Eigen::Index n) const { using vari_sub = decltype(vi_->middleCols(start_col, n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->middleCols(start_col, n))); } inline auto middleCols(Eigen::Index start_col, Eigen::Index n) { using vari_sub = decltype(vi_->middleCols(start_col, n)); using var_sub = var_value>; return var_sub(new vari_sub(vi_->middleCols(start_col, n))); } /** * Return an Array. 
*/ inline auto array() const { using vari_sub = decltype(vi_->array()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->array())); } inline auto array() { using vari_sub = decltype(vi_->array()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->array())); } /** * Return an Matrix. */ inline auto matrix() const { using vari_sub = decltype(vi_->matrix()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->matrix())); } inline auto matrix() { using vari_sub = decltype(vi_->matrix()); using var_sub = var_value>; return var_sub(new vari_sub(vi_->matrix())); } /** * Write the value of this autodiff variable and its adjoint to * the specified output stream. * * @param os Output stream to which to write. * @param v Variable to write. * @return Reference to the specified output stream. */ friend std::ostream& operator<<(std::ostream& os, const var_value& v) { if (v.vi_ == nullptr) { return os << "uninitialized"; } return os << v.val(); } /** * Returns number of rows. Only available if `T` is a matrix. * @return number of rows. */ template , is_matrix_cl>* = nullptr> inline auto rows() const noexcept { return vi_->rows(); } /** * Returns number of columns. Only available if `T` is a matrix. * @return number of columns. */ template , is_matrix_cl>* = nullptr> inline auto cols() const noexcept { return vi_->cols(); } /** * Assignment of another plain var value, when this also contains a plain * type. * @tparam S type of the value in the `var_value` to assing * @param other the value to assign * @return this */ template * = nullptr, require_all_plain_type_t* = nullptr, require_same_t, plain_type_t>* = nullptr> inline var_value& operator=(const var_value& other) { vi_ = other.vi_; return *this; } /** * Assignment of one plain type to another when one sides compile time columns * differ from the other. 
* @tparam S A type inheriting from `Eigen::DenseBase` that has a differing * number of compile time rows or columns, but is assignable to * `this`'s underlying Eigen type. * @param other the value to assign * @return this */ template * = nullptr, require_all_plain_type_t* = nullptr, require_not_same_t, plain_type_t>* = nullptr> inline var_value& operator=(const var_value& other) { static_assert( EIGEN_PREDICATE_SAME_MATRIX_SIZE(T, S), "You mixed matrices of different sizes that are not assignable."); vi_ = new vari_type(other.vi_); return *this; } /** * Assignment of another var value, when either this or the other one does not * contain a plain type. * @tparam S type of the value in the `var_value` to assing * @param other the value to assign * @return this */ template * = nullptr, require_any_not_plain_type_t* = nullptr> inline var_value& operator=(const var_value& other) { arena_t> prev_val = vi_->val_; vi_->val_ = other.val(); // no need to change any adjoints - these are just zeros before the reverse // pass reverse_pass_callback( [this_vi = this->vi_, other_vi = other.vi_, prev_val]() mutable { this_vi->val_ = prev_val; // we have no way of detecting aliasing between this->vi_->adj_ and // other.vi_->adj_, so we must copy adjoint before reseting to zero // we can reuse prev_val instead of allocating a new matrix prev_val = this_vi->adj_; this_vi->adj_.setZero(); other_vi->adj_ += prev_val; }); return *this; } /** * No-op to match with Eigen methods which call eval */ template * = nullptr> inline auto& eval() noexcept { return *this; } template * = nullptr> inline const auto& eval() const noexcept { return *this; } /** * For non-plain types evaluate to the plain type */ template * = nullptr> inline auto eval() { return var_value>(*this); } template * = nullptr> inline auto eval() const { return var_value>(*this); } /** * Copy assignment operator delegates to general assignment operator * @param other the value to assign * @return this */ inline var_value& 
operator=(const var_value& other) { return operator=(other); } }; // For backwards compatability the default value is double using var = var_value; } // namespace math /** * Template specialization defining the scalar type of * values stored in var_value. * * @tparam T type to check. * @ingroup type_trait */ template struct scalar_type::value>> { using type = math::var_value::value_type>>; }; } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/count_vars.hpp0000644000176200001440000001236114645137106023403 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_COUNT_VARS_HPP #define STAN_MATH_REV_CORE_COUNT_VARS_HPP #include #include #include #include #include namespace stan { namespace math { namespace internal { template * = nullptr, typename... Pargs> inline size_t count_vars_impl(size_t count, VecVar&& x, Pargs&&... args); template * = nullptr, require_std_vector_vt* = nullptr, typename... Pargs> inline size_t count_vars_impl(size_t count, VecContainer&& x, Pargs&&... args); template * = nullptr, typename... Pargs> inline size_t count_vars_impl(size_t count, EigT&& x, Pargs&&... args); template inline size_t count_vars_impl(size_t count, const var& x, Pargs&&... args); template >* = nullptr, typename... Pargs> inline size_t count_vars_impl(size_t count, Arith& x, Pargs&&... args); inline size_t count_vars_impl(size_t count); /** * Count the number of vars in x (a std::vector of vars), * add it to the running total, * count the number of vars in the remaining arguments * and return the result. * * @tparam VecVar type of standard container holding vars * @tparam Pargs Types of remaining arguments * @param[in] count The current count of the number of vars * @param[in] x A std::vector holding vars. * @param[in] args objects to be forwarded to recursive call of * `count_vars_impl` */ template *, typename... Pargs> inline size_t count_vars_impl(size_t count, VecVar&& x, Pargs&&... 
args) { return count_vars_impl(count + x.size(), std::forward(args)...); } /** * Count the number of vars in x (a std::vector holding other containers), * add it to the running total, * count the number of vars in the remaining arguments * and return the result. * * @tparam VecContainer std::vector holding arguments which contain Vars * @tparam Pargs Types of remaining arguments * @param[in] count The current count of the number of vars * @param[in] x A vector holding containers of vars * @param[in] args objects to be forwarded to recursive call of * `count_vars_impl` */ template *, require_std_vector_vt*, typename... Pargs> inline size_t count_vars_impl(size_t count, VecContainer&& x, Pargs&&... args) { for (auto&& x_iter : x) { count = count_vars_impl(count, x_iter); } return count_vars_impl(count, std::forward(args)...); } /** * Count the number of vars in x (an eigen container), * add it to the running total, * count the number of vars in the remaining arguments * and return the result. * * @tparam EigT A type derived from `EigenBase` * @tparam Pargs Types of remaining arguments * @param[in] count The current count of the number of vars * @param[in] x An Eigen container holding vars * @param[in] args objects to be forwarded to recursive call of * `count_vars_impl` */ template *, typename... Pargs> inline size_t count_vars_impl(size_t count, EigT&& x, Pargs&&... args) { return count_vars_impl(count + x.size(), std::forward(args)...); } /** * Add one to the running total number of vars, * count the number of vars in the remaining arguments * and return the result. * * @tparam Pargs Types of remaining arguments * @param[in] count The current count of the number of vars * @param[in] x A var * @param[in] args objects to be forwarded to recursive call of * `count_vars_impl` */ template inline size_t count_vars_impl(size_t count, const var& x, Pargs&&... 
args) { return count_vars_impl(count + 1, std::forward(args)...); } /** * Arguments without vars contribute zero to the total number of vars. * * Return the running total number of vars plus the number of * vars in the remaining arguments. * * @tparam Arith An object that is either arithmetic or holds arithmetic * types * @tparam Pargs Types of remaining arguments * @param[in] count The current count of the number of vars * @param[in] x An arithmetic value or container * @param[in] args objects to be forwarded to recursive call of * `count_vars_impl` */ template >*, typename... Pargs> inline size_t count_vars_impl(size_t count, Arith& x, Pargs&&... args) { return count_vars_impl(count, std::forward(args)...); } /** * End count_vars_impl recursion and return total number of counted vars */ inline size_t count_vars_impl(size_t count) { return count; } } // namespace internal /** * Count the number of vars in the input argument list * * @tparam Pargs Types of input arguments * @return Number of vars in input */ template inline size_t count_vars(Pargs&&... args) { return internal::count_vars_impl(0, std::forward(args)...); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/empty_nested.hpp0000644000176200001440000000062114645137106023714 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_EMPTY_NESTED_HPP #define STAN_MATH_REV_CORE_EMPTY_NESTED_HPP #include namespace stan { namespace math { /** * Return true if there is no nested autodiff being executed. 
*/ static inline bool empty_nested() { return ChainableStack::instance_->nested_var_stack_sizes_.empty(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_greater_than.hpp0000644000176200001440000000276514645137106025605 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_GREATER_THAN_HPP #define STAN_MATH_REV_CORE_OPERATOR_GREATER_THAN_HPP #include #include namespace stan { namespace math { /** * Greater than operator comparing variables' values (C++). * \f[ \mbox{operator\textgreater}(x, y) = \begin{cases} 0 & \mbox{if } x \leq y\\ 1 & \mbox{if } x > y \\[6pt] 0 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return True if first variable's value is greater than second's. */ inline bool operator>(const var& a, const var& b) { return a.val() > b.val(); } /** * Greater than operator comparing variable's value and double * (C++). * * @tparam Arith An arithmetic type * @param a First variable. * @param b Second value. * @return True if first variable's value is greater than second value. */ template * = nullptr> inline bool operator>(const var& a, Arith b) { return a.val() > b; } /** * Greater than operator comparing a double and a variable's value * (C++). * * @tparam Arith An arithmetic type * @param a First value. * @param b Second variable. * @return True if first value is greater than second variable's value. 
*/ template * = nullptr> inline bool operator>(Arith a, const var& b) { return a > b.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/matrix_vari.hpp0000644000176200001440000000166114645137106023546 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_MATRIX_VARI_HPP #define STAN_MATH_REV_CORE_MATRIX_VARI_HPP #include #include #include #include #include #include #include namespace stan { namespace math { class op_matrix_vari : public vari { protected: const size_t size_; vari** vis_; public: template * = nullptr> op_matrix_vari(double f, const T& vs) : vari(f), size_(vs.size()) { vis_ = ChainableStack::instance_->memalloc_.alloc_array(size_); Eigen::Map>(vis_, vs.rows(), vs.cols()) = vs.vi(); } vari* operator[](size_t n) const { return vis_[n]; } size_t size() { return size_; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/accumulate_adjoints.hpp0000644000176200001440000001316414645137106025240 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_ACCUMULATE_ADJOINTS_HPP #define STAN_MATH_REV_CORE_ACCUMULATE_ADJOINTS_HPP #include #include #include #include #include namespace stan { namespace math { template inline double* accumulate_adjoints(double* dest, const var& x, Pargs&&... args); template * = nullptr, typename... Pargs> inline double* accumulate_adjoints(double* dest, VarVec&& x, Pargs&&... args); template * = nullptr, require_std_vector_vt* = nullptr, typename... Pargs> inline double* accumulate_adjoints(double* dest, VecContainer&& x, Pargs&&... args); template * = nullptr, typename... Pargs> inline double* accumulate_adjoints(double* dest, EigT&& x, Pargs&&... args); template * = nullptr, typename... Pargs> inline double* accumulate_adjoints(double* dest, Arith&& x, Pargs&&... 
args); inline double* accumulate_adjoints(double* dest); /** * Accumulate adjoints from x into storage pointed to by dest, * increment the adjoint storage pointer, * recursively accumulate the adjoints of the rest of the arguments, * and return final position of storage pointer. * * @tparam Pargs Types of remaining arguments * @param dest Pointer to where adjoints are to be accumulated * @param x A var * @param args Further args to accumulate over * @return Final position of adjoint storage pointer */ template inline double* accumulate_adjoints(double* dest, const var& x, Pargs&&... args) { *dest += x.adj(); return accumulate_adjoints(dest + 1, std::forward(args)...); } /** * Accumulate adjoints from std::vector x into storage pointed to by dest, * increment the adjoint storage pointer, * recursively accumulate the adjoints of the rest of the arguments, * and return final position of storage pointer. * * @tparam Pargs Types of remaining arguments * @param dest Pointer to where adjoints are to be accumulated * @param x A std::vector of vars * @param args Further args to accumulate over * @return Final position of adjoint storage pointer */ template *, typename... Pargs> inline double* accumulate_adjoints(double* dest, VarVec&& x, Pargs&&... args) { for (auto&& x_iter : x) { *dest += x_iter.adj(); ++dest; } return accumulate_adjoints(dest, std::forward(args)...); } /** * Accumulate adjoints from x (a std::vector of containers containing vars) * into storage pointed to by dest, * increment the adjoint storage pointer, * recursively accumulate the adjoints of the rest of the arguments, * and return final position of storage pointer. * * @tparam VecContainer the type of a standard container holding var * containers. 
* @tparam Pargs Types of remaining arguments * @param dest Pointer to where adjoints are to be accumulated * @param x A std::vector of containers holding vars * @param args Further args to accumulate over * @return Final position of adjoint storage pointer */ template *, require_std_vector_vt*, typename... Pargs> inline double* accumulate_adjoints(double* dest, VecContainer&& x, Pargs&&... args) { for (auto&& x_iter : x) { dest = accumulate_adjoints(dest, x_iter); } return accumulate_adjoints(dest, std::forward(args)...); } /** * Accumulate adjoints from x (an Eigen type containing vars) * into storage pointed to by dest, * increment the adjoint storage pointer, * recursively accumulate the adjoints of the rest of the arguments, * and return final position of storage pointer. * * @tparam EigT Type derived from `EigenBase` containing vars. * @tparam Pargs Types of remaining arguments * @param dest Pointer to where adjoints are to be accumulated * @param x An eigen type holding vars to accumulate over * @param args Further args to accumulate over * @return Final position of adjoint storage pointer */ template *, typename... Pargs> inline double* accumulate_adjoints(double* dest, EigT&& x, Pargs&&... args) { Eigen::Map(dest, x.rows(), x.cols()) += x.adj(); return accumulate_adjoints(dest + x.size(), std::forward(args)...); } /** * Ignore arithmetic types. * * Recursively accumulate the adjoints of the rest of the arguments * and return final position of adjoint storage pointer. * * @tparam Arith A type satisfying `std::is_arithmetic`. * @tparam Pargs Types of remaining arguments * @param dest Pointer to where adjoints are to be accumulated * @param x An object that is either arithmetic or a container of Arithmetic * types * @param args Further args to accumulate over * @return Final position of adjoint storage pointer */ template *, typename... Pargs> inline double* accumulate_adjoints(double* dest, Arith&& x, Pargs&&... 
args) { return accumulate_adjoints(dest, std::forward(args)...); } /** * End accumulate_adjoints recursion and return pointer * * @param dest Pointer */ inline double* accumulate_adjoints(double* dest) { return dest; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/autodiffstackstorage.hpp0000644000176200001440000001326514645137106025440 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_AUTODIFFSTACKSTORAGE_HPP #define STAN_MATH_REV_CORE_AUTODIFFSTACKSTORAGE_HPP #include #include namespace stan { namespace math { // Internal macro used to modify global pointer definition to the // global AD instance. #ifdef STAN_THREADS // Whenever STAN_THREADS is set a TLS keyword is used. For reasons // explained below we use the GNU compiler extension __thread if // supported by the compiler while the generic thread_local C++11 // keyword is used otherwise. #ifdef __GNUC__ #define STAN_THREADS_DEF __thread #else #define STAN_THREADS_DEF thread_local #endif #else // In case STAN_THREADS is not set, then no modifier is needed. #define STAN_THREADS_DEF #endif /** * This struct always provides access to the autodiff stack using * the singleton pattern. Read warnings below! * * The singleton instance_ is a global static pointer, * which is thread local (TLS) if the STAN_THREADS preprocess variable * is defined. * * The use of a pointer is motivated by performance reasons for the * threading case. When a TLS is used, initialization with a constant * expression at compile time is required for fast access to the * TLS. As the autodiff storage struct is non-POD, its initialization * is a dynamic expression at compile time. These dynamic expressions * are wrapped, in the TLS case, by a TLS wrapper function which slows * down its access. Using a pointer instead allows to initialize at * compile time to nullptr, which is a compile time * constant. In this case, the compiler avoids the use of a TLS * wrapper function. 
* * For performance reasons we use the __thread keyword on compilers * which support it. The __thread keyword is a GNU compiler-specific * (gcc, clang, Intel) extension which requires initialization with a * compile time constant expression. The C++11 keyword thread_local * does allow for constant and dynamic initialization of the * TLS. Thus, only the __thread keyword guarantees that constant * initialization and its implied speedup, is used. * * The initialization of the AD instance at run-time is handled by the * lifetime of a AutodiffStackSingleton object. More specifically, the * first instance of the AutodiffStackSingleton object will initialize * the AD instance and take ownership (it is the only one instance * with the private member own_instance_ being true). Thus, whenever * the first instance of the AutodiffStackSingleton object gets * destructed, the AD tape will be destructed as well. Within * stan-math the initialization of the AD instance for the main thread * of the program is handled by instantiating the singleton once in * the init_chainablestack.hpp file. Whenever STAN_THREADS is defined * then all created child threads must instantiate a * AutodiffStackSingleton object within the child thread before * accessing the AD system in order to initialize the TLS AD tape * within the child thread. * * The design of a globally held (optionally TLS) pointer, which is * globally initialized, allows the compiler to apply necessary * inlining to get maximal performance. However, the design suffers * from "the static init order fiasco"[0]. Whenever the static init * order fiasco occurs, the C++ client of the library may instantiate * an AutodiffStackSingleton object at the adequate code position prior * to any AD tape access to ensure proper initialization order. In * exchange, we get a more performant singleton pattern with automatic * initialization of the AD stack for the main thread. 
There has been * some discussion on earlier designs using the Meyer singleton * approach; see [1] and [2] and the discussions those PRs link to as * well. * * [0] https://isocpp.org/wiki/faq/ctors#static-init-order * [1] https://github.com/stan-dev/math/pull/840 * [2] https://github.com/stan-dev/math/pull/826 * [3] * http://discourse.mc-stan.org/t/potentially-dropping-support-for-older-versions-of-apples-version-of-clang/3780/ */ template struct AutodiffStackSingleton { using AutodiffStackSingleton_t = AutodiffStackSingleton; AutodiffStackSingleton() : own_instance_(init()) {} ~AutodiffStackSingleton() { if (own_instance_) { delete instance_; instance_ = nullptr; } } struct AutodiffStackStorage { AutodiffStackStorage &operator=(const AutodiffStackStorage &) = delete; std::vector var_stack_; std::vector var_nochain_stack_; std::vector var_alloc_stack_; stack_alloc memalloc_; // nested positions std::vector nested_var_stack_sizes_; std::vector nested_var_nochain_stack_sizes_; std::vector nested_var_alloc_stack_starts_; }; explicit AutodiffStackSingleton(AutodiffStackSingleton_t const &) = delete; AutodiffStackSingleton &operator=(const AutodiffStackSingleton_t &) = delete; static STAN_THREADS_DEF AutodiffStackStorage *instance_; private: static bool init() { static STAN_THREADS_DEF bool is_initialized = false; if (!is_initialized) { is_initialized = true; instance_ = new AutodiffStackStorage(); return true; } if (!instance_) { is_initialized = true; instance_ = new AutodiffStackStorage(); return true; } return false; } bool own_instance_; }; template STAN_THREADS_DEF typename AutodiffStackSingleton::AutodiffStackStorage *AutodiffStackSingleton::instance_; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/callback_vari.hpp0000644000176200001440000000433014645137106023772 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_CALLBACK_VARI_HPP #define STAN_MATH_REV_CORE_CALLBACK_VARI_HPP #include #include namespace stan { namespace math { 
namespace internal { template struct callback_vari : public vari_value { F rev_functor_; template , plain_type_t>* = nullptr> explicit callback_vari(S&& value, F&& rev_functor) : vari_value(std::move(value)), rev_functor_(std::forward(rev_functor)) {} inline void chain() final { rev_functor_(*this); } }; } // namespace internal /** * Creates a new vari with given value and a callback that implements the * reverse pass (chain). The callback needs to accept a referenct to the vari. * If it needs any other data it should be implemented as a lambda capturing the * variables it needs. * * All captured values must be trivially destructible or they will leak memory! * `to_arena()` function can be used to ensure that. * * @tparam T type of value * @tparam F type of callable * @param value value of the vari * @param functor functor or other callable to call in the reverse pass */ template internal::callback_vari, F>* make_callback_vari(T&& value, F&& functor) { return new internal::callback_vari, F>( std::move(value), std::forward(functor)); } /** * Creates a new var initialized with a callback_vari with a * given value and reverse-pass callback functor. The callback functor * will be passed a reference to the constructed vari. * * All captured values must be trivially destructible or they will leak memory. * `to_arena()` function can be used to ensure that. 
* * @tparam T type of value * @tparam F type of callable * @param value value of the vari * @param functor functor or other callable to call in the reverse pass */ template var_value> make_callback_var(T&& value, F&& functor) { return var_value>( make_callback_vari(std::move(value), std::forward(functor))); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/scoped_chainablestack.hpp0000644000176200001440000000443014645137106025507 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_SCOPED_CHAINABLESTACK_HPP #define STAN_MATH_REV_CORE_SCOPED_CHAINABLESTACK_HPP #include #include #include #include namespace stan { namespace math { /** * The AD tape of reverse mode AD is by default stored globally within the * process (or thread). With the ScopedChainableStack class one may execute a * nullary functor with reference to an AD tape which is stored with the * instance of ScopedChainableStack. Example: * * ScopedChainableStack scoped_stack; * * double cgrad_a = scoped_stack.execute([] { * var a = 2.0; * var b = 4.0; * var c = a*a + b; * c.grad(); * return a.adj(); * }); * * Doing so will not interfere with the process (or thread) AD tape. */ class ScopedChainableStack { ChainableStack::AutodiffStackStorage local_stack_; std::mutex local_stack_mutex_; struct activate_scope { ChainableStack::AutodiffStackStorage* parent_stack_; ScopedChainableStack& scoped_stack_; explicit activate_scope(ScopedChainableStack& scoped_stack) : parent_stack_(ChainableStack::instance_), scoped_stack_(scoped_stack) { if (!scoped_stack_.local_stack_mutex_.try_lock()) { throw std::logic_error{"Cannot recurse same instance scoped stacks."}; } ChainableStack::instance_ = &scoped_stack.local_stack_; } ~activate_scope() { ChainableStack::instance_ = parent_stack_; scoped_stack_.local_stack_mutex_.unlock(); } }; public: ScopedChainableStack() = default; /** * Execute in the current thread a function and write the AD * tape to local_stack_ of this instance. 
The function may return * any type. * * @tparam F functor to evaluate * @param f instance of functor * @param args arguments passed to functor * @return Result of evaluated functor */ template decltype(auto) execute(F&& f, Args&&... args) { const activate_scope active_scope(*this); return std::forward(f)(std::forward(args)...); } // Prevent undesirable operations ScopedChainableStack(const ScopedChainableStack&) = delete; ScopedChainableStack& operator=(const ScopedChainableStack&) = delete; }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/save_varis.hpp0000644000176200001440000001245314645137106023364 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_SAVE_VARIS_HPP #define STAN_MATH_REV_CORE_SAVE_VARIS_HPP #include #include #include #include #include #include namespace stan { namespace math { template inline vari** save_varis(vari** dest, const var& x, Pargs&&... args); template * = nullptr, typename... Pargs> inline vari** save_varis(vari** dest, VarVec&& x, Pargs&&... args); template * = nullptr, require_std_vector_vt* = nullptr, typename... Pargs> inline vari** save_varis(vari** dest, VecContainer&& x, Pargs&&... args); template * = nullptr, typename... Pargs> inline vari** save_varis(vari** dest, EigT&& x, Pargs&&... args); template * = nullptr, typename... Pargs> inline vari** save_varis(vari** dest, Arith&& x, Pargs&&... args); inline vari** save_varis(vari** dest); /** * Save the vari pointer in x into the memory pointed to by dest, * increment the dest storage pointer, * recursively call save_varis on the rest of the arguments, * and return the final value of the dest storage pointer. * * @tparam Pargs Types of remaining arguments * @param[in, out] dest Pointer to where vari pointers are saved * @param[in] x A var * @param[in] args Additional arguments to have their varis saved * @return Final position of dest pointer */ template inline vari** save_varis(vari** dest, const var& x, Pargs&&... 
args) { *dest = x.vi_; return save_varis(dest + 1, std::forward(args)...); } /** * Save the vari pointers in x into the memory pointed to by dest, * increment the dest storage pointer, * recursively call save_varis on the rest of the arguments, * and return the final value of the dest storage pointer. * * @tparam VarVec A variant of std::vector * @tparam Pargs Types of remaining arguments * @param[in, out] dest Pointer to where vari pointers are saved * @param[in] x A std::vector of vars * @param[in] args Additional arguments to have their varis saved * @return Final position of dest pointer */ template *, typename... Pargs> inline vari** save_varis(vari** dest, VarVec&& x, Pargs&&... args) { for (int i = 0; i < x.size(); ++i) { dest[i] = x[i].vi_; } return save_varis(dest + x.size(), std::forward(args)...); } /** * Save the vari pointers in x into the memory pointed to by dest, * increment the dest storage pointer, * recursively call save_varis on the rest of the arguments, * and return the final value of the dest storage pointer. * * @tparam VecContainer std::vector where T is another type containing vars * @tparam Pargs Types of remaining arguments * @param[in, out] dest Pointer to where vari pointers are saved * @param[in] x A std::vector of containers containing of vars * @param[in] args Additional arguments to have their varis saved * @return Final position of dest pointer */ template *, require_std_vector_vt*, typename... Pargs> inline vari** save_varis(vari** dest, VecContainer&& x, Pargs&&... args) { for (size_t i = 0; i < x.size(); ++i) { dest = save_varis(dest, x[i]); } return save_varis(dest, std::forward(args)...); } /** * Save the vari pointers in x into the memory pointed to by dest, * increment the dest storage pointer, * recursively call save_varis on the rest of the arguments, * and return the final value of the dest storage pointer. 
* * @tparam EigT An Eigen type with var value type * @tparam Pargs Types of remaining arguments * @param[in, out] dest Pointer to where vari pointers are saved * @param[in] x An Eigen container of vars * @param[in] args Additional arguments to have their varis saved * @return Final position of dest pointer */ template *, typename... Pargs> inline vari** save_varis(vari** dest, EigT&& x, Pargs&&... args) { for (int i = 0; i < x.size(); ++i) { dest[i] = x.coeff(i).vi_; } return save_varis(dest + x.size(), std::forward(args)...); } /** * Ignore arithmetic types. * * Recursively call save_varis on the rest of the arguments * and return the final value of the dest storage pointer. * * @tparam Arith An arithmetic type * @tparam Pargs Types of remaining arguments * @param[in, out] dest Pointer to where vari pointers are saved * @param[in] x An argument not containing vars * @param[in] args Additional arguments to have their varis saved * @return Final position of dest pointer */ template *, typename... Pargs> inline vari** save_varis(vari** dest, Arith&& x, Pargs&&... args) { return save_varis(dest, std::forward(args)...); } /** * End save_varis recursion and return pointer * * @param dest Pointer */ inline vari** save_varis(vari** dest) { return dest; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/chainable_alloc.hpp0000644000176200001440000000124714645137106024301 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_CHAINABLE_ALLOC_HPP #define STAN_MATH_REV_CORE_CHAINABLE_ALLOC_HPP #include namespace stan { namespace math { /** * A chainable_alloc is an object which is constructed and * destructed normally but the memory lifespan is managed along * with the arena allocator for the gradient calculation. A * chainable_alloc instance must be created with a call to * operator new for memory management. 
*/ class chainable_alloc { public: chainable_alloc() { ChainableStack::instance_->var_alloc_stack_.push_back(this); } virtual ~chainable_alloc() {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/arena_matrix.hpp0000644000176200001440000004021114645137106023665 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_ARENA_MATRIX_HPP #define STAN_MATH_REV_CORE_ARENA_MATRIX_HPP #include #include #include #include #include namespace stan { namespace math { template class arena_matrix> : public Eigen::Map { public: using Scalar = value_type_t; using Base = Eigen::Map; using PlainObject = std::decay_t; static constexpr int RowsAtCompileTime = MatrixType::RowsAtCompileTime; static constexpr int ColsAtCompileTime = MatrixType::ColsAtCompileTime; /** * Default constructor. */ arena_matrix() : Base::Map(nullptr, RowsAtCompileTime == Eigen::Dynamic ? 0 : RowsAtCompileTime, ColsAtCompileTime == Eigen::Dynamic ? 0 : ColsAtCompileTime) { } /** * Constructs `arena_matrix` with given number of rows and columns. * @param rows number of rows * @param cols number of columns */ arena_matrix(Eigen::Index rows, Eigen::Index cols) : Base::Map( ChainableStack::instance_->memalloc_.alloc_array(rows * cols), rows, cols) {} /** * Constructs `arena_matrix` with given size. This only works if * `MatrixType` is row or col vector. * @param size number of elements */ explicit arena_matrix(Eigen::Index size) : Base::Map( ChainableStack::instance_->memalloc_.alloc_array(size), size) {} /** * Constructs `arena_matrix` from an expression. * @param other expression */ template * = nullptr> arena_matrix(const T& other) // NOLINT : Base::Map( ChainableStack::instance_->memalloc_.alloc_array( other.size()), (RowsAtCompileTime == 1 && T::ColsAtCompileTime == 1) || (ColsAtCompileTime == 1 && T::RowsAtCompileTime == 1) ? other.cols() : other.rows(), (RowsAtCompileTime == 1 && T::ColsAtCompileTime == 1) || (ColsAtCompileTime == 1 && T::RowsAtCompileTime == 1) ? 
other.rows() : other.cols()) { *this = other; } /** * Constructs `arena_matrix` from an expression. This makes an assumption that * any other `Eigen::Map` also contains memory allocated in the arena. * @param other expression */ arena_matrix(const Base& other) // NOLINT : Base::Map(other) {} /** * Copy constructor. * @param other matrix to copy from */ arena_matrix(const arena_matrix& other) : Base::Map(const_cast(other.data()), other.rows(), other.cols()) {} // without this using, compiler prefers combination of implicit construction // and copy assignment to the inherited operator when assigned an expression using Base::operator=; /** * Copy assignment operator. * @param other matrix to copy from * @return `*this` */ arena_matrix& operator=(const arena_matrix& other) { // placement new changes what data map points to - there is no allocation new (this) Base(const_cast(other.data()), other.rows(), other.cols()); return *this; } /** * Assignment operator for assigning an expression. * @param a expression to evaluate into this * @return `*this` */ template arena_matrix& operator=(const T& a) { // do we need to transpose? if ((RowsAtCompileTime == 1 && T::ColsAtCompileTime == 1) || (ColsAtCompileTime == 1 && T::RowsAtCompileTime == 1)) { // placement new changes what data map points to - there is no allocation new (this) Base( ChainableStack::instance_->memalloc_.alloc_array(a.size()), a.cols(), a.rows()); } else { new (this) Base( ChainableStack::instance_->memalloc_.alloc_array(a.size()), a.rows(), a.cols()); } Base::operator=(a); return *this; } }; template class arena_matrix> : public Eigen::Map { public: using Scalar = value_type_t; using Base = Eigen::Map; using PlainObject = std::decay_t; using StorageIndex = typename PlainObject::StorageIndex; /** * Default constructor. */ arena_matrix() : Base::Map(0, 0, 0, nullptr, nullptr, nullptr) {} /** * Constructor for CDC formatted data. Assumes compressed and does not own * memory. 
* * Constructs a read-write Map to a sparse matrix of size rows x cols, * containing nnz non-zero coefficients, stored as a sparse format as defined * by the pointers outerIndexPtr, innerIndexPtr, and valuePtr. If the optional * parameter innerNonZerosPtr is the null pointer, then a standard compressed * format is assumed. * * @param rows Number of rows * @param cols Number of columns * @param nnz Number of nonzero items in matrix * @param outerIndexPtr A pointer to the array of outer indices. * @param innerIndexPtr A pointer to the array of inner indices. * @param valuePtr A pointer to the array of values. * @param innerNonZerosPtr A pointer to the array of the number of non zeros * of the inner vectors. * */ arena_matrix(Eigen::Index rows, Eigen::Index cols, Eigen::Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = nullptr) : Base::Map(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr) {} private: template inline T* copy_vector(const T* ptr, Integral size) { if (size == 0) return nullptr; T* ret = ChainableStack::instance_->memalloc_.alloc_array(size); std::copy_n(ptr, size, ret); return ret; } public: /** * Constructs `arena_matrix` from an `Eigen::SparseMatrix`. * @param other Eigen Sparse Matrix class */ template * = nullptr> arena_matrix(T&& other) // NOLINT : Base::Map( other.rows(), other.cols(), other.nonZeros(), copy_vector(other.outerIndexPtr(), other.outerSize() + 1), copy_vector(other.innerIndexPtr(), other.nonZeros()), copy_vector(other.valuePtr(), other.nonZeros()), copy_vector( other.innerNonZeroPtr(), other.innerNonZeroPtr() == nullptr ? 0 : other.innerSize())) {} /** * Constructs `arena_matrix` from an Eigen expression * @param other An expression */ template * = nullptr, require_not_same_t* = nullptr> arena_matrix(S&& other) // NOLINT : arena_matrix(PlainObject(std::forward(other))) {} /** * Constructs `arena_matrix` from an expression. 
This makes an assumption that * any other `Eigen::Map` also contains memory allocated in the arena. * @param other expression */ arena_matrix(const Base& other) // NOLINT : Base(other) {} /** * Const copy constructor. No actual copy is performed * @note Since the memory for the arena matrix sits in Stan's memory arena all * copies/moves of arena matrices are shallow moves * @param other matrix to copy from */ arena_matrix(const arena_matrix& other) : Base::Map(other.rows(), other.cols(), other.nonZeros(), const_cast(other.outerIndexPtr()), const_cast(other.innerIndexPtr()), const_cast(other.valuePtr()), const_cast(other.innerNonZeroPtr())) {} /** * Move constructor. * @note Since the memory for the arena matrix sits in Stan's memory arena all * copies/moves of arena matrices are shallow moves * @param other matrix to copy from */ arena_matrix(arena_matrix&& other) : Base::Map(other.rows(), other.cols(), other.nonZeros(), const_cast(other.outerIndexPtr()), const_cast(other.innerIndexPtr()), const_cast(other.valuePtr()), const_cast(other.innerNonZeroPtr())) {} /** * Copy constructor. No actual copy is performed * @note Since the memory for the arena matrix sits in Stan's memory arena all * copies/moves of arena matrices are shallow moves * @param other matrix to copy from */ arena_matrix(arena_matrix& other) : Base::Map(other.rows(), other.cols(), other.nonZeros(), const_cast(other.outerIndexPtr()), const_cast(other.innerIndexPtr()), const_cast(other.valuePtr()), const_cast(other.innerNonZeroPtr())) {} // without this using, compiler prefers combination of implicit construction // and copy assignment to the inherited operator when assigned an expression using Base::operator=; /** * Copy assignment operator. 
* @tparam ArenaMatrix An `arena_matrix` type * @param other matrix to copy from * @return `*this` */ template , arena_matrix>* = nullptr> arena_matrix& operator=(ArenaMatrix&& other) { // placement new changes what data map points to - there is no allocation new (this) Base(other.rows(), other.cols(), other.nonZeros(), const_cast(other.outerIndexPtr()), const_cast(other.innerIndexPtr()), const_cast(other.valuePtr()), const_cast(other.innerNonZeroPtr())); return *this; } /** * Assignment operator for assigning an expression. * @tparam Expr An expression that an `arena_matrix` can be * constructed from * @param expr expression to evaluate into this * @return `*this` */ template >* = nullptr> arena_matrix& operator=(Expr&& expr) { new (this) arena_matrix(std::forward(expr)); return *this; } private: /** * inplace operations functor for `Sparse (.*)= Sparse`. * @note This assumes that each matrix is of the same size and sparsity * pattern. * @tparam F A type with a valid `operator()(Scalar& x, const Scalar& y)` * method * @tparam Expr Type derived from `Eigen::SparseMatrixBase` * @param f Functor that performs the inplace operation * @param other The right hand side of the inplace operation */ template * = nullptr, require_same_t>* = nullptr> inline void inplace_ops_impl(F&& f, Expr&& other) { auto&& x = to_ref(other); auto* val_ptr = (*this).valuePtr(); auto* x_val_ptr = x.valuePtr(); const auto non_zeros = (*this).nonZeros(); for (Eigen::Index i = 0; i < non_zeros; ++i) { f(val_ptr[i], x_val_ptr[i]); } } /** * inplace operations functor for `Sparse (.*)= Sparse`. * @note This assumes that each matrix is of the same size and sparsity * pattern. 
* @tparam F A type with a valid `operator()(Scalar& x, const Scalar& y)` * method * @tparam Expr Type derived from `Eigen::SparseMatrixBase` * @param f Functor that performs the inplace operation * @param other The right hand side of the inplace operation */ template * = nullptr, require_not_same_t>* = nullptr> inline void inplace_ops_impl(F&& f, Expr&& other) { auto&& x = to_ref(other); for (int k = 0; k < (*this).outerSize(); ++k) { typename Base::InnerIterator it(*this, k); typename std::decay_t::InnerIterator iz(x, k); for (; static_cast(it) && static_cast(iz); ++it, ++iz) { f(it.valueRef(), iz.value()); } } } /** * inplace operations functor for `Sparse (.*)= Dense`. * @note This assumes the user intends to perform the inplace operation for * the nonzero parts of `this` * @tparam F A type with a valid `operator()(Scalar& x, const Scalar& y)` * method * @tparam Expr Type derived from `Eigen::DenseBase` * @param f Functor that performs the inplace operation * @param other The right hand side of the inplace operation */ template * = nullptr, require_eigen_dense_base_t* = nullptr> inline void inplace_ops_impl(F&& f, Expr&& other) { auto&& x = to_ref(other); for (int k = 0; k < (*this).outerSize(); ++k) { typename Base::InnerIterator it(*this, k); for (; static_cast(it); ++it) { f(it.valueRef(), x(it.row(), it.col())); } } } /** * inplace operations functor for `Sparse (.*)= Scalar`. 
* @note This assumes the user intends to perform the inplace operation for * the nonzero parts of `this` * @tparam F A type with a valid `operator()(Scalar& x, const Scalar& y)` * method * @tparam T A scalar type * @param f Functor that performs the inplace operation * @param other The right hand side of the inplace operation */ template * = nullptr> inline void inplace_ops_impl(F&& f, T&& other) { auto* val_ptr = (*this).valuePtr(); const auto non_zeros = (*this).nonZeros(); for (Eigen::Index i = 0; i < non_zeros; ++i) { f(val_ptr[i], other); } } public: /** * Inplace operator `+=` * @note Caution! Inplace operators assume that either * 1. The right hand side sparse matrix has the same sparsity pattern * 2. You only intend to add a scalar or dense matrix coefficients to the * nonzero values of `this`. This is intended to be used within the reverse * pass for accumulation of the adjoint and is built as such. Any other use * case should be be sure that the above assumptions are satisfied. * @tparam T A type derived from `Eigen::SparseMatrixBase` or * `Eigen::DenseMatrixBase` or a `Scalar` * @param other value to be added inplace to the matrix. */ template inline arena_matrix& operator+=(T&& other) { inplace_ops_impl( [](auto&& x, auto&& y) { x += y; return; }, std::forward(other)); return *this; } /** * Inplace operator `+=` * @note Caution! Inplace operators assume that either * 1. The right hand side sparse matrix has the same sparsity pattern * 2. You only intend to add a scalar or dense matrix coefficients to the * nonzero values of `this`. This is intended to be used within the reverse * pass for accumulation of the adjoint and is built as such. Any other use * case should be be sure that the above assumptions are satisfied. * @tparam T A type derived from `Eigen::SparseMatrixBase` or * `Eigen::DenseMatrixBase` or a `Scalar` * @param other value to be added inplace to the matrix. 
*/ template inline arena_matrix& operator-=(T&& other) { inplace_ops_impl( [](auto&& x, auto&& y) { x -= y; return; }, std::forward(other)); return *this; } }; } // namespace math } // namespace stan namespace Eigen { namespace internal { template struct traits> { using base = traits>; using XprKind = typename Eigen::internal::traits>::XprKind; enum { PlainObjectTypeInnerSize = base::PlainObjectTypeInnerSize, InnerStrideAtCompileTime = base::InnerStrideAtCompileTime, OuterStrideAtCompileTime = base::OuterStrideAtCompileTime, Alignment = base::Alignment, Flags = base::Flags }; }; } // namespace internal } // namespace Eigen #endif StanHeaders/inst/include/stan/math/rev/core/operator_equal.hpp0000644000176200001440000000457614645137106024253 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_EQUAL_HPP #define STAN_MATH_REV_CORE_OPERATOR_EQUAL_HPP #include #include namespace stan { namespace math { /** * Equality operator comparing two variables' values (C++). * \f[ \mbox{operator==}(x, y) = \begin{cases} 0 & \mbox{if } x \neq y\\ 1 & \mbox{if } x = y \\[6pt] 0 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return True if the first variable's value is the same as the * second's. */ inline bool operator==(const var& a, const var& b) { return a.val() == b.val(); } /** * Equality operator comparing a variable's value and a double * (C++). * * @tparam Arith An arithmetic type * @param a First variable. * @param b Second value. * @return True if the first variable's value is the same as the * second value. */ template * = nullptr> inline bool operator==(const var& a, Arith b) { return a.val() == b; } /** * Equality operator comparing a scalar and a variable's value * (C++). * * @tparam Arith An arithmetic type * @param a First scalar. * @param b Second variable. * @return True if the variable's value is equal to the scalar. 
*/ template * = nullptr> inline bool operator==(Arith a, const var& b) { return a == b.val(); } /** * Return `true` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero * * @param x real number * @param z complex number * @return `true` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero */ inline bool operator==(const var& x, const std::complex& z) { return x == z.real() && 0 == z.imag(); } /** * Return `true` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero * * @param z complex number * @param y real number * @return `true` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero */ inline bool operator==(const std::complex& z, const var& y) { return z.real() == y && z.imag() == 0; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/arena_allocator.hpp0000644000176200001440000000314614645137106024347 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_ARENA_ALLOCATOR_HPP #define STAN_MATH_REV_CORE_ARENA_ALLOCATOR_HPP #include namespace stan { namespace math { /** * std library compatible allocator that uses AD stack. * @tparam T type of scalar * * @warning The type T needs to be either trivially destructible or the dynamic allocations needs to be managed by the arena_allocator. * For example this works: @code{.cpp} using my_matrix = std::vector>, stan::math::arena_allocator>>>;@endcode * */ template struct arena_allocator { using value_type = T; arena_allocator() = default; arena_allocator(const arena_allocator& rhs) = default; template arena_allocator(const arena_allocator& rhs) {} /** * Allocates space for `n` items of type `T`. 
* * @param n number of items to allocate space for * @return pointer to allocated space */ T* allocate(std::size_t n) { return ChainableStack::instance_->memalloc_.alloc_array(n); } /** * No-op. Memory is deallocated by calling `recover_memory()`. */ void deallocate(T* /*p*/, std::size_t /*n*/) noexcept {} /** * Equality comparison operator. * @return true */ constexpr bool operator==(const arena_allocator&) const noexcept { return true; } /** * Inequality comparison operator. * @return false */ constexpr bool operator!=(const arena_allocator&) const noexcept { return false; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/stored_gradient_vari.hpp0000644000176200001440000000254014645137106025414 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_STORED_GRADIENT_VARI_HPP #define STAN_MATH_REV_CORE_STORED_GRADIENT_VARI_HPP #include namespace stan { namespace math { /** * A var implementation that stores the daughter variable * implementation pointers and the partial derivative with respect * to the result explicitly in arrays constructed on the * autodiff memory stack. * * Like a simplified version of OperandsAndPartials. */ class stored_gradient_vari : public vari { protected: size_t size_; vari** dtrs_; double* partials_; public: /** * Construct a stored gradient vari with the specified * value, size, daughter varis, and partial derivatives. * * @param[in] value Value of vari * @param[in] size Number of daughters * @param[in] dtrs Array of pointers to daughters * @param[in] partials Partial derivatives of value with respect * to daughters. */ stored_gradient_vari(double value, size_t size, vari** dtrs, double* partials) : vari(value), size_(size), dtrs_(dtrs), partials_(partials) {} /** * Propagate derivatives through this vari with partial * derivatives given for the daughter vari by the stored partials. 
*/ void chain() { for (size_t i = 0; i < size_; ++i) { dtrs_[i]->adj_ += adj_ * partials_[i]; } } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_less_than_or_equal.hpp0000644000176200001440000000311714645137106027001 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_LESS_THAN_OR_EQUAL_HPP #define STAN_MATH_REV_CORE_OPERATOR_LESS_THAN_OR_EQUAL_HPP #include #include namespace stan { namespace math { /** * Less than or equal operator comparing two variables' values * (C++). \f[ \mbox{operator\textless=}(x, y) = \begin{cases} 0 & \mbox{if } x > y\\ 1 & \mbox{if } x \leq y \\[6pt] 0 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return True if first variable's value is less than or equal to * the second's. */ inline bool operator<=(const var& a, const var& b) { return a.val() <= b.val(); } /** * Less than or equal operator comparing a variable's value and a * scalar (C++). * * @tparam Arith An arithmetic type * @param a First variable. * @param b Second value. * @return True if first variable's value is less than or equal to * the second value. */ template * = nullptr> inline bool operator<=(const var& a, Arith b) { return a.val() <= b; } /** * Less than or equal operator comparing a double and variable's * value (C++). * * @tparam Arith An arithmetic type * @param a First value. * @param b Second variable. * @return True if first value is less than or equal to the second * variable's value. 
*/ template * = nullptr> inline bool operator<=(Arith a, const var& b) { return a <= b.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/dv_vari.hpp0000644000176200001440000000055614645137106022655 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_DV_VARI_HPP #define STAN_MATH_REV_CORE_DV_VARI_HPP #include namespace stan { namespace math { class op_dv_vari : public vari { protected: double ad_; vari* bvi_; public: op_dv_vari(double f, double a, vari* bvi) : vari(f), ad_(a), bvi_(bvi) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/typedefs.hpp0000644000176200001440000000232214645137106023037 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_TYPEDEFS_HPP #define STAN_MATH_REV_CORE_TYPEDEFS_HPP #include #include #include #include namespace stan { namespace math { using size_type = Eigen::Matrix::Index; /** * The type of a matrix holding var * values. */ using matrix_v = Eigen::Matrix; /** * The type of a (column) vector holding var * values. */ using vector_v = Eigen::Matrix; /** * The type of a row vector holding var * values. */ using row_vector_v = Eigen::Matrix; /** * The type of a matrix holding vari* * values. */ using matrix_vi = Eigen::Matrix; /** * The type of a (column) vector holding vari* * values. */ using vector_vi = Eigen::Matrix; /** * The type of a row vector holding vari* * values. 
*/ using row_vector_vi = Eigen::Matrix; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/vector_vari.hpp0000644000176200001440000000155714645137106023550 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VECTOR_VARI_HPP #define STAN_MATH_REV_CORE_VECTOR_VARI_HPP #include #include #include #include namespace stan { namespace math { class op_vector_vari : public vari { protected: const size_t size_; vari** vis_; public: template * = nullptr, require_vector_like_vt* = nullptr> op_vector_vari(Arith f, VecVar&& vs) : vari(f), size_(vs.size()) { vis_ = reinterpret_cast(operator new(sizeof(vari*) * vs.size())); for (size_t i = 0; i < vs.size(); ++i) { vis_[i] = vs[i].vi_; } } vari* operator[](size_t n) const { return vis_[n]; } size_t size() { return size_; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/vv_vari.hpp0000644000176200001440000000056214645137106022674 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VV_VARI_HPP #define STAN_MATH_REV_CORE_VV_VARI_HPP #include namespace stan { namespace math { class op_vv_vari : public vari { protected: vari* avi_; vari* bvi_; public: op_vv_vari(double f, vari* avi, vari* bvi) : vari(f), avi_(avi), bvi_(bvi) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_addition.hpp0000644000176200001440000002245314645137106024731 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_ADDITION_HPP #define STAN_MATH_REV_CORE_OPERATOR_ADDITION_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Addition operator for variables (C++). * * The partial derivatives are defined by * * \f$\frac{\partial}{\partial x} (x+y) = 1\f$, and * * \f$\frac{\partial}{\partial y} (x+y) = 1\f$. 
* * \f[ \mbox{operator+}(x, y) = \begin{cases} x+y & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator+}(x, y)}{\partial x} = \begin{cases} 1 & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator+}(x, y)}{\partial y} = \begin{cases} 1 & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. */ inline var operator+(const var& a, const var& b) { return make_callback_vari(a.vi_->val_ + b.vi_->val_, [avi = a.vi_, bvi = b.vi_](const auto& vi) mutable { avi->adj_ += vi.adj_; bvi->adj_ += vi.adj_; }); } /** * Addition operator for variable and scalar (C++). * * The derivative with respect to the variable is * * \f$\frac{d}{dx} (x + c) = 1\f$. * * @tparam Arith An arithmetic type * @param a First variable operand. * @param b Second scalar operand. * @return Result of adding variable and scalar. */ template * = nullptr> inline var operator+(const var& a, Arith b) { if (unlikely(b == 0.0)) { return a; } return make_callback_vari( a.vi_->val_ + b, [avi = a.vi_](const auto& vi) mutable { avi->adj_ += vi.adj_; }); } /** * Addition operator for scalar and variable (C++). * * The derivative with respect to the variable is * * \f$\frac{d}{dy} (c + y) = 1\f$. * * @tparam Arith An arithmetic type * @param a First scalar operand. * @param b Second variable operand. * @return Result of adding variable and scalar. */ template * = nullptr> inline var operator+(Arith a, const var& b) { return b + a; // by symmetry } /** * Addition operator for matrix variables (C++). * * @tparam VarMat1 A matrix of vars or a var with an underlying matrix type. 
* @tparam VarMat2 A matrix of vars or a var with an underlying matrix type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. */ template * = nullptr> inline auto add(const VarMat1& a, const VarMat2& b) { check_matching_dims("add", "a", a, "b", b); using op_ret_type = decltype(a.val() + b.val()); using ret_type = return_var_matrix_t; arena_t arena_a(a); arena_t arena_b(b); arena_t ret(arena_a.val() + arena_b.val()); reverse_pass_callback([ret, arena_a, arena_b]() mutable { for (Eigen::Index j = 0; j < ret.cols(); ++j) { for (Eigen::Index i = 0; i < ret.rows(); ++i) { const auto ref_adj = ret.adj().coeffRef(i, j); arena_a.adj().coeffRef(i, j) += ref_adj; arena_b.adj().coeffRef(i, j) += ref_adj; } } }); return ret_type(ret); } /** * Addition operator for a matrix variable and arithmetic (C++). * * @tparam VarMat A matrix of vars or a var with an underlying matrix type. * @tparam Arith A type with an arithmetic Scalar type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. */ template * = nullptr, require_rev_matrix_t* = nullptr> inline auto add(const VarMat& a, const Arith& b) { if (is_eigen::value) { check_matching_dims("add", "a", a, "b", b); } using op_ret_type = decltype((a.val().array() + as_array_or_scalar(b)).matrix()); using ret_type = return_var_matrix_t; arena_t arena_a(a); arena_t ret(arena_a.val().array() + as_array_or_scalar(b)); reverse_pass_callback( [ret, arena_a]() mutable { arena_a.adj() += ret.adj_op(); }); return ret_type(ret); } /** * Addition operator for an arithmetic type and matrix variable (C++). * * @tparam VarMat A matrix of vars or a var with an underlying matrix type. * @tparam Arith A type with an arithmetic Scalar type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. 
*/ template * = nullptr, require_rev_matrix_t* = nullptr> inline auto add(const Arith& a, const VarMat& b) { return add(b, a); } /** * Addition operator for an arithmetic matrix and variable (C++). * * @tparam Var A `var_value` with an underlying arithmetic type. * @tparam EigMat An Eigen Matrix type with an arithmetic Scalar type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. */ template * = nullptr, require_eigen_vt* = nullptr> inline auto add(const Var& a, const EigMat& b) { using ret_type = return_var_matrix_t; arena_t ret(a.val() + b.array()); reverse_pass_callback([ret, a]() mutable { a.adj() += ret.adj().sum(); }); return ret_type(ret); } /** * Addition operator for a variable and arithmetic matrix (C++). * * @tparam EigMat An Eigen Matrix type with an arithmetic Scalar type. * @tparam Var A `var_value` with an underlying arithmetic type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. */ template * = nullptr, require_var_vt* = nullptr> inline auto add(const EigMat& a, const Var& b) { return add(b, a); } /** * Addition operator for a variable and variable matrix (C++). * * @tparam Var A `var_value` with an underlying arithmetic type. * @tparam VarMat An Eigen Matrix type with a variable Scalar type or a * `var_value` with an underlying matrix type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. 
*/ template * = nullptr, require_rev_matrix_t* = nullptr> inline auto add(const Var& a, const VarMat& b) { using ret_type = return_var_matrix_t; arena_t arena_b(b); arena_t ret(a.val() + arena_b.val().array()); reverse_pass_callback([ret, a, arena_b]() mutable { for (Eigen::Index j = 0; j < ret.cols(); ++j) { for (Eigen::Index i = 0; i < ret.rows(); ++i) { const auto ret_adj = ret.adj().coeffRef(i, j); a.adj() += ret_adj; arena_b.adj().coeffRef(i, j) += ret_adj; } } }); return ret_type(ret); } /** * Addition operator for a variable matrix and variable (C++). * * @tparam VarMat An Eigen Matrix type with a variable Scalar type or a * `var_value` with an underlying matrix type. * @tparam Var A `var_value` with an underlying arithmetic type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. */ template * = nullptr, require_rev_matrix_t* = nullptr> inline auto add(const VarMat& a, const Var& b) { return add(b, a); } template * = nullptr, require_any_arithmetic_t* = nullptr> inline auto add(const T1& a, const T2& b) { return a + b; } template * = nullptr> inline auto add(const T1& a, const T2& b) { return a + b; } /** * Addition operator for matrix variables. * * @tparam VarMat1 A matrix of vars or a var with an underlying matrix type. * @tparam VarMat2 A matrix of vars or a var with an underlying matrix type. * @param a First variable operand. * @param b Second variable operand. * @return Variable result of adding two variables. 
*/ template * = nullptr> inline auto operator+(const VarMat1& a, const VarMat2& b) { return add(a, b); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/chainable_object.hpp0000644000176200001440000000732314645137106024456 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_CHAINABLE_OBJECT_HPP #define STAN_MATH_REV_CORE_CHAINABLE_OBJECT_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * `chainable_object` hold another object is useful for connecting * the lifetime of a specific object to the chainable stack * * `chainable_object` objects should only be allocated with `new`. * `chainable_object` objects allocated on the stack will result * in a double free (`obj_` will get destructed once when the * chainable_object leaves scope and once when the chainable * stack memory is recovered). * * @tparam T type of object to hold */ template class chainable_object : public chainable_alloc { private: plain_type_t obj_; public: /** * Construct chainable object from another object * * @tparam S type of object to hold (must have the same plain type as `T`) */ template , plain_type_t>* = nullptr> explicit chainable_object(S&& obj) : obj_(std::forward(obj)) {} /** * Return a reference to the underlying object * * @return reference to underlying object */ inline auto& get() noexcept { return obj_; } inline const auto& get() const noexcept { return obj_; } }; /** * Store the given object in a `chainable_object` so it is destructed * only when the chainable stack memory is recovered and return * a pointer to the underlying object * * @tparam T type of object to hold * @param obj object to hold * @return pointer to object held in `chainable_object` */ template auto make_chainable_ptr(T&& obj) { auto ptr = new chainable_object(std::forward(obj)); return &ptr->get(); } /** * `unsafe_chainable_object` hold another object and is useful for connecting * the lifetime of a specific object to the chainable 
stack. This class * differs from `chainable_object` in that this class does not evaluate * expressions. * * `unsafe_chainable_object` objects should only be allocated with `new`. * `unsafe_chainable_object` objects allocated on the stack will result * in a double free (`obj_` will get destructed once when the * `unsafe_chainable_object` leaves scope and once when the chainable * stack memory is recovered). * * @tparam T type of object to hold */ template class unsafe_chainable_object : public chainable_alloc { private: std::decay_t obj_; public: /** * Construct chainable object from another object * * @tparam S type of object to hold (must have the same plain type as `T`) */ template , plain_type_t>* = nullptr> explicit unsafe_chainable_object(S&& obj) : obj_(std::forward(obj)) {} /** * Return a reference to the underlying object * * @return reference to underlying object */ inline auto& get() noexcept { return obj_; } inline const auto& get() const noexcept { return obj_; } }; /** * Store the given object in a `chainable_object` so it is destructed * only when the chainable stack memory is recovered and return * a pointer to the underlying object This function * differs from `make_chainable_object` in that this class does not evaluate * expressions. * * @tparam T type of object to hold * @param obj object to hold * @return pointer to object held in `chainable_object` */ template auto make_unsafe_chainable_ptr(T&& obj) { auto ptr = new unsafe_chainable_object(std::forward(obj)); return &ptr->get(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/grad.hpp0000644000176200001440000000347114645137106022137 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_GRAD_HPP #define STAN_MATH_REV_CORE_GRAD_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Compute the gradient for all variables starting from the end of the AD tape. * This function does not recover memory. 
The chain * rule is applied working down the stack from the last vari created on the * AD tape and then calling each vari's `chain()` method in turn. * *

This function computes a nested gradient only going back as far * as the last nesting. * *

This function does not recover any memory from the computation. * */ static inline void grad() { size_t end = ChainableStack::instance_->var_stack_.size(); size_t beginning = empty_nested() ? 0 : end - nested_size(); for (size_t i = end; i-- > beginning;) { ChainableStack::instance_->var_stack_[i]->chain(); } } /** * Compute the gradient for all variables starting from the * specified root variable implementation. Does not recover * memory. This chainable variable's adjoint is initialized using * the method init_dependent() and then the chain * rule is applied working down the stack from this vari and * calling each vari's chain() method in turn. * *

This function computes a nested gradient only going back as far * as the last nesting. * *

This function does not recover any memory from the computation. * * @param vi Variable implementation for root of partial * derivative propagation. */ template static void grad(Vari* vi) { vi->init_dependent(); grad(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/vdd_vari.hpp0000644000176200001440000000063014645137106023012 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VDD_VARI_HPP #define STAN_MATH_REV_CORE_VDD_VARI_HPP #include namespace stan { namespace math { class op_vdd_vari : public vari { protected: vari* avi_; double bd_; double cd_; public: op_vdd_vari(double f, vari* avi, double b, double c) : vari(f), avi_(avi), bd_(b), cd_(c) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/set_zero_all_adjoints.hpp0000644000176200001440000000115614645137106025575 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_SET_ZERO_ALL_ADJOINTS_HPP #define STAN_MATH_REV_CORE_SET_ZERO_ALL_ADJOINTS_HPP #include #include #include namespace stan { namespace math { /** * Reset all adjoint values in the stack to zero. 
*/ static inline void set_zero_all_adjoints() { for (auto &x : ChainableStack::instance_->var_stack_) { x->set_zero_adjoint(); } for (auto &x : ChainableStack::instance_->var_nochain_stack_) { x->set_zero_adjoint(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/nested_size.hpp0000644000176200001440000000062414645137106023533 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_NESTED_SIZE_HPP #define STAN_MATH_REV_CORE_NESTED_SIZE_HPP #include #include namespace stan { namespace math { static inline size_t nested_size() { return ChainableStack::instance_->var_stack_.size() - ChainableStack::instance_->nested_var_stack_sizes_.back(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/dvv_vari.hpp0000644000176200001440000000063414645137106023040 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_DVV_VARI_HPP #define STAN_MATH_REV_CORE_DVV_VARI_HPP #include namespace stan { namespace math { class op_dvv_vari : public vari { protected: double ad_; vari* bvi_; vari* cvi_; public: op_dvv_vari(double f, double a, vari* bvi, vari* cvi) : vari(f), ad_(a), bvi_(bvi), cvi_(cvi) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/init_chainablestack.hpp0000644000176200001440000000424214645137106025176 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_INIT_CHAINABLESTACK_HPP #define STAN_MATH_REV_CORE_INIT_CHAINABLESTACK_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * TBB observer object which is a callback hook called whenever the * TBB scheduler adds a new thread to the TBB managed threadpool. This * hook ensures that each worker thread has an initialized AD tape * ready for use. 
* * Refer to * https://software.intel.com/content/www/us/en/develop/documentation/tbb-documentation/top/intel-threading-building-blocks-developer-reference/task-scheduler/taskschedulerobserver.html * for details on the observer concept. */ class ad_tape_observer final : public tbb::task_scheduler_observer { using stack_ptr = std::unique_ptr; using ad_map = std::unordered_map; public: ad_tape_observer() : tbb::task_scheduler_observer(), thread_tape_map_() { on_scheduler_entry(true); // register current process observe(true); // activates the observer } ~ad_tape_observer() { observe(false); } void on_scheduler_entry(bool worker) { std::lock_guard thread_tape_map_lock(thread_tape_map_mutex_); const std::thread::id thread_id = std::this_thread::get_id(); if (thread_tape_map_.find(thread_id) == thread_tape_map_.end()) { ad_map::iterator insert_elem; bool status = false; std::tie(insert_elem, status) = thread_tape_map_.emplace(ad_map::value_type{thread_id, nullptr}); insert_elem->second = std::make_unique(); } } void on_scheduler_exit(bool worker) { std::lock_guard thread_tape_map_lock(thread_tape_map_mutex_); auto elem = thread_tape_map_.find(std::this_thread::get_id()); if (elem != thread_tape_map_.end()) { thread_tape_map_.erase(elem); } } private: ad_map thread_tape_map_; std::mutex thread_tape_map_mutex_; }; namespace { ad_tape_observer global_observer; } // namespace } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/v_vari.hpp0000644000176200001440000000051214645137106022501 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_V_VARI_HPP #define STAN_MATH_REV_CORE_V_VARI_HPP #include namespace stan { namespace math { class op_v_vari : public vari { protected: vari* avi_; public: op_v_vari(double f, vari* avi) : vari(f), avi_(avi) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_unary_plus.hpp0000644000176200001440000000225414645137106025334 0ustar liggesusers#ifndef 
STAN_MATH_REV_CORE_OPERATOR_UNARY_PLUS_HPP #define STAN_MATH_REV_CORE_OPERATOR_UNARY_PLUS_HPP #include #include #include #include #include namespace stan { namespace math { /** * Unary plus operator for variables (C++). * * The function simply returns its input, because * * \f$\frac{d}{dx} +x = \frac{d}{dx} x = 1\f$. * * The effect of unary plus on a built-in C++ scalar type is * integer promotion. Because variables are all * double-precision floating point already, promotion is * not necessary. * \f[ \mbox{operator+}(x) = \begin{cases} x & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator+}(x)}{\partial x} = \begin{cases} 1 & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param a Argument variable. * @return The input reference. */ inline var operator+(const var& a) { return a; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/nested_rev_autodiff.hpp0000644000176200001440000000252414645137106025237 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_nested_rev_autodiff_HPP #define STAN_MATH_REV_CORE_nested_rev_autodiff_HPP #include #include #include namespace stan { namespace math { /** * A class following the RAII idiom to start and recover nested autodiff scopes. * This is the preferred way to use nested autodiff. Example: * * var a; // allocated normally * { * nested_rev_autodiff nested; // Starts nested autodiff * * var nested_var; //allocated on the nested stack * // Do stuff on the nested stack * * // Nested stack is automatically recovered at the end of scope where * // nested was declared, including exceptions, returns, etc. 
* } * var b; */ class nested_rev_autodiff { public: nested_rev_autodiff() { start_nested(); } ~nested_rev_autodiff() { recover_memory_nested(); } // Prevent undesirable operations nested_rev_autodiff(const nested_rev_autodiff&) = delete; nested_rev_autodiff& operator=(const nested_rev_autodiff&) = delete; void* operator new(std::size_t) = delete; /** * Reset all adjoint values in this nested stack * to zero. **/ inline void set_zero_all_adjoints() { set_zero_all_adjoints_nested(); } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/std_iterator_traits.hpp0000644000176200001440000000163114645137106025307 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_STD_ITERATOR_TRAITS_HPP #define STAN_MATH_REV_CORE_STD_ITERATOR_TRAITS_HPP #include #include #include namespace std { /** * Specialization of iterator traits for Stan math. These all take * the form of typedefs. */ template struct iterator_traits> { /** * Iterator category for traits. */ typedef random_access_iterator_tag iterator_category; /** * Type for difference between pointers. */ typedef ptrdiff_t difference_type; /** * Type for value of pointer to values. */ typedef stan::math::var_value value_type; /** * Type of pointer to variables. */ typedef stan::math::var_value* pointer; /** * Type of reference to variables. */ typedef stan::math::var_value& reference; }; } // namespace std #endif StanHeaders/inst/include/stan/math/rev/core/recover_memory.hpp0000644000176200001440000000170314645137106024253 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_RECOVER_MEMORY_HPP #define STAN_MATH_REV_CORE_RECOVER_MEMORY_HPP #include #include #include #include namespace stan { namespace math { /** * Recover memory used for all variables for reuse. 
* * @throw std::logic_error if empty_nested() returns * false */ static inline void recover_memory() { if (!empty_nested()) { throw std::logic_error( "empty_nested() must be true" " before calling recover_memory()"); } ChainableStack::instance_->var_stack_.clear(); ChainableStack::instance_->var_nochain_stack_.clear(); for (auto &x : ChainableStack::instance_->var_alloc_stack_) { delete x; } ChainableStack::instance_->var_alloc_stack_.clear(); ChainableStack::instance_->memalloc_.recover_all(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/vvv_vari.hpp0000644000176200001440000000064014645137106023057 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VVV_VARI_HPP #define STAN_MATH_REV_CORE_VVV_VARI_HPP #include namespace stan { namespace math { class op_vvv_vari : public vari { protected: vari* avi_; vari* bvi_; vari* cvi_; public: op_vvv_vari(double f, vari* avi, vari* bvi, vari* cvi) : vari(f), avi_(avi), bvi_(bvi), cvi_(cvi) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/zero_adjoints.hpp0000644000176200001440000000400614645137106024067 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_ZERO_ADJOINTS_HPP #define STAN_MATH_REV_CORE_ZERO_ADJOINTS_HPP #include #include #include namespace stan { namespace math { /** * End of recursion for set_zero_adjoints */ inline void zero_adjoints() noexcept {} /** * Do nothing for non-autodiff arguments. Recursively call zero_adjoints * on the rest of the arguments. * * @tparam T type of current argument * @tparam Pargs type of rest of arguments * * @param x current argument * @param args rest of arguments to zero */ template * = nullptr> inline void zero_adjoints(T& x) noexcept {} /** * Zero the adjoint of the vari in the first argument. Recursively call * zero_adjoints on the rest of the arguments. 
* * @tparam T type of current argument * @tparam Pargs type of rest of arguments * * @param x current argument * @param args rest of arguments to zero */ inline void zero_adjoints(var& x) { x.adj() = 0; } /** * Zero the adjoints of the varis of every var in an Eigen::Matrix * container. Recursively call zero_adjoints on the rest of the arguments. * * @tparam T type of current argument * @tparam Pargs type of rest of arguments * * @param x current argument * @param args rest of arguments to zero */ template * = nullptr> inline void zero_adjoints(EigMat& x) { for (size_t i = 0; i < x.size(); ++i) x.coeffRef(i).adj() = 0; } /** * Zero the adjoints of every element in a vector. Recursively call * zero_adjoints on the rest of the arguments. * * @tparam T type of current argument * @tparam Pargs type of rest of arguments * * @param x current argument * @param args rest of arguments to zero */ template * = nullptr> inline void zero_adjoints(StdVec& x) { for (size_t i = 0; i < x.size(); ++i) { zero_adjoints(x[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/print_stack.hpp0000644000176200001440000000115214645137106023535 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_PRINT_STACK_HPP #define STAN_MATH_REV_CORE_PRINT_STACK_HPP #include #include #include namespace stan { namespace math { /** * Prints the autodiff variable stack. This function * is used for debugging purposes. * * Only works if all members of stack are vari* as it * casts to vari*. 
* * @param o ostream to modify */ inline void print_stack(std::ostream& o) { o << "STACK, size=" << ChainableStack::instance_->var_stack_.size() << std::endl; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/set_zero_all_adjoints_nested.hpp0000644000176200001440000000262314645137106027137 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_SET_ZERO_ALL_ADJOINTS_NESTED_HPP #define STAN_MATH_REV_CORE_SET_ZERO_ALL_ADJOINTS_NESTED_HPP #include #include #include #include #include namespace stan { namespace math { /** * Reset all adjoint values in the top nested portion of the stack * to zero. * * It is preferred to use the nested_rev_autodiff class for * nested autodiff class as it handles recovery of memory automatically. */ static inline void set_zero_all_adjoints_nested() { if (empty_nested()) { throw std::logic_error( "empty_nested() must be false before calling" " set_zero_all_adjoints_nested()"); } const size_t start1 = ChainableStack::instance_->nested_var_stack_sizes_.back(); // avoid wrap with unsigned when start1 == 0 for (size_t i = start1; i < ChainableStack::instance_->var_stack_.size(); ++i) { ChainableStack::instance_->var_stack_[i]->set_zero_adjoint(); } const size_t start2 = ChainableStack::instance_->nested_var_nochain_stack_sizes_.back(); for (size_t i = start2; i < ChainableStack::instance_->var_nochain_stack_.size(); ++i) { ChainableStack::instance_->var_nochain_stack_[i]->set_zero_adjoint(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/gevv_vvv_vari.hpp0000644000176200001440000000335014645137106024107 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_GEVV_VVV_VARI_HPP #define STAN_MATH_REV_CORE_GEVV_VVV_VARI_HPP #include #include #include namespace stan { namespace math { class gevv_vvv_vari : public vari { protected: vari* alpha_; vari** v1_; vari** v2_; double dotval_; size_t length_; inline static double eval_gevv(const var* alpha, const var* v1, int stride1, const 
var* v2, int stride2, size_t length, double* dotprod) { double result = 0; for (size_t i = 0; i < length; i++) { result += v1[i * stride1].vi_->val_ * v2[i * stride2].vi_->val_; } *dotprod = result; return alpha->vi_->val_ * result; } public: gevv_vvv_vari(const var* alpha, const var* v1, int stride1, const var* v2, int stride2, size_t length) : vari(eval_gevv(alpha, v1, stride1, v2, stride2, length, &dotval_)), length_(length) { alpha_ = alpha->vi_; // TODO(carpenter): replace this with array alloc fun call v1_ = reinterpret_cast(ChainableStack::instance_->memalloc_.alloc( 2 * length_ * sizeof(vari*))); v2_ = v1_ + length_; for (size_t i = 0; i < length_; i++) { v1_[i] = v1[i * stride1].vi_; } for (size_t i = 0; i < length_; i++) { v2_[i] = v2[i * stride2].vi_; } } virtual ~gevv_vvv_vari() {} void chain() { const double adj_alpha = adj_ * alpha_->val_; for (size_t i = 0; i < length_; i++) { v1_[i]->adj_ += adj_alpha * v2_[i]->val_; v2_[i]->adj_ += adj_alpha * v1_[i]->val_; } alpha_->adj_ += adj_ * dotval_; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/std_isinf.hpp0000644000176200001440000000077314645137106023206 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_STD_ISINF_HPP #define STAN_MATH_REV_CORE_STD_ISINF_HPP #include #include namespace std { /** * Return 1 if the specified argument is positive * infinity or negative infinity and 0 otherwise. * * @param a Argument. * @return 1 if argument is infinite and 0 otherwise. 
*/ inline bool isinf(const stan::math::var& a) { return stan::math::is_inf(a.val()); } } // namespace std #endif StanHeaders/inst/include/stan/math/rev/core/precomputed_gradients.hpp0000644000176200001440000002115414645137106025607 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_PRECOMPUTED_GRADIENTS_HPP #define STAN_MATH_REV_CORE_PRECOMPUTED_GRADIENTS_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * A variable implementation taking a sequence of operands and * partial derivatives with respect to the operands. * * Stan users should use function precomputed_gradients() * directly. * * @tparam ContainerOperands tuple of any container operands ((optionally std * vector of) var_value containing Eigen types) * @tparam ContainerGradients tuple of any container gradients (Eigen types) */ template , typename ContainerGradients = std::tuple<>> class precomputed_gradients_vari_template : public vari { protected: const size_t size_; vari** varis_; double* gradients_; static_assert(std::tuple_size::value == std::tuple_size::value, "precomputed_gradients_vari: ContainerOperands and " "ContainerGradients should have same size!"); static constexpr size_t N_containers = std::tuple_size::value; ContainerOperands container_operands_; ContainerGradients container_gradients_; /** * Checks that sizes of containers in container operands and * container_gradients match. * * @throws std::invalid_argument sizes do not match */ template void check_sizes(std::index_sequence) { static_cast(std::initializer_list{ (check_matching_dims("precomputed_gradients_vari", "operands", std::get(container_operands_), "gradients", std::get(container_gradients_)), 0)...}); } public: /** * Construct a precomputed vari with the specified value, * operands, gradients and optionally container operands and containers of * gradients. 
* * @tparam ContainerOps tuple of any container operands (var_value * containing Eigen types) * @tparam ContainerGrads tupleof any container gradients (Eigen types) * @param[in] val The value of the variable. * @param[in] size Size of operands and gradients * @param[in] varis Operand implementations. * @param[in] gradients Gradients with respect to operands. * @param container_operands any container operands * @param container_gradients any container gradients */ template precomputed_gradients_vari_template( double val, size_t size, vari** varis, double* gradients, const std::tuple& container_operands = std::tuple<>(), const std::tuple& container_gradients = std::tuple<>()) : vari(val), size_(size), varis_(varis), gradients_(gradients), container_operands_(index_apply([&](auto... Is) { return std::make_tuple(to_arena(std::get(container_operands))...); })), container_gradients_(index_apply([&](auto... Is) { return std::make_tuple( to_arena(std::get(container_gradients))...); })) { check_sizes(std::make_index_sequence()); } /** * Construct a precomputed vari with the specified value, * operands, gradients and optionally container operands and containers of * gradients. * * @tparam Arith An arithmetic type * @tparam VecVar A vector of vars * @tparam VecArith A vector of arithmetic types * @tparam ContainerOps tuple of any container operands (var_value * containing Eigen types) * @tparam ContainerGrads tupleof any container gradients (Eigen types) * @param[in] val The value of the variable. * @param[in] vars Vector of operands. * @param[in] gradients Vector of partial derivatives of value * with respect to operands. * @param container_operands any container operands * @param container_gradients any container gradients * @throws std::invalid_argument if the sizes of the vectors * don't match. 
*/ template * = nullptr> precomputed_gradients_vari_template( Arith val, const VecVar& vars, const VecArith& gradients, const std::tuple& container_operands = std::tuple<>(), const std::tuple& container_gradients = std::tuple<>()) : vari(val), size_(vars.size()), varis_(ChainableStack::instance_->memalloc_.alloc_array( vars.size())), gradients_(ChainableStack::instance_->memalloc_.alloc_array( vars.size())), container_operands_(index_apply([&](auto... Is) { return std::make_tuple(to_arena(std::get(container_operands))...); })), container_gradients_(index_apply([&](auto... Is) { return std::make_tuple( to_arena(std::get(container_gradients))...); })) { check_consistent_sizes("precomputed_gradients_vari", "vars", vars, "gradients", gradients); check_sizes(std::make_index_sequence()); for (size_t i = 0; i < vars.size(); ++i) { varis_[i] = vars[i].vi_; } std::copy(gradients.begin(), gradients.end(), gradients_); } /** * Implements the chain rule for this variable, using the * prestored operands and gradient. */ void chain() { for (size_t i = 0; i < size_; ++i) { varis_[i]->adj_ += adj_ * gradients_[i]; } index_apply([this](auto... Is) { static_cast(std::initializer_list{ (this->chain_one(std::get(this->container_operands_), std::get(this->container_gradients_)), 0)...}); }); } private: /** * Implements the chain rule for one non-`std::vector` operand. * @tparam Op type of the operand * @tparam Grad type of the gradient * @param op operand * @param grad gradient */ template * = nullptr> void chain_one(Op& op, const Grad& grad) { op.vi_->adj_ = op.vi_->adj_ + this->adj_ * grad; } /** * Implements the chain rule for one `std::vector` operand. 
* @tparam Op type of the operand element * @tparam Grad type of the gradient element * @param op operand * @param grad gradient */ template void chain_one(const std::vector& op, const std::vector& grad) { for (int i = 0; i < op.size(); i++) { chain_one(op[i], grad[i]); } } }; using precomputed_gradients_vari = precomputed_gradients_vari_template, std::tuple<>>; /** * This function returns a var for an expression that has the * specified value, vector of operands, and vector of partial * derivatives of value with respect to the operands. * * @tparam Arith An arithmetic type * @tparam VecVar A vector of vars * @tparam VecArith A vector of arithmetic types * @tparam ContainerOperands tuple of any container operands (var_value * containing Eigen types) * @tparam ContainerGradients tupleof any container gradients (Eigen types) * @param[in] value The value of the resulting dependent variable. * @param[in] operands operands. * @param[in] gradients vector of partial derivatives of result with * respect to operands. * @param container_operands any container operands * @param container_gradients any container gradients * @return An autodiff variable that uses the precomputed * gradients provided. */ template inline var precomputed_gradients( Arith value, const VecVar& operands, const VecArith& gradients, const std::tuple& container_operands = std::tuple<>(), const std::tuple& container_gradients = std::tuple<>()) { return {new precomputed_gradients_vari_template< std::tuple...>, std::tuple...>>( value, operands, gradients, container_operands, container_gradients)}; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_unary_not.hpp0000644000176200001440000000073414645137106025152 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_UNARY_NOT_HPP #define STAN_MATH_REV_CORE_OPERATOR_UNARY_NOT_HPP #include #include namespace stan { namespace math { /** * Return the negation of the value of the argument as defined by * !. 
* * @param[in] x argument * @return negation of argument value */ inline bool operator!(const var& x) { return !x.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/std_complex.hpp0000644000176200001440000000362314645137106023542 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_STD_COMPLEX_HPP #define STAN_MATH_REV_CORE_STD_COMPLEX_HPP #include #include #include #include #include namespace std { /** * Specialization of the standard library complex number type for * reverse-mode autodiff type `stan::math::var`. */ template <> class complex : public stan::math::complex_base { public: using base_t = stan::math::complex_base; /** * Construct a complex number with zero real and imaginary parts. */ complex() = default; /** * Construct a complex number from real and imaginary parts. * * @tparam U type of real part (assignable to `value_type`) * @tparam V type of imaginary part (assignable to `value_type`) * @param[in] re real part * @param[in] im imaginary part */ template complex(const U& re, const V& im) : base_t(re, im) {} /** * Construct a complex number with specified real part and zero * imaginary part. * * @tparam U type of real part (assignable to `value_type`) * @param[in] re real part */ template > complex(const U& re) : base_t(re) {} // NOLINT(runtime/explicit) template complex(const std::complex& z) // NOLINT(runtime/explicit) : base_t(z.real(), z.imag()) {} /** * Set the real and imaginary components of this complex number to * those of the specified complex number. 
* * @tparam U value type of argument (must be an arithmetic type) * @param[in] x complex argument * @return this */ template > auto& operator=(const std::complex& x) { re_ = x.real(); im_ = x.imag(); return *this; } }; } // namespace std #endif StanHeaders/inst/include/stan/math/rev/core/recover_memory_nested.hpp0000644000176200001440000000344014645137106025615 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_RECOVER_MEMORY_NESTED_HPP #define STAN_MATH_REV_CORE_RECOVER_MEMORY_NESTED_HPP #include #include #include #include namespace stan { namespace math { /** * Recover only the memory used for the top nested call. If there * is nothing on the nested stack, then a * std::logic_error exception is thrown. * * It is preferred to use the nested_rev_autodiff class for * nested autodiff as it handles recovery of memory automatically. * * @throw std::logic_error if empty_nested() returns * true */ static inline void recover_memory_nested() { if (empty_nested()) { throw std::logic_error( "empty_nested() must be false" " before calling recover_memory_nested()"); } ChainableStack::instance_->var_stack_.resize( ChainableStack::instance_->nested_var_stack_sizes_.back()); ChainableStack::instance_->nested_var_stack_sizes_.pop_back(); ChainableStack::instance_->var_nochain_stack_.resize( ChainableStack::instance_->nested_var_nochain_stack_sizes_.back()); ChainableStack::instance_->nested_var_nochain_stack_sizes_.pop_back(); for (size_t i = ChainableStack::instance_->nested_var_alloc_stack_starts_.back(); i < ChainableStack::instance_->var_alloc_stack_.size(); ++i) { delete ChainableStack::instance_->var_alloc_stack_[i]; } ChainableStack::instance_->var_alloc_stack_.resize( ChainableStack::instance_->nested_var_alloc_stack_starts_.back()); ChainableStack::instance_->nested_var_alloc_stack_starts_.pop_back(); ChainableStack::instance_->memalloc_.recover_nested(); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/rev/core/vvd_vari.hpp0000644000176200001440000000063414645137106023040 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VVD_VARI_HPP #define STAN_MATH_REV_CORE_VVD_VARI_HPP #include namespace stan { namespace math { class op_vvd_vari : public vari { protected: vari* avi_; vari* bvi_; double cd_; public: op_vvd_vari(double f, vari* avi, vari* bvi, double c) : vari(f), avi_(avi), bvi_(bvi), cd_(c) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_multiply_equal.hpp0000644000176200001440000000134514645137106026201 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_MULTIPLY_EQUAL_HPP #define STAN_MATH_REV_CORE_OPERATOR_MULTIPLY_EQUAL_HPP #include #include #include namespace stan { namespace math { template inline var_value& var_value>::operator*=( const var_value& b) { vi_ = new internal::multiply_vv_vari(vi_, b.vi_); return *this; } template inline var_value& var_value>::operator*=( T b) { if (b == 1.0) { return *this; } vi_ = new internal::multiply_vd_vari(vi_, b); return *this; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/start_nested.hpp0000644000176200001440000000165414645137106023722 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_START_NESTED_HPP #define STAN_MATH_REV_CORE_START_NESTED_HPP #include namespace stan { namespace math { /** * Record the current position so that recover_memory_nested() * can find it. * * It is preferred to use the nested_rev_autodiff class for * nested autodiff as it handles recovery of memory automatically. 
*/ static inline void start_nested() { ChainableStack::instance_->nested_var_stack_sizes_.push_back( ChainableStack::instance_->var_stack_.size()); ChainableStack::instance_->nested_var_nochain_stack_sizes_.push_back( ChainableStack::instance_->var_nochain_stack_.size()); ChainableStack::instance_->nested_var_alloc_stack_starts_.push_back( ChainableStack::instance_->var_alloc_stack_.size()); ChainableStack::instance_->memalloc_.start_nested(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_divide_equal.hpp0000644000176200001440000000206714645137106025570 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_DIVIDE_EQUAL_HPP #define STAN_MATH_REV_CORE_OPERATOR_DIVIDE_EQUAL_HPP #include #include #include namespace stan { namespace math { template inline var_value& var_value>::operator/=( const var_value& b) { vi_ = divide(*this, b).vi_; return *this; } template inline var_value& var_value>::operator/=( T b) { if (b == 1.0) { return *this; } vi_ = divide(*this, b).vi_; return *this; } template inline var_value& var_value>::operator/=( const var_value& b) { vi_ = divide(*this, b).vi_; return *this; } template inline var_value& var_value>::operator/=(T b) { if (b == 1.0) { return *this; } vi_ = divide(*this, b).vi_; return *this; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/vari.hpp0000644000176200001440000010217214645137106022161 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VARI_HPP #define STAN_MATH_REV_CORE_VARI_HPP #include #include #include #include #include #include #include namespace stan { namespace math { // forward declaration of vari_value template class vari_value; /** * Abstract base class that all `vari_value` and it's derived classes inherit. * * The chain() method applies the chain rule. Concrete extensions * of this class will represent base variables or the result * of operations such as addition or subtraction. 
These extended * classes will store operand variables and propagate derivative * information via an implementation of chain(). */ class vari_base { public: /** * Apply the chain rule to this variable based on the variables * on which it depends. */ virtual void chain() = 0; virtual void set_zero_adjoint() = 0; /** * Allocate memory from the underlying memory pool. This memory is * is managed as a whole externally. * * Warning: Classes should not be allocated with this operator * if they have non-trivial destructors. * * @param nbytes Number of bytes to allocate. * @return Pointer to allocated bytes. */ static inline void* operator new(size_t nbytes) noexcept { return ChainableStack::instance_->memalloc_.alloc(nbytes); } /** * Delete a pointer from the underlying memory pool. * * This no-op implementation enables a subclass to throw * exceptions in its constructor. An exception thrown in the * constructor of a subclass will result in an error being * raised, which is in turn caught and calls delete(). * * See the discussion of "plugging the memory leak" in: * http://www.parashift.com/c++-faq/memory-pools.html */ static inline void operator delete( void* /* ignore arg */) noexcept { /* no op */ } }; /** * The variable implementation for floating point types. * * This class is complete (not abstract) and may be used for * constants. * * A variable implementation is constructed with a constant * value. It also stores the adjoint for storing the partial * derivative with respect to the root of the derivative tree. * */ template class vari_value>> : public vari_base { public: using value_type = std::decay_t; /** * The value of this variable. */ const value_type val_; /** * The adjoint of this variable, which is the partial derivative * of this variable with respect to the root variable. */ value_type adj_{0.0}; /** * Construct a variable implementation from a value. The * adjoint is initialized to zero. * * All constructed variables are added to the stack. 
Variables * should be constructed before variables on which they depend * to insure proper partial derivative propagation. During * derivative propagation, the chain() method of each variable * will be called in the reverse order of construction. * * @tparam S a floating point type. * @param x Value of the constructed variable. */ template * = nullptr> vari_value(S x) noexcept : val_(x) { // NOLINT ChainableStack::instance_->var_stack_.push_back(this); } /** * Construct a variable implementation from a value. The * adjoint is initialized to zero and if `stacked` is `false` this vari * will be not be put on the var_stack. Instead it will only be put on * a stack to keep track of whether the adjoint needs to be set to zero. * Variables should be constructed before variables on which they depend * to insure proper partial derivative propagation. During * derivative propagation, the chain() method of each variable * will be called in the reverse order of construction. * * @tparam S n floating point type. * @param x Value of the constructed variable. * @param stacked If false will put this this vari on the nochain stack so * that its `chain()` method is not called. */ template * = nullptr> vari_value(S x, bool stacked) noexcept : val_(x) { if (stacked) { ChainableStack::instance_->var_stack_.push_back(this); } else { ChainableStack::instance_->var_nochain_stack_.push_back(this); } } /** * Return a constant reference to the value of this vari. * * @return The value of this vari. */ inline const auto& val() const { return val_; } /** * Return a reference of the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. * * @return Adjoint for this vari. */ inline auto& adj() const { return adj_; } /** * Return a reference to the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. 
* * @return Adjoint for this vari. */ inline auto& adj() { return adj_; } inline void chain() {} /** * Initialize the adjoint for this (dependent) variable to 1. * This operation is applied to the dependent variable before * propagating derivatives, setting the derivative of the * result with respect to itself to be 1. */ inline void init_dependent() noexcept { adj_ = 1.0; } /** * Set the adjoint value of this variable to 0. This is used to * reset adjoints before propagating derivatives again (for * example in a Jacobian calculation). */ inline void set_zero_adjoint() noexcept final { adj_ = 0.0; } /** * Insertion operator for vari. Prints the current value and * the adjoint value. * * @param os [in, out] ostream to modify * @param v [in] vari object to print. * * @return The modified ostream. */ friend std::ostream& operator<<(std::ostream& os, const vari_value* v) { return os << v->val_ << ":" << v->adj_; } private: template friend class var_value; }; // For backwards compatability the default is double using vari = vari_value; /** * A `vari_view` is used to read from a slice of a `vari_value` with an inner * eigen type. It can only accept expressions which do not allocate dynamic * memory. * @tparam T An eigen expression referencing memory allocated in a `vari_value`. */ template class vari_view; /** * This struct is follows the CRTP for methods common to `vari_view<>` and * `vari_value`. * @tparam Derived A `var_value<>` or `vari_view` with an inner type that has * defined methods for subslices of the value and adjoint. */ template class vari_view_eigen { private: /** * Making the base constructor private while making the derived class a friend * help's catch if derived types inherit from another derived types * base class. See the fluentcpp article on CRTP for more information. 
*/ vari_view_eigen() = default; friend Derived; /** * Helper function to return a reference to the derived type */ inline Derived& derived() { return static_cast(*this); } /** * Helper function to return a constant reference to the derived type */ inline const Derived& derived() const { return static_cast(*this); } public: /** * A block view of the underlying Eigen matrices. * @param start_row Starting row of block. * @param start_col Starting columns of block. * @param num_rows Number of rows to return. * @param num_cols Number of columns to return. */ inline auto block(Eigen::Index start_row, Eigen::Index start_col, Eigen::Index num_rows, Eigen::Index num_cols) const { using inner_type = decltype( derived().val_.block(start_row, start_col, num_rows, num_cols)); return vari_view( derived().val_.block(start_row, start_col, num_rows, num_cols), derived().adj_.block(start_row, start_col, num_rows, num_cols)); } inline auto block(Eigen::Index start_row, Eigen::Index start_col, Eigen::Index num_rows, Eigen::Index num_cols) { using inner_type = decltype( derived().val_.block(start_row, start_col, num_rows, num_cols)); return vari_view( derived().val_.block(start_row, start_col, num_rows, num_cols), derived().adj_.block(start_row, start_col, num_rows, num_cols)); } /** * View of the head of Eigen vector types. * @param n Number of elements to return from top of vector. */ inline auto head(Eigen::Index n) const { using inner_type = decltype(derived().val_.head(n)); return vari_view(derived().val_.head(n), derived().adj_.head(n)); } inline auto head(Eigen::Index n) { using inner_type = decltype(derived().val_.head(n)); return vari_view(derived().val_.head(n), derived().adj_.head(n)); } /** * View of the tail of the Eigen vector types. * @param n Number of elements to return from bottom of vector. 
*/ inline auto tail(Eigen::Index n) const { using inner_type = decltype(derived().val_.tail(n)); return vari_view(derived().val_.tail(n), derived().adj_.tail(n)); } inline auto tail(Eigen::Index n) { using inner_type = decltype(derived().val_.tail(n)); return vari_view(derived().val_.tail(n), derived().adj_.tail(n)); } /** * View block of N elements starting at position `i` * @param i Starting position of block. * @param n Number of elements in block */ inline auto segment(Eigen::Index i, Eigen::Index n) const { using inner_type = decltype(derived().val_.segment(i, n)); return vari_view(derived().val_.segment(i, n), derived().adj_.segment(i, n)); } inline auto segment(Eigen::Index i, Eigen::Index n) { using inner_type = decltype(derived().val_.segment(i, n)); return vari_view(derived().val_.segment(i, n), derived().adj_.segment(i, n)); } /** * View transpose of eigen matrix. */ inline auto transpose() const { using inner_type = decltype(derived().val_.transpose()); return vari_view(derived().val_.transpose(), derived().adj_.transpose()); } inline auto transpose() { using inner_type = decltype(derived().val_.transpose()); return vari_view(derived().val_.transpose(), derived().adj_.transpose()); } /** * View row of eigen matrices. * @param i Row index to slice. 
*/ inline auto row(Eigen::Index i) const { using inner_type = decltype(derived().val_.row(i)); return vari_view(derived().val_.row(i), derived().adj_.row(i)); } inline auto row(Eigen::Index i) { using inner_type = decltype(derived().val_.row(i)); return vari_view(derived().val_.row(i), derived().adj_.row(i)); } /** * View column of eigen matrices * @param i Column index to slice */ inline auto col(Eigen::Index i) const { using inner_type = decltype(derived().val_.col(i)); return vari_view(derived().val_.col(i), derived().adj_.col(i)); } inline auto col(Eigen::Index i) { using inner_type = decltype(derived().val_.col(i)); return vari_view(derived().val_.col(i), derived().adj_.col(i)); } /** * View diagonal of eigen matrices * @param i Column index to slice */ inline auto diagonal() const { using inner_type = decltype(derived().val_.diagonal()); return vari_view(derived().val_.diagonal(), derived().adj_.diagonal()); } inline auto diagonal() { using inner_type = decltype(derived().val_.diagonal()); return vari_view(derived().val_.diagonal(), derived().adj_.diagonal()); } /** * Get coefficient of eigen matrices * @param i Row index * @param j Column index */ inline auto coeff(Eigen::Index i, Eigen::Index j) const { return vari_value(derived().val_.coeffRef(i, j), derived().adj_.coeffRef(i, j)); } inline auto coeff(Eigen::Index i, Eigen::Index j) { return vari_value(derived().val_.coeffRef(i, j), derived().adj_.coeffRef(i, j)); } /** * Get coefficient of eigen matrices * @param i Column index to slice */ inline auto coeff(Eigen::Index i) const { return vari_value(derived().val_.coeffRef(i), derived().adj_.coeffRef(i)); } inline auto coeff(Eigen::Index i) { return vari_value(derived().val_.coeffRef(i), derived().adj_.coeffRef(i)); } /** * Get coefficient of eigen matrices * @param i Column index to slice */ inline auto operator()(Eigen::Index i) const { return this->coeff(i); } inline auto operator()(Eigen::Index i) { return this->coeff(i); } /** * Get coefficient of 
eigen matrices * @param i Row index * @param j Column index */ inline auto operator()(Eigen::Index i, Eigen::Index j) const { return this->coeff(i, j); } inline auto operator()(Eigen::Index i, Eigen::Index j) { return this->coeff(i, j); } /** * Return an expression that operates on the rows of the matrix `vari` */ inline auto rowwise_reverse() const { using inner_type = decltype(derived().val_.rowwise().reverse()); return vari_view(derived().val_.rowwise().reverse(), derived().adj_.rowwise().reverse()); } inline auto rowwise_reverse() { using inner_type = decltype(derived().val_.rowwise().reverse()); return vari_view(derived().val_.rowwise().reverse(), derived().adj_.rowwise().reverse()); } /** * Return an expression that operates on the columns of the matrix `vari` */ inline auto colwise_reverse() const { using inner_type = decltype(derived().val_.colwise().reverse()); return vari_view(derived().val_.colwise().reverse(), derived().adj_.colwise().reverse()); } inline auto colwise_reverse() { using inner_type = decltype(derived().val_.colwise().reverse()); return vari_view(derived().val_.colwise().reverse(), derived().adj_.colwise().reverse()); } /** * Return an expression to reverse the order of the coefficients * inside of a `vari` matrix */ inline auto reverse() const { using inner_type = decltype(derived().val_.reverse()); return vari_view(derived().val_.reverse(), derived().adj_.reverse()); } inline auto reverse() { using inner_type = decltype(derived().val_.reverse()); return vari_view(derived().val_.reverse(), derived().adj_.reverse()); } /** * Return a block consisting of the top rows * @param n Number of rows */ inline auto topRows(Eigen::Index n) const { using inner_type = decltype(derived().val_.topRows(n)); return vari_view(derived().val_.topRows(n), derived().adj_.topRows(n)); } inline auto topRows(Eigen::Index n) { using inner_type = decltype(derived().val_.topRows(n)); return vari_view(derived().val_.topRows(n), derived().adj_.topRows(n)); } /** * 
Return a block consisting of the bottom rows * @param n Number of rows */ inline auto bottomRows(Eigen::Index n) const { using inner_type = decltype(derived().val_.bottomRows(n)); return vari_view(derived().val_.bottomRows(n), derived().adj_.bottomRows(n)); } inline auto bottomRows(Eigen::Index n) { using inner_type = decltype(derived().val_.bottomRows(n)); return vari_view(derived().val_.bottomRows(n), derived().adj_.bottomRows(n)); } /** * Return a block consisting of rows in the middle. * @param start_row Starting row index * @param n Number of rows */ inline auto middleRows(Eigen::Index start_row, Eigen::Index n) const { using inner_type = decltype(derived().val_.middleRows(start_row, n)); return vari_view(derived().val_.middleRows(start_row, n), derived().adj_.middleRows(start_row, n)); } inline auto middleRows(Eigen::Index start_row, Eigen::Index n) { using inner_type = decltype(derived().val_.middleRows(start_row, n)); return vari_view(derived().val_.middleRows(start_row, n), derived().adj_.middleRows(start_row, n)); } /** * Return a block consisting of the left-most columns * @param n Number of columns */ inline auto leftCols(Eigen::Index n) const { using inner_type = decltype(derived().val_.leftCols(n)); return vari_view(derived().val_.leftCols(n), derived().adj_.leftCols(n)); } inline auto leftCols(Eigen::Index n) { using inner_type = decltype(derived().val_.leftCols(n)); return vari_view(derived().val_.leftCols(n), derived().adj_.leftCols(n)); } /** * Return a block consisting of the right-most columns * @param n Number of columns */ inline auto rightCols(Eigen::Index n) const { using inner_type = decltype(derived().val_.rightCols(n)); return vari_view(derived().val_.rightCols(n), derived().adj_.rightCols(n)); } inline auto rightCols(Eigen::Index n) { using inner_type = decltype(derived().val_.rightCols(n)); return vari_view(derived().val_.rightCols(n), derived().adj_.rightCols(n)); } /** * Return a block consisting of columns in the middle. 
* @param start_col Starting column index * @param n Number of columns */ inline auto middleCols(Eigen::Index start_col, Eigen::Index n) const { using inner_type = decltype(derived().val_.middleCols(start_col, n)); return vari_view(derived().val_.middleCols(start_col, n), derived().adj_.middleCols(start_col, n)); } inline auto middleCols(Eigen::Index start_col, Eigen::Index n) { using inner_type = decltype(derived().val_.middleCols(start_col, n)); return vari_view(derived().val_.middleCols(start_col, n), derived().adj_.middleCols(start_col, n)); } /** * Return an Array expression */ inline auto array() const { using inner_type = decltype(derived().val_.array()); return vari_view(derived().val_.array(), derived().adj_.array()); } inline auto array() { using inner_type = decltype(derived().val_.array()); return vari_view(derived().val_.array(), derived().adj_.array()); } /** * Return a Matrix expression */ inline auto matrix() const { using inner_type = decltype(derived().val_.matrix()); return vari_view(derived().val_.matrix(), derived().adj_.matrix()); } inline auto matrix() { using inner_type = decltype(derived().val_.matrix()); return vari_view(derived().val_.matrix(), derived().adj_.matrix()); } /** * Return the number of rows for this class's `val_` member */ inline Eigen::Index rows() const { return derived().val_.rows(); } /** * Return the number of columns for this class's `val_` member */ inline Eigen::Index cols() const { return derived().val_.cols(); } /** * Return the size of this class's `val_` member */ inline Eigen::Index size() const { return derived().val_.size(); } }; template class vari_view< T, require_all_t, bool_constant::value>>> final : public vari_base, public vari_view_eigen>> { public: using PlainObject = plain_type_t; using value_type = std::decay_t; // The underlying type for this class /** * Number of rows known at compile time */ static constexpr int RowsAtCompileTime = PlainObject::RowsAtCompileTime; /** * Number of columns known at 
compile time */ static constexpr int ColsAtCompileTime = PlainObject::ColsAtCompileTime; T val_; T adj_; template * = nullptr, require_assignable_t* = nullptr> vari_view(const S& val, const K& adj) noexcept : val_(val), adj_(adj) {} /** * Return a constant reference to the value of this vari. * * @return The value of this vari. */ inline const auto& val() const noexcept { return val_; } inline auto& val_op() noexcept { return val_; } /** * Return a reference to the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. * * @return Adjoint for this vari. */ inline auto& adj() noexcept { return adj_; } inline auto& adj() const noexcept { return adj_; } inline auto& adj_op() noexcept { return adj_; } void set_zero_adjoint() {} void chain() {} }; /** * The variable implementation for Eigen dense matrix types. * * This class is complete (not abstract) and may be used for * constants. * * A variable implementation is constructed with a constant * value. It also stores the adjoint for storing the partial * derivative with respect to the root of the derivative tree. * */ template class vari_value, is_eigen_dense_base>> : public vari_base, public vari_view_eigen, is_eigen_dense_base>>> { public: /** * `PlainObject` represents a user constructible type such as Matrix or Array */ using PlainObject = plain_type_t; using value_type = PlainObject; // The underlying type for this class using eigen_scalar = value_type_t; // A floating point type using vari_type = vari_value; /** * Number of rows known at compile time */ static constexpr int RowsAtCompileTime = PlainObject::RowsAtCompileTime; /** * Number of columns known at compile time */ static constexpr int ColsAtCompileTime = PlainObject::ColsAtCompileTime; /** * The value of this variable. */ arena_matrix val_; /** * The adjoint of this variable, which is the partial derivative * of this variable with respect to the root variable. 
*/ arena_matrix adj_; /** * Construct a dense Eigen variable implementation from a value. The * adjoint is initialized to zero. * * All constructed variables are added to the stack. Variables * should be constructed before variables on which they depend * to insure proper partial derivative propagation. During * derivative propagation, the chain() method of each variable * will be called in the reverse order of construction. * * @tparam S A dense Eigen type that is convertible to `value_type` * @param x Value of the constructed variable. */ template * = nullptr> explicit vari_value(const S& x) : val_(x), adj_((RowsAtCompileTime == 1 && S::ColsAtCompileTime == 1) || (ColsAtCompileTime == 1 && S::RowsAtCompileTime == 1) ? x.cols() : x.rows(), (RowsAtCompileTime == 1 && S::ColsAtCompileTime == 1) || (ColsAtCompileTime == 1 && S::RowsAtCompileTime == 1) ? x.rows() : x.cols()) { adj_.setZero(); ChainableStack::instance_->var_stack_.push_back(this); } /** * Construct a dense Eigen variable implementation from a value. The * adjoint is initialized to zero and if `stacked` is `false` this vari * will be not be put on the var_stack. Instead it will only be put on * a stack to keep track of whether the adjoint needs to be set to zero. * Variables should be constructed before variables on which they depend * to insure proper partial derivative propagation. During * derivative propagation, the chain() method of each variable * will be called in the reverse order of construction. * * @tparam S A dense Eigen type that is convertible to `value_type` * @param x Value of the constructed variable. * @param stacked If false will put this this vari on the nochain stack so * that its `chain()` method is not called. */ template * = nullptr> vari_value(const S& x, bool stacked) : val_(x), adj_((RowsAtCompileTime == 1 && S::ColsAtCompileTime == 1) || (ColsAtCompileTime == 1 && S::RowsAtCompileTime == 1) ? 
x.cols() : x.rows(), (RowsAtCompileTime == 1 && S::ColsAtCompileTime == 1) || (ColsAtCompileTime == 1 && S::RowsAtCompileTime == 1) ? x.rows() : x.cols()) { adj_.setZero(); if (stacked) { ChainableStack::instance_->var_stack_.push_back(this); } else { ChainableStack::instance_->var_nochain_stack_.push_back(this); } } protected: template * = nullptr> explicit vari_value(const vari_value* x) : val_(x->val_), adj_(x->adj_) {} public: /** * Return a constant reference to the value of this vari. * * @return The value of this vari. */ inline const auto& val() const noexcept { return val_; } inline auto& val_op() noexcept { return val_; } /** * Return a reference to the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. * * @return Adjoint for this vari. */ inline auto& adj() noexcept { return adj_; } inline auto& adj() const noexcept { return adj_; } inline auto& adj_op() noexcept { return adj_; } virtual void chain() {} /** * Initialize the adjoint for this (dependent) variable to 1. * This operation is applied to the dependent variable before * propagating derivatives, setting the derivative of the * result with respect to itself to be 1. */ inline void init_dependent() { adj_.setOnes(); } /** * Set the adjoint value of this variable to 0. This is used to * reset adjoints before propagating derivatives again (for * example in a Jacobian calculation). */ inline void set_zero_adjoint() final { adj_.setZero(); } /** * Insertion operator for vari. Prints the current value and * the adjoint value. * * @param os [in, out] ostream to modify * @param v [in] vari object to print. * * @return The modified ostream. */ friend std::ostream& operator<<(std::ostream& os, const vari_value* v) { return os << "val: \n" << v->val_ << " \nadj: \n" << v->adj_; } private: template friend class var_value; }; /** * The variable implementation for Eigen sparse matrix types. 
* * This class is complete (not abstract) and may be used for * constants. * * A variable implementation is constructed with a constant * value. It also stores the adjoint for storing the partial * derivative with respect to the root of the derivative tree. * */ template class vari_value> : public vari_base { public: using PlainObject = plain_type_t; // Base type of Eigen class using value_type = PlainObject; // vari's adj_ and val_ member type using InnerIterator = typename arena_matrix::InnerIterator; /** * Rows at compile time */ static constexpr int RowsAtCompileTime = T::RowsAtCompileTime; /** * Columns at compile time */ static constexpr int ColsAtCompileTime = T::ColsAtCompileTime; /** * The value of this variable. */ arena_matrix val_; /** * The adjoint of this variable, which is the partial derivative * of this variable with respect to the root variable. */ arena_matrix adj_; /** * Construct a variable implementation from a value. The * adjoint is initialized to zero. * * All constructed variables are added to the stack. For a sparse eigen matrix * this includes the nozero values as well the inner and outer indices. * Variables should be constructed before variables on which they depend * to insure proper partial derivative propagation. During * derivative propagation, the chain() method of each variable * will be called in the reverse order of construction. * * @tparam S A sparse Eigen type that is convertible to `value_type` * @param x Value of the constructed variable. 
*/ template * = nullptr> explicit vari_value(S&& x) : val_(std::forward(x)), adj_(val_.rows(), val_.cols(), val_.nonZeros(), val_.outerIndexPtr(), val_.innerIndexPtr(), arena_matrix(val_.nonZeros()).setZero().data(), val_.innerNonZeroPtr()) { ChainableStack::instance_->var_stack_.push_back(this); } vari_value(const arena_matrix& val, const arena_matrix& adj) : val_(val), adj_(adj) { ChainableStack::instance_->var_stack_.push_back(this); } /** * Construct an sparse Eigen variable implementation from a value. The * adjoint is initialized to zero and if `stacked` is `false` this vari * will be not be put on the var_stack. Instead it will only be put on * a stack to keep track of whether the adjoint needs to be set to zero. * * All constructed variables are added to a stack. Variables * should be constructed before variables on which they depend * to insure proper partial derivative propagation. During * derivative propagation, the chain() method of each variable * will be called in the reverse order of construction. * * @tparam S A sparse Eigen type that is convertible to `value_type` * @param x Value of the constructed variable. * @param stacked If false will put this this vari on the nochain stack so * that its `chain()` method is not called. 
*/ template * = nullptr> vari_value(S&& x, bool stacked) : val_(std::forward(x)), adj_(val_.rows(), val_.cols(), val_.nonZeros(), val_.outerIndexPtr(), val_.innerIndexPtr(), arena_matrix(val_.nonZeros()).setZero().data(), val_.innerNonZeroPtr()) { if (stacked) { ChainableStack::instance_->var_stack_.push_back(this); } else { ChainableStack::instance_->var_nochain_stack_.push_back(this); } } /** * Return the number of rows for this class's `val_` member */ Eigen::Index rows() const { return val_.rows(); } /** * Return the number of columns for this class's `val_` member */ Eigen::Index cols() const { return val_.cols(); } /** * Return the size of this class's `val_` member */ Eigen::Index size() const { return val_.size(); } /** * Return a constant reference to the value of this vari. * * @return The value of this vari. */ inline const auto& val() const { return val_; } inline auto& val_op() { return val_; } /** * Return a reference to the derivative of the root expression with * respect to this expression. This method only works * after one of the `grad()` methods has been * called. * * @return Adjoint for this vari. */ inline auto& adj() { return adj_; } inline auto& adj() const { return adj_; } inline auto& adj_op() { return adj_; } void chain() {} /** * Initialize the adjoint for this (dependent) variable to 1. * This operation is applied to the dependent variable before * propagating derivatives, setting the derivative of the * result with respect to itself to be 1. */ inline void init_dependent() { std::fill(adj_.valuePtr(), adj_.valuePtr() + adj_.nonZeros(), 1.0); } /** * Set the adjoint value of this variable to 0. This is used to * reset adjoints before propagating derivatives again (for * example in a Jacobian calculation). */ inline void set_zero_adjoint() noexcept final { std::fill(adj_.valuePtr(), adj_.valuePtr() + adj_.nonZeros(), 0.0); } /** * Insertion operator for vari. Prints the current value and * the adjoint value. 
* * @param os [in, out] ostream to modify * @param v [in] vari object to print. * * @return The modified ostream. */ friend std::ostream& operator<<(std::ostream& os, const vari_value* v) { return os << "val: \n" << v->val_ << " \nadj: \n" << v->adj_; } private: template friend class var_value; }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_minus_equal.hpp0000644000176200001440000000133214645137106025451 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_MINUS_EQUAL_HPP #define STAN_MATH_REV_CORE_OPERATOR_MINUS_EQUAL_HPP #include #include #include #include namespace stan { namespace math { template inline var_value& var_value>::operator-=( const var_value& b) { vi_ = (*this - b).vi_; return *this; } template inline var_value& var_value>::operator-=( T b) { if (unlikely(b == 0.0)) { return *this; } vi_ = (*this - b).vi_; return *this; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/build_vari_array.hpp0000644000176200001440000000156714645137106024544 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_BUILD_VARI_ARRAY_HPP #define STAN_MATH_REV_CORE_BUILD_VARI_ARRAY_HPP #include #include #include namespace stan { namespace math { /** * Allocates and populates a flat array of vari pointers in the autodiff arena * with the varis pointed to by the vars in the input Eigen matrix * * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic * @param x input matrix * @return Flat array of vari pointers */ template vari** build_vari_array(const Eigen::Matrix& x) { vari** x_vi_ = ChainableStack::instance_->memalloc_.alloc_array(x.size()); for (int i = 0; i < x.size(); ++i) { x_vi_[i] = x(i).vi_; } return x_vi_; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/deep_copy_vars.hpp0000644000176200001440000000462514645137106024226 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_DEEP_COPY_VARS_HPP 
#define STAN_MATH_REV_CORE_DEEP_COPY_VARS_HPP #include #include #include #include #include namespace stan { namespace math { /** * Forward arguments that do not contain vars. There * is no copying to be done. * * @tparam Arith an arithmetic type. * @param arg For lvalue references this will be passed by reference. * Otherwise it will be moved. */ template >> inline Arith deep_copy_vars(Arith&& arg) { return std::forward(arg); } /** * Copy the value of a var but reallocate a new vari * * @param arg A var * @return A new var */ inline auto deep_copy_vars(const var& arg) { return var(new vari(arg.val(), false)); } /** * Copy the vars in arg but reallocate new varis for them * * @tparam VarVec A variant of std::vector * @param arg A std::vector of vars * @return A new std::vector of vars */ template * = nullptr> inline auto deep_copy_vars(VarVec&& arg) { std::vector copy_vec(arg.size()); for (size_t i = 0; i < arg.size(); ++i) { copy_vec[i] = new vari(arg[i].val(), false); } return copy_vec; } /** * Copy the vars in arg but reallocate new varis for them * * @tparam VecContainer std::vector where T is another type containing vars * @param arg A std::vector of containers containing vars * @return A new std::vector of containers containing vars */ template * = nullptr, require_std_vector_vt* = nullptr> inline auto deep_copy_vars(VecContainer&& arg) { std::vector> copy_vec(arg.size()); for (size_t i = 0; i < arg.size(); ++i) { copy_vec[i] = deep_copy_vars(arg[i]); } return copy_vec; } /** * Copy the vars in arg but reallocate new varis for them * * @tparam EigT An Eigen type with var value type * @param arg An Eigen container of vars * @return A new Eigen container of vars */ template * = nullptr> inline auto deep_copy_vars(EigT&& arg) { return arg.unaryExpr([](auto&& x) { return var(new vari(x.val(), false)); }) .eval(); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/rev/core/ddv_vari.hpp0000644000176200001440000000063014645137106023012 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_DDV_VARI_HPP #define STAN_MATH_REV_CORE_DDV_VARI_HPP #include namespace stan { namespace math { class op_ddv_vari : public vari { protected: double ad_; double bd_; vari* cvi_; public: op_ddv_vari(double f, double a, double b, vari* cvi) : vari(f), ad_(a), bd_(b), cvi_(cvi) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/reverse_pass_callback.hpp0000644000176200001440000000237114645137106025535 0ustar liggesusers#ifndef STAN_MATH_REV_FUNCTOR_REVERSE_PASS_CALLBACK_HPP #define STAN_MATH_REV_FUNCTOR_REVERSE_PASS_CALLBACK_HPP #include namespace stan { namespace math { namespace internal { template struct reverse_pass_callback_vari : public vari_base { F rev_functor_; explicit reverse_pass_callback_vari(F&& rev_functor) : rev_functor_(std::forward(rev_functor)) { ChainableStack::instance_->var_stack_.push_back(this); } inline void chain() final { rev_functor_(); } inline void set_zero_adjoint() final {} inline void init_dependent() {} }; } // namespace internal /** * Puts a callback on the autodiff stack to be called in reverse pass. * * The intended use case is for the callable to ba a lambda function that * captures any arguments it needs to work with. All captured values must be * trivially destructible or they will leak memory! `to_AD_stack()` function can * be used to ensure that. 
* * @tparam F type of callable * @param functor funtor or other callable to call in the reverse pass */ template inline void reverse_pass_callback(F&& functor) { new internal::reverse_pass_callback_vari(std::forward(functor)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_plus_equal.hpp0000644000176200001440000000124514645137106025304 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_PLUS_EQUAL_HPP #define STAN_MATH_REV_CORE_OPERATOR_PLUS_EQUAL_HPP #include #include #include namespace stan { namespace math { template inline var_value& var_value>::operator+=( const var_value& b) { vi_ = (*this + b).vi_; return *this; } template inline var_value& var_value>::operator+=( T b) { if (b == 0.0) { return *this; } vi_ = (*this + b).vi_; return *this; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/precomp_vv_vari.hpp0000644000176200001440000000114614645137106024420 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_PRECOMP_VV_VARI_HPP #define STAN_MATH_REV_CORE_PRECOMP_VV_VARI_HPP #include #include namespace stan { namespace math { // use for single precomputed partials class precomp_vv_vari final : public op_vv_vari { protected: double da_; double db_; public: precomp_vv_vari(double val, vari* avi, vari* bvi, double da, double db) : op_vv_vari(val, avi, bvi), da_(da), db_(db) {} void chain() { avi_->adj_ += adj_ * da_; bvi_->adj_ += adj_ * db_; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/std_isnan.hpp0000644000176200001440000000073714645137106023206 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_STD_ISNAN_HPP #define STAN_MATH_REV_CORE_STD_ISNAN_HPP #include #include namespace std { /** * Checks if the given number is NaN. * * Return true if the value of the * specified variable is not a number. * * @param a Variable to test. * @return true if value is not a number. 
*/ inline bool isnan(const stan::math::var& a) { return isnan(a.val()); } } // namespace std #endif StanHeaders/inst/include/stan/math/rev/core/var_value_fwd_declare.hpp0000644000176200001440000000112314645137106025515 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VAR_VALUE_FWD_DECLARE_HPP #define STAN_MATH_REV_CORE_VAR_VALUE_FWD_DECLARE_HPP namespace stan { namespace math { // forward declaration of var template class var_value; /** * Equivalent to `Eigen::Matrix`, except that the data is stored on AD stack. * That makes these objects trivially destructible and usable in `vari`s. * * @tparam MatrixType Eigen matrix type this works as (`MatrixXd`, `VectorXd`, * ...) */ template class arena_matrix; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_unary_increment.hpp0000644000176200001440000000245414645137106026337 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_UNARY_INCREMENT_HPP #define STAN_MATH_REV_CORE_OPERATOR_UNARY_INCREMENT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Prefix increment operator for variables (C++). Following C++, * (++a) is defined to behave exactly as (a = a + 1.0) does, * but is faster and uses less memory. In particular, the * result is an assignable lvalue. * * @param a Variable to increment. * @return Reference the result of incrementing this input variable. */ inline var& operator++(var& a) { a = make_callback_var(a.val() + 1.0, [a](auto& vi) { a.adj() += vi.adj(); }); return a; } /** * Postfix increment operator for variables (C++). * * Following C++, the expression (a++) is defined to behave like * the sequence of operations * * var temp = a; a = a + 1.0; return temp; * * @param a Variable to increment. * @return Input variable. 
*/ inline var operator++(var& a, int /*dummy*/) { var temp(a); a = make_callback_var(a.val() + 1.0, [a](auto& vi) { a.adj() += vi.adj(); }); return temp; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/precomp_vvv_vari.hpp0000644000176200001440000000132014645137106024600 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_PRECOMP_VVV_VARI_HPP #define STAN_MATH_REV_CORE_PRECOMP_VVV_VARI_HPP #include #include namespace stan { namespace math { // use for single precomputed partials class precomp_vvv_vari final : public op_vvv_vari { protected: double da_; double db_; double dc_; public: precomp_vvv_vari(double val, vari* avi, vari* bvi, vari* cvi, double da, double db, double dc) : op_vvv_vari(val, avi, bvi, cvi), da_(da), db_(db), dc_(dc) {} void chain() { avi_->adj_ += adj_ * da_; bvi_->adj_ += adj_ * db_; cvi_->adj_ += adj_ * dc_; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/read_var.hpp0000644000176200001440000001627014645137106023006 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_READ_VAR_HPP #define STAN_MATH_REV_CORE_READ_VAR_HPP #include #include #include namespace stan { namespace math { /** * Functor for extracting the vari*, values, and adjoints from a matrix of var. * This functor is called using Eigen's NullaryExpr framework, which takes care * of the indexing. This removes the need to programmatically account for * whether the input is row- or column-major. 
*/ template class vi_val_adj_functor { const EigRev& var_mat; EigVari& vi_mat; EigDbl& val_mat; public: vi_val_adj_functor(const EigRev& arg1, EigVari& arg2, EigDbl& arg3) : var_mat(arg1), vi_mat(arg2), val_mat(arg3) {} inline decltype(auto) operator()(Eigen::Index row, Eigen::Index col) const { vi_mat.coeffRef(row, col) = var_mat.coeffRef(row, col).vi_; val_mat.coeffRef(row, col) = var_mat.coeffRef(row, col).vi_->val_; return var_mat.coeffRef(row, col).vi_->adj_; } inline decltype(auto) operator()(Eigen::Index index) const { vi_mat.coeffRef(index) = var_mat.coeffRef(index).vi_; val_mat.coeffRef(index) = var_mat.coeffRef(index).vi_->val_; return var_mat.coeffRef(index).vi_->adj_; } }; /** * Functor for extracting the values and adjoints from a matrix of var or vari. * This functor is called using Eigen's NullaryExpr framework. */ template class val_adj_functor { const EigRev& var_mat; EigDbl& val_mat; public: val_adj_functor(const EigRev& arg1, EigDbl& arg2) : var_mat(arg1), val_mat(arg2) {} template * = nullptr> inline decltype(auto) operator()(Eigen::Index row, Eigen::Index col) const { val_mat.coeffRef(row, col) = var_mat.coeffRef(row, col).vi_->val_; return var_mat.coeffRef(row, col).vi_->adj_; } template * = nullptr> inline decltype(auto) operator()(Eigen::Index index) const { val_mat.coeffRef(index) = var_mat.coeffRef(index).vi_->val_; return var_mat.coeffRef(index).vi_->adj_; } template * = nullptr> inline decltype(auto) operator()(Eigen::Index row, Eigen::Index col) const { val_mat.coeffRef(row, col) = var_mat.coeffRef(row, col)->val_; return var_mat.coeffRef(row, col)->adj_; } template * = nullptr> inline decltype(auto) operator()(Eigen::Index index) const { val_mat.coeffRef(index) = var_mat.coeffRef(index)->val_; return var_mat.coeffRef(index)->adj_; } }; /** * Functor for extracting the varis and values from a matrix of var. * This functor is called using Eigen's NullaryExpr framework. 
*/ template class vi_val_functor { const EigVar& var_mat; EigVari& vi_mat; public: vi_val_functor(const EigVar& arg1, EigVari& arg2) : var_mat(arg1), vi_mat(arg2) {} inline decltype(auto) operator()(Eigen::Index row, Eigen::Index col) const { vi_mat.coeffRef(row, col) = var_mat.coeffRef(row, col).vi_; return var_mat.coeffRef(row, col).vi_->val_; } inline decltype(auto) operator()(Eigen::Index index) const { vi_mat.coeffRef(index) = var_mat.coeffRef(index).vi_; return var_mat.coeffRef(index).vi_->val_; } }; /** * Functor for extracting the varis and adjoints from a matrix of var. * This functor is called using Eigen's NullaryExpr framework. */ template class vi_adj_functor { const EigVar& var_mat; EigVari& vi_mat; public: vi_adj_functor(const EigVar& arg1, EigVari& arg2) : var_mat(arg1), vi_mat(arg2) {} inline decltype(auto) operator()(Eigen::Index row, Eigen::Index col) const { vi_mat.coeffRef(row, col) = var_mat.coeffRef(row, col).vi_; return var_mat.coeffRef(row, col).vi_->adj_; } inline decltype(auto) operator()(Eigen::Index index) const { vi_mat.coeffRef(index) = var_mat.coeffRef(index).vi_; return var_mat.coeffRef(index).vi_->adj_; } }; /** * Function applying the vi_val_adj_functor to extract the vari*, values, * and adjoints of a given var matrix into separate matrices. * * @tparam EigVar type of the Eigen container of var. * @tparam EigVari type of the Eigen container of vari to be copied to. * @tparam EigDbl type of the Eigen container of doubles to be copied to. * @param[in] VarMat Input Eigen container of var. * @param[in] VariMat Output Eigen container of vari. * @param[in] ValMat Output Eigen container of values. * @param[in] AdjMat Output Eigen container of tangents. 
*/ template inline void read_vi_val_adj(const EigVar& VarMat, EigVari& VariMat, EigDbl& ValMat, EigDbl& AdjMat) { AdjMat = EigDbl::NullaryExpr(VarMat.rows(), VarMat.cols(), vi_val_adj_functor( VarMat, VariMat, ValMat)); } /** * Function applying the val_adj_functor to extract the values * and adjoints of a given var or vari matrix into separate matrices. * * @tparam EigRev type of the Eigen container of var or vari. * @tparam EigDbl type of the Eigen container of doubles to be copied to. * @param[in] VarMat Input Eigen container of var. * @param[in] ValMat Output Eigen container of values. * @param[in] AdjMat Output Eigen container of adjoints. */ template inline void read_val_adj(const EigRev& VarMat, EigDbl& ValMat, EigDbl& AdjMat) { AdjMat = EigDbl::NullaryExpr( VarMat.rows(), VarMat.cols(), val_adj_functor(VarMat, ValMat)); } /** * Function applying the vi_val_functor to extract the varis and * and values of a given var matrix into separate matrices. * * @tparam EigVar type of the Eigen container of var. * @tparam EigDbl type of the Eigen container of doubles to be copied to. * @param[in] VarMat Input Eigen container of var. * @param[in] VariMat Output Eigen container of vari. * @param[in] ValMat Output Eigen container of values. */ template inline void read_vi_val(const EigVar& VarMat, EigVari& VariMat, EigDbl& ValMat) { ValMat = EigDbl::NullaryExpr( VarMat.rows(), VarMat.cols(), vi_val_functor(VarMat, VariMat)); } /** * Function applying the vi_adj_functor to extract the varis and * and adjoints of a given var matrix into separate matrices. * * @tparam EigVar type of the Eigen container of var. * @tparam EigDbl type of the Eigen container of doubles to be copied to. * @param[in] VarMat Input Eigen container of var. * @param[in] VariMat Output Eigen container of vari. * @param[in] AdjMat Output Eigen container of adjoints. 
*/ template inline void read_vi_adj(const EigVar& VarMat, EigVari& VariMat, EigDbl& AdjMat) { AdjMat = EigDbl::NullaryExpr( VarMat.rows(), VarMat.cols(), vi_adj_functor(VarMat, VariMat)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/dvd_vari.hpp0000644000176200001440000000063014645137106023012 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_DVD_VARI_HPP #define STAN_MATH_REV_CORE_DVD_VARI_HPP #include namespace stan { namespace math { class op_dvd_vari : public vari { protected: double ad_; vari* bvi_; double cd_; public: op_dvd_vari(double f, double a, vari* bvi, double c) : vari(f), ad_(a), bvi_(bvi), cd_(c) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/Eigen_NumTraits.hpp0000644000176200001440000003665614645137106024272 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_EIGEN_NUMTRAITS_HPP #define STAN_MATH_REV_CORE_EIGEN_NUMTRAITS_HPP #include #include #include #include #include #include #include #include namespace Eigen { /** * Numerical traits template override for Eigen for automatic * gradient variables. * * Documentation here: * http://eigen.tuxfamily.org/dox/structEigen_1_1NumTraits.html */ template <> struct NumTraits : GenericNumTraits { using Real = stan::math::var; using NonInteger = stan::math::var; using Nested = stan::math::var; using Literal = stan::math::var; /** * Return the precision for stan::math::var delegates * to precision for double. * * @return precision */ static inline Real dummy_precision() { return NumTraits::dummy_precision(); } static inline Real epsilon() { return NumTraits::epsilon(); } static inline Real highest() { return NumTraits::highest(); } static inline Real lowest() { return NumTraits::lowest(); } enum { /** * stan::math::var is not complex. */ IsComplex = 0, /** * stan::math::var is not an integer. */ IsInteger = 0, /** * stan::math::var is signed. */ IsSigned = 1, /** * stan::math::var does not require initialization. 
*/ RequireInitialization = 0, /** * Twice the cost of copying a double. */ ReadCost = 2 * NumTraits::ReadCost, /** * This is just forward cost, but it's the cost of a single * addition (plus memory overhead) in the forward direction. */ AddCost = NumTraits::AddCost, /** * Multiply cost is single multiply going forward, but there's * also memory allocation cost. */ MulCost = NumTraits::MulCost }; /** * Return the number of decimal digits that can be represented * without change. Delegates to * std::numeric_limits::digits10(). */ static int digits10() { return std::numeric_limits::digits10; } }; /** * Traits specialization for Eigen binary operations for reverse-mode * autodiff and `double` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits { using ReturnType = stan::math::var; }; /** * Traits specialization for Eigen binary operations for `double` and * reverse-mode autodiff arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits { using ReturnType = stan::math::var; }; /** * Traits specialization for Eigen binary operations for reverse-mode * autodiff and `int` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits { using ReturnType = stan::math::var; }; /** * Traits specialization for Eigen binary operations for `int` and * reverse-mode autodiff arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits { using ReturnType = stan::math::var; }; /** * Traits specialization for Eigen binary operations for reverse-mode autodiff * arguments. 
* * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits { using ReturnType = stan::math::var; }; /** * Traits specialization for Eigen binary operations for `double` and * complex autodiff arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for complex * autodiff and `double` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, double, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for `int` and * complex autodiff arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for complex * autodiff and `int` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, int, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for autodiff and * complex `double` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for complex * double and autodiff arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, stan::math::var, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for complex * double and complex autodiff arguments. 
* * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, std::complex, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for complex * autodiff and complex double arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, std::complex, BinaryOp> { using ReturnType = std::complex; }; template struct ScalarBinaryOpTraits, BinaryOp> { using ReturnType = std::complex; }; template struct ScalarBinaryOpTraits, stan::math::var, BinaryOp> { using ReturnType = std::complex; }; template struct ScalarBinaryOpTraits, std::complex, BinaryOp> { using ReturnType = std::complex; }; namespace internal { /** * Enable linear access of inputs when using read_vi_val_adj. */ template struct functor_has_linear_access< stan::math::vi_val_adj_functor> { enum { ret = 1 }; }; /** * Enable linear access of inputs when using read_val_adj. */ template struct functor_has_linear_access> { enum { ret = 1 }; }; /** * Enable linear access of inputs when using read_vi_val. */ template struct functor_has_linear_access> { enum { ret = 1 }; }; /** * Enable linear access of inputs when using read_vi_adj. */ template struct functor_has_linear_access> { enum { ret = 1 }; }; /** * Partial specialization of Eigen's remove_all struct to stop * Eigen removing pointer from vari* variables */ template <> struct remove_all { using type = stan::math::vari*; }; /** * Specialization of matrix-vector products for reverse-mode * autodiff variables. 
* * @tparam Index index type * @tparam LhsMapper left-hand side data and stride * @tparam CongjuageLhs left-hand side conjugacy flag * @tparam CongjuageRhs right-hand side conjugacy flag * @tparam RhsMapper right-hand side data and stride * @tparam Version integer version number */ template struct general_matrix_vector_product { using LhsScalar = stan::math::var; using RhsScalar = stan::math::var; using ResScalar = stan::math::var; enum { LhsStorageOrder = ColMajor }; EIGEN_DONT_INLINE static void run(Index rows, Index cols, const LhsMapper& lhsMapper, const RhsMapper& rhsMapper, ResScalar* res, Index resIncr, const ResScalar& alpha) { const LhsScalar* lhs = lhsMapper.data(); const Index lhsStride = lhsMapper.stride(); const RhsScalar* rhs = rhsMapper.data(); const Index rhsIncr = rhsMapper.stride(); run(rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); } EIGEN_DONT_INLINE static void run(Index rows, Index cols, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsIncr, ResScalar* res, Index resIncr, const ResScalar& alpha) { using stan::math::gevv_vvv_vari; using stan::math::var; for (Index i = 0; i < rows; ++i) { res[i * resIncr] += var( new gevv_vvv_vari(&alpha, &lhs[i], lhsStride, rhs, rhsIncr, cols)); } } }; template struct general_matrix_vector_product { using LhsScalar = stan::math::var; using RhsScalar = stan::math::var; using ResScalar = stan::math::var; enum { LhsStorageOrder = RowMajor }; EIGEN_DONT_INLINE static void run(Index rows, Index cols, const LhsMapper& lhsMapper, const RhsMapper& rhsMapper, ResScalar* res, Index resIncr, const RhsScalar& alpha) { const LhsScalar* lhs = lhsMapper.data(); const Index lhsStride = lhsMapper.stride(); const RhsScalar* rhs = rhsMapper.data(); const Index rhsIncr = rhsMapper.stride(); run(rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); } EIGEN_DONT_INLINE static void run(Index rows, Index cols, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsIncr, 
ResScalar* res, Index resIncr, const RhsScalar& alpha) { for (Index i = 0; i < rows; i++) { res[i * resIncr] += stan::math::var(new stan::math::gevv_vvv_vari( &alpha, (static_cast(LhsStorageOrder) == static_cast(ColMajor)) ? (&lhs[i]) : (&lhs[i * lhsStride]), (static_cast(LhsStorageOrder) == static_cast(ColMajor)) ? (lhsStride) : (1), rhs, rhsIncr, cols)); } } }; #if EIGEN_VERSION_AT_LEAST(3, 3, 8) template struct general_matrix_matrix_product< Index, stan::math::var, LhsStorageOrder, ConjugateLhs, stan::math::var, RhsStorageOrder, ConjugateRhs, ColMajor, ResInnerStride> { #else template struct general_matrix_matrix_product { #endif using LhsScalar = stan::math::var; using RhsScalar = stan::math::var; using ResScalar = stan::math::var; using Traits = gebp_traits; using LhsMapper = const_blas_data_mapper; using RhsMapper = const_blas_data_mapper; EIGEN_DONT_INLINE #if EIGEN_VERSION_AT_LEAST(3, 3, 8) static void run(Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resIncr, Index resStride, const ResScalar& alpha, level3_blocking& /* blocking */, GemmParallelInfo* /* info = 0 */) #else static void run(Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha, level3_blocking& /* blocking */, GemmParallelInfo* /* info = 0 */) #endif { for (Index i = 0; i < cols; i++) { general_matrix_vector_product< Index, LhsScalar, LhsMapper, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs>::run(rows, depth, lhs, lhsStride, &rhs[static_cast(RhsStorageOrder) == static_cast(ColMajor) ? i * rhsStride : i], static_cast(RhsStorageOrder) == static_cast(ColMajor) ? 
1 : rhsStride, &res[i * resStride], 1, alpha); } } // namespace internal EIGEN_DONT_INLINE static void run(Index rows, Index cols, Index depth, const LhsMapper& lhsMapper, const RhsMapper& rhsMapper, ResScalar* res, Index resStride, const ResScalar& alpha, level3_blocking& blocking, GemmParallelInfo* info = 0) { const LhsScalar* lhs = lhsMapper.data(); const Index lhsStride = lhsMapper.stride(); const RhsScalar* rhs = rhsMapper.data(); const Index rhsStride = rhsMapper.stride(); run(rows, cols, depth, lhs, lhsStride, rhs, rhsStride, res, resStride, alpha, blocking, info); } }; } // namespace internal } // namespace Eigen #endif StanHeaders/inst/include/stan/math/rev/core/vdv_vari.hpp0000644000176200001440000000063414645137106023040 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VDV_VARI_HPP #define STAN_MATH_REV_CORE_VDV_VARI_HPP #include namespace stan { namespace math { class op_vdv_vari : public vari { protected: vari* avi_; double bd_; vari* cvi_; public: op_vdv_vari(double f, vari* avi, double b, vari* cvi) : vari(f), avi_(avi), bd_(b), cvi_(cvi) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/profiling.hpp0000644000176200001440000001162314645137106023211 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_PROFILING_HPP #define STAN_MATH_REV_CORE_PROFILING_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Class used for storing profiling information. 
*/ class profile_info { private: bool active_; double fwd_pass_time_; double rev_pass_time_; size_t n_fwd_AD_passes_; size_t n_fwd_no_AD_passes_; size_t n_rev_passes_; size_t chain_stack_size_sum_; size_t nochain_stack_size_sum_; std::chrono::time_point fwd_pass_tp_; std::chrono::time_point rev_pass_tp_; size_t start_chain_stack_size_; size_t start_nochain_stack_size_; public: profile_info() : active_(false), fwd_pass_time_(0.0), rev_pass_time_(0.0), n_fwd_AD_passes_(0), n_fwd_no_AD_passes_(0), n_rev_passes_(0), chain_stack_size_sum_(0), nochain_stack_size_sum_(0), fwd_pass_tp_(std::chrono::steady_clock::now()), rev_pass_tp_(std::chrono::steady_clock::now()), start_chain_stack_size_(0), start_nochain_stack_size_(0) {} bool is_active() const noexcept { return active_; } template void fwd_pass_start() { if (!is_constant::value) { start_chain_stack_size_ = ChainableStack::instance_->var_stack_.size(); start_nochain_stack_size_ = ChainableStack::instance_->var_nochain_stack_.size(); } fwd_pass_tp_ = std::chrono::steady_clock::now(); active_ = true; } template void fwd_pass_stop() { if (!is_constant::value) { n_fwd_AD_passes_++; chain_stack_size_sum_ += (ChainableStack::instance_->var_stack_.size() - start_chain_stack_size_ - 1); nochain_stack_size_sum_ += (ChainableStack::instance_->var_nochain_stack_.size() - start_nochain_stack_size_); } else { n_fwd_no_AD_passes_++; } fwd_pass_time_ += std::chrono::duration( std::chrono::steady_clock::now() - fwd_pass_tp_) .count(); active_ = false; } void rev_pass_start() { rev_pass_tp_ = std::chrono::steady_clock::now(); } void rev_pass_stop() { rev_pass_time_ += std::chrono::duration( std::chrono::steady_clock::now() - rev_pass_tp_) .count(); n_rev_passes_++; } size_t get_chain_stack_used() const noexcept { return chain_stack_size_sum_; }; size_t get_nochain_stack_used() const noexcept { return nochain_stack_size_sum_; }; size_t get_num_no_AD_fwd_passes() const noexcept { return n_fwd_no_AD_passes_; } size_t 
get_num_AD_fwd_passes() const noexcept { return n_fwd_AD_passes_; } size_t get_num_fwd_passes() const noexcept { return n_fwd_AD_passes_ + n_fwd_no_AD_passes_; } double get_fwd_time() const noexcept { return fwd_pass_time_; } size_t get_num_rev_passes() const noexcept { return n_rev_passes_; } double get_rev_time() const noexcept { return rev_pass_time_; } }; using profile_key = std::pair; using profile_map = std::map; /** * Profiles C++ lines where the object is in scope. * When T is var, the constructor starts the profile for the forward pass * and places a var with a callback to stop the reverse pass on the * AD tape. The destructor stops the profile for the forward pass and * places a var with a callback to start the profile for the reverse pass. * When T is not var, the constructor and destructor only profile the * * * @tparam T type of profile class. If var, the created object is used * to profile reverse mode AD. Only profiles the forward pass otherwise. */ template class profile { profile_key key_; profile_info* profile_; public: profile(std::string name, profile_map& profiles) : key_({name, std::this_thread::get_id()}) { profile_map::iterator p = profiles.find(key_); if (p == profiles.end()) { profiles[key_] = profile_info(); } profile_ = &profiles[key_]; if (profile_->is_active()) { std::ostringstream msg; msg << "Profile '" << key_.first << "' already started!"; throw std::runtime_error(msg.str()); } profile_->fwd_pass_start(); if (!is_constant::value) { reverse_pass_callback( [profile = this->profile_]() mutable { profile->rev_pass_stop(); }); } } ~profile() { profile_->fwd_pass_stop(); if (!is_constant::value) { reverse_pass_callback( [profile = this->profile_]() mutable { profile->rev_pass_start(); }); } } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_not_equal.hpp0000644000176200001440000000504514645137106025123 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_NOT_EQUAL_HPP #define 
STAN_MATH_REV_CORE_OPERATOR_NOT_EQUAL_HPP #include #include #include #include #include namespace stan { namespace math { /** * Inequality operator comparing two variables' values (C++). * \f[ \mbox{operator!=}(x, y) = \begin{cases} 0 & \mbox{if } x = y\\ 1 & \mbox{if } x \neq y \\[6pt] 0 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable. * @param b Second variable. * @return True if the first variable's value is not the same as the * second's. */ inline bool operator!=(const var& a, const var& b) { return a.val() != b.val(); } /** * Inequality operator comparing a variable's value and a double * (C++). * * @tparam Arith An arithmetic type * @param a First variable. * @param b Second value. * @return True if the first variable's value is not the same as the * second value. */ template * = nullptr> inline bool operator!=(const var& a, Arith b) { return a.val() != b; } /** * Inequality operator comparing a double and a variable's value * (C++). * * @tparam Arith An arithmetic type * @param a First value. * @param b Second variable. * @return True if the first value is not the same as the * second variable's value. 
*/ template * = nullptr> inline bool operator!=(Arith a, const var& b) { return a != b.val(); } /** * Return `false` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero * * @param x real number * @param z complex number * @return `false` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero */ inline bool operator!=(const var& x, const std::complex& z) { return !(x == z.real() && 0 == z.imag()); } /** * Return `false` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero * * @param z complex number * @param y real number * @return `false` if the real number is equal to the real part of the * complex number, and the imaginary part of the complex number is zero */ inline bool operator!=(const std::complex& z, const var& y) { return !(z.real() == y && z.imag() == 0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_logical_and.hpp0000644000176200001440000000275514645137106025375 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_LOGICAL_AND_HPP #define STAN_MATH_REV_CORE_OPERATOR_LOGICAL_AND_HPP #include #include namespace stan { namespace math { /** * Return the logical conjunction of the values of the two * arguments as defined by &&. * * @param[in] x first argument * @param[in] y second argument * @return conjunction of the arguments' values */ inline bool operator&&(const var& x, const var& y) { return x.val() && y.val(); } /** * Return the logical conjunction of the values of the two * arguments as defined by &&. 
* * @tparam Var value type of a var * @tparam Arith An arithmetic type * @param[in] x first argument * @param[in] y second argument * @return conjunction of first argument's value and second * argument */ template * = nullptr> inline bool operator&&(const var& x, Arith y) { return x.val() && y; } /** * Return the logical conjunction of the values of the two * arguments as defined by &&. * * @tparam Var value type of a var * @tparam Arith An arithmetic type * @param[in] x first argument * @param[in] y second argument * @return conjunction of first argument and second argument's * value */ template * = nullptr> inline bool operator&&(Arith x, const var& y) { return x && y.val(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/vd_vari.hpp0000644000176200001440000000055614645137106022655 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_VD_VARI_HPP #define STAN_MATH_REV_CORE_VD_VARI_HPP #include namespace stan { namespace math { class op_vd_vari : public vari { protected: vari* avi_; double bd_; public: op_vd_vari(double f, vari* avi, double b) : vari(f), avi_(avi), bd_(b) {} }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_unary_decrement.hpp0000644000176200001440000000252014645137106026313 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_UNARY_DECREMENT_HPP #define STAN_MATH_REV_CORE_OPERATOR_UNARY_DECREMENT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Prefix decrement operator for variables (C++). * * Following C++, (--a) is defined to behave exactly as * * a = a - 1.0) * * does, but is faster and uses less memory. In particular, * the result is an assignable lvalue. * * @param a Variable to decrement. * @return Reference the result of decrementing this input variable. 
*/ inline var& operator--(var& a) { a = make_callback_var(a.val() - 1.0, [a](auto& vi) { a.adj() += vi.adj(); }); return a; } /** * Postfix decrement operator for variables (C++). * * Following C++, the expression (a--) is defined to * behave like the sequence of operations * * var temp = a; a = a - 1.0; return temp; * * @param a Variable to decrement. * @return Input variable. */ inline var operator--(var& a, int /*dummy*/) { var temp(a); a = make_callback_var(a.val() - 1.0, [a](auto& vi) { a.adj() += vi.adj(); }); return temp; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_multiplication.hpp0000644000176200001440000001066314645137106026173 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_MULTIPLICATION_HPP #define STAN_MATH_REV_CORE_OPERATOR_MULTIPLICATION_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { class multiply_vv_vari final : public op_vv_vari { public: multiply_vv_vari(vari* avi, vari* bvi) : op_vv_vari(avi->val_ * bvi->val_, avi, bvi) {} void chain() { avi_->adj_ += bvi_->val_ * adj_; bvi_->adj_ += avi_->val_ * adj_; } }; class multiply_vd_vari final : public op_vd_vari { public: multiply_vd_vari(vari* avi, double b) : op_vd_vari(avi->val_ * b, avi, b) {} void chain() { avi_->adj_ += adj_ * bd_; } }; } // namespace internal /** * Multiplication operator for two variables (C++). * * The partial derivatives are * * \f$\frac{\partial}{\partial x} (x * y) = y\f$, and * * \f$\frac{\partial}{\partial y} (x * y) = x\f$. 
* \f[ \mbox{operator*}(x, y) = \begin{cases} xy & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator*}(x, y)}{\partial x} = \begin{cases} y & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator*}(x, y)}{\partial y} = \begin{cases} x & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param a First variable operand. * @param b Second variable operand. * @return Variable result of multiplying operands. */ inline var operator*(const var& a, const var& b) { return {new internal::multiply_vv_vari(a.vi_, b.vi_)}; } /** * Multiplication operator for a variable and a scalar (C++). * * The partial derivative for the variable is * * \f$\frac{\partial}{\partial x} (x * c) = c\f$, and * * @tparam Arith An arithmetic type * @param a Variable operand. * @param b Scalar operand. * @return Variable result of multiplying operands. */ template * = nullptr> inline var operator*(const var& a, Arith b) { if (b == 1.0) { return a; } return {new internal::multiply_vd_vari(a.vi_, b)}; } /** * Multiplication operator for a scalar and a variable (C++). * * The partial derivative for the variable is * * \f$\frac{\partial}{\partial y} (c * y) = c\f$. * * @tparam Arith An arithmetic type * @param a Scalar operand. * @param b Variable operand. * @return Variable result of multiplying the operands. */ template * = nullptr> inline var operator*(Arith a, const var& b) { if (a == 1.0) { return b; } return {new internal::multiply_vd_vari(b.vi_, a)}; // by symmetry } /** * Return the product of std::complex arguments. 
* * @param[in] x first argument * @param[in] y second argument * @return product of arguments */ inline std::complex operator*( const std::complex& x, const std::complex& y) { return internal::complex_multiply(x, y); } /** * Return the product of std::complex and * std::complex arguments. * * @param[in] x first argument * @param[in] y second argument * @return product of arguments */ inline std::complex operator*( const std::complex& x, const std::complex& y) { return internal::complex_multiply(x, y); } /** * Return the product of std::complex and * std::complex arguments. * * @param[in] x first argument * @param[in] y second argument * @return product of arguments */ inline std::complex operator*( const std::complex& x, const std::complex& y) { return internal::complex_multiply(x, y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/operator_division.hpp0000644000176200001440000002404414645137106024760 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_OPERATOR_DIVISION_HPP #define STAN_MATH_REV_CORE_OPERATOR_DIVISION_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Division operator for two variables (C++). * * The partial derivatives for the variables are * * \f$\frac{\partial}{\partial x} (x/y) = 1/y\f$, and * * \f$\frac{\partial}{\partial y} (x/y) = -x / y^2\f$. 
* \f[ \mbox{operator/}(x, y) = \begin{cases} \frac{x}{y} & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator/}(x, y)}{\partial x} = \begin{cases} \frac{1}{y} & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{operator/}(x, y)}{\partial y} = \begin{cases} -\frac{x}{y^2} & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @param dividend First variable operand. * @param divisor Second variable operand. * @return Variable result of dividing the first variable by the * second. */ inline var operator/(const var& dividend, const var& divisor) { return make_callback_var( dividend.val() / divisor.val(), [dividend, divisor](auto&& vi) { dividend.adj() += vi.adj() / divisor.val(); divisor.adj() -= vi.adj() * dividend.val() / (divisor.val() * divisor.val()); }); } /** * Division operator for dividing a variable by a scalar (C++). * * The derivative with respect to the variable is * * \f$\frac{\partial}{\partial x} (x/c) = 1/c\f$. * * @tparam Var value type of a var * @tparam Arith An arithmetic type * @param dividend Variable operand. * @param divisor Scalar operand. * @return Variable result of dividing the variable by the scalar. */ template * = nullptr> inline var operator/(const var& dividend, Arith divisor) { if (divisor == 1.0) { return dividend; } return make_callback_var( dividend.val() / divisor, [dividend, divisor](auto&& vi) { dividend.adj() += vi.adj() / divisor; }); } /** * Division operator for dividing a scalar by a variable (C++). * * The derivative with respect to the variable is * * \f$\frac{d}{d y} (c/y) = -c / y^2\f$. * * @tparam Arith An arithmetic type * @param dividend Scalar operand. * @param divisor Variable operand. 
* @return Quotient of the dividend and divisor. */ template * = nullptr> inline var operator/(Arith dividend, const var& divisor) { return make_callback_var( dividend / divisor.val(), [dividend, divisor](auto&& vi) { divisor.adj() -= vi.adj() * dividend / (divisor.val() * divisor.val()); }); } /** * Return matrix divided by scalar. * * @tparam Mat A type inheriting from `EigenBase` with an `Arithmetic` scalar * type. * @param[in] m specified matrix or expression * @param[in] c specified scalar * @return matrix divided by the scalar */ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_st_var_or_arithmetic* = nullptr, require_any_st_var* = nullptr> inline auto divide(const Mat& m, Scalar c) { if (!is_constant::value && !is_constant::value) { arena_t> arena_m = m; var arena_c = c; auto inv_c = (1.0 / arena_c.val()); arena_t> res = inv_c * arena_m.val(); reverse_pass_callback([arena_c, inv_c, arena_m, res]() mutable { auto inv_times_adj = (inv_c * res.adj().array()).eval(); arena_c.adj() -= (inv_times_adj * res.val().array()).sum(); arena_m.adj().array() += inv_times_adj; }); return promote_scalar_t(res); } else if (!is_constant::value) { arena_t> arena_m = m; auto inv_c = (1.0 / value_of(c)); arena_t> res = inv_c * arena_m.val(); reverse_pass_callback([inv_c, arena_m, res]() mutable { arena_m.adj().array() += inv_c * res.adj_op().array(); }); return promote_scalar_t(res); } else { var arena_c = c; auto inv_c = (1.0 / arena_c.val()); arena_t> res = inv_c * value_of(m).array(); reverse_pass_callback([arena_c, inv_c, res]() mutable { arena_c.adj() -= inv_c * (res.adj().array() * res.val().array()).sum(); }); return promote_scalar_t(res); } } /** * Return scalar divided by matrix. * * @tparam Mat Either a type inheriting from `EigenBase` with a scalar type of * `var` or a `var_value` with type `T` inheriting from `EigenBase`. 
* @param[in] m specified matrix or expression * @param[in] c specified scalar * @return matrix divided by the scalar */ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_st_var_or_arithmetic* = nullptr, require_any_st_var* = nullptr> inline auto divide(Scalar c, const Mat& m) { if (!is_constant::value && !is_constant::value) { arena_t> arena_m = m; auto inv_m = to_arena(arena_m.val().array().inverse()); var arena_c = c; arena_t> res = arena_c.val() * inv_m; reverse_pass_callback([arena_c, inv_m, arena_m, res]() mutable { auto inv_times_res = (inv_m * res.adj().array()).eval(); arena_m.adj().array() -= inv_times_res * res.val().array(); arena_c.adj() += (inv_times_res).sum(); }); return promote_scalar_t(res); } else if (!is_constant::value) { arena_t> arena_m = m; auto inv_m = to_arena(arena_m.val().array().inverse()); arena_t> res = value_of(c) * inv_m; reverse_pass_callback([inv_m, arena_m, res]() mutable { arena_m.adj().array() -= inv_m * res.adj().array() * res.val().array(); }); return promote_scalar_t(res); } else { auto inv_m = to_arena(value_of(m).array().inverse()); var arena_c = c; arena_t> res = arena_c.val() * inv_m; reverse_pass_callback([arena_c, inv_m, res]() mutable { arena_c.adj() += (inv_m * res.adj().array()).sum(); }); return promote_scalar_t(res); } } /// /** * Return a matrix divided by a matrix elementwise. * @tparam Mat1 Either a type inheriting from `EigenBase` or a `var_value` * with type `T` inheriting from `EigenBase`. * @tparam Mat2 Either a type inheriting from `EigenBase` or a `var_value` * with type `T` inheriting from `EigenBase`. 
* @param[in] m1 specified matrix or expression * @param[in] m2 specified matrix or expression */ template * = nullptr, require_any_matrix_st* = nullptr> inline auto divide(const Mat1& m1, const Mat2& m2) { if (!is_constant::value && !is_constant::value) { arena_t> arena_m1 = m1; arena_t> arena_m2 = m2; auto inv_m2 = to_arena(arena_m2.val().array().inverse()); using val_ret = decltype((inv_m2 * arena_m1.val().array()).matrix().eval()); using ret_type = return_var_matrix_t; arena_t res = (inv_m2.array() * arena_m1.val().array()).matrix(); reverse_pass_callback([inv_m2, arena_m1, arena_m2, res]() mutable { auto inv_times_res = (inv_m2 * res.adj().array()).eval(); arena_m1.adj().array() += inv_times_res; arena_m2.adj().array() -= inv_times_res * res.val().array(); }); return ret_type(res); } else if (!is_constant::value) { arena_t> arena_m1 = value_of(m1); arena_t> arena_m2 = m2; auto inv_m2 = to_arena(arena_m2.val().array().inverse()); using val_ret = decltype((inv_m2 * arena_m1.array()).matrix().eval()); using ret_type = return_var_matrix_t; arena_t res = (inv_m2.array() * arena_m1.array()).matrix(); reverse_pass_callback([inv_m2, arena_m1, arena_m2, res]() mutable { arena_m2.adj().array() -= inv_m2 * res.adj().array() * res.val().array(); }); return ret_type(res); } else { arena_t> arena_m1 = m1; arena_t> arena_m2 = value_of(m2); auto inv_m2 = to_arena(arena_m2.array().inverse()); using val_ret = decltype((inv_m2 * arena_m1.val().array()).matrix().eval()); using ret_type = return_var_matrix_t; arena_t res = (inv_m2.array() * arena_m1.val().array()).matrix(); reverse_pass_callback([inv_m2, arena_m1, arena_m2, res]() mutable { arena_m1.adj().array() += inv_m2 * res.adj().array(); }); return ret_type(res); } } template * = nullptr> inline auto operator/(const T1& dividend, const T2& divisor) { return divide(dividend, divisor); } inline std::complex operator/(const std::complex& x1, const std::complex& x2) { return internal::complex_divide(x1, x2); } } // namespace math 
} // namespace stan #endif StanHeaders/inst/include/stan/math/rev/core/std_numeric_limits.hpp0000644000176200001440000000652014645137106025115 0ustar liggesusers#ifndef STAN_MATH_REV_CORE_STD_NUMERIC_LIMITS_HPP #define STAN_MATH_REV_CORE_STD_NUMERIC_LIMITS_HPP #include #include namespace std { /** * Specialization of numeric limits for var objects. * * This implementation of std::numeric_limits * is used to treat var objects like value_types. */ template struct numeric_limits> { typedef stan::promote_args_t value_type; static constexpr bool is_specialized = true; static constexpr stan::math::var_value min() noexcept { return numeric_limits::min(); } static constexpr stan::math::var_value max() noexcept { return numeric_limits::max(); } static constexpr int digits = numeric_limits::digits; static constexpr int digits10 = numeric_limits::digits10; static constexpr int max_digits10 = numeric_limits::max_digits10; static constexpr bool is_signed = numeric_limits::is_signed; static constexpr bool is_integer = numeric_limits::is_integer; static constexpr bool is_exact = numeric_limits::is_exact; static constexpr int radix = numeric_limits::radix; static constexpr stan::math::var_value epsilon() noexcept { return numeric_limits::epsilon(); } static constexpr stan::math::var_value round_error() noexcept { return numeric_limits::round_error(); } static constexpr T lowest() noexcept { return numeric_limits::lowest(); }; static constexpr int min_exponent = numeric_limits::min_exponent; static constexpr int min_exponent10 = numeric_limits::min_exponent10; static constexpr int max_exponent = numeric_limits::max_exponent; static constexpr int max_exponent10 = numeric_limits::max_exponent10; static constexpr bool has_infinity = numeric_limits::has_infinity; static constexpr bool has_quiet_NaN = numeric_limits::has_quiet_NaN; static constexpr bool has_signaling_NaN = numeric_limits::has_signaling_NaN; static constexpr float_denorm_style has_denorm = numeric_limits::has_denorm; 
static constexpr bool has_denorm_loss = numeric_limits::has_denorm_loss; static constexpr stan::math::var_value infinity() noexcept { return numeric_limits::infinity(); } static constexpr stan::math::var_value quiet_NaN() noexcept { return numeric_limits::quiet_NaN(); } static constexpr stan::math::var_value signaling_NaN() noexcept { return numeric_limits::signaling_NaN(); } static constexpr stan::math::var_value denorm_min() noexcept { return numeric_limits::denorm_min(); } static constexpr bool is_iec559 = numeric_limits::is_iec559; static constexpr bool is_bounded = numeric_limits::is_bounded; static constexpr bool is_modulo = numeric_limits::is_modulo; static constexpr bool traps = numeric_limits::traps; static constexpr bool tinyness_before = numeric_limits::tinyness_before; static constexpr float_round_style round_style = numeric_limits::round_style; }; } // namespace std #endif StanHeaders/inst/include/stan/math/memory/0000755000176200001440000000000014645137106020270 5ustar liggesusersStanHeaders/inst/include/stan/math/memory/stack_alloc.hpp0000644000176200001440000002141314645137106023261 0ustar liggesusers#ifndef STAN_MATH_MEMORY_STACK_ALLOC_HPP #define STAN_MATH_MEMORY_STACK_ALLOC_HPP // TODO(Bob): replaces this ifdef in C++11, until then this // is best we can do to get safe pointer casts to uints. #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return true if the specified pointer is aligned * on the number of bytes. * * This doesn't really make sense other than for powers of 2. * * @param ptr Pointer to test. * @param bytes_aligned Number of bytes of alignment required. * @return true if pointer is aligned. * @tparam Type of object to which pointer points. 
*/ template bool is_aligned(T* ptr, unsigned int bytes_aligned) { return (reinterpret_cast(ptr) % bytes_aligned) == 0U; } namespace internal { const size_t DEFAULT_INITIAL_NBYTES = 1 << 16; // 64KB // FIXME: enforce alignment // big fun to inline, but only called twice inline char* eight_byte_aligned_malloc(size_t size) { char* ptr = static_cast(malloc(size)); if (!ptr) { return ptr; // malloc failed to alloc } if (!is_aligned(ptr, 8U)) { std::stringstream s; s << "invalid alignment to 8 bytes, ptr=" << reinterpret_cast(ptr) << std::endl; throw std::runtime_error(s.str()); } return ptr; } } // namespace internal /** * An instance of this class provides a memory pool through * which blocks of raw memory may be allocated and then collected * simultaneously. * * This class is useful in settings where large numbers of small * objects are allocated and then collected all at once. This may * include objects whose destructors have no effect. * * Memory is allocated on a stack of blocks. Each block allocated * is twice as large as the previous one. The memory may be * recovered, with the blocks being reused, or all blocks may be * freed, resetting the stack of blocks to its original state. * * Alignment up to 8 byte boundaries guaranteed for the first malloc, * and after that it's up to the caller. On 64-bit architectures, * all struct values should be padded to 8-byte boundaries if they * contain an 8-byte member or a virtual function. 
*/ class stack_alloc { private: std::vector blocks_; // storage for blocks, // may be bigger than cur_block_ std::vector sizes_; // could store initial & shift for others size_t cur_block_; // index into blocks_ for next alloc char* cur_block_end_; // ptr to cur_block_ptr_ + sizes_[cur_block_] char* next_loc_; // ptr to next available spot in cur // block // next three for keeping track of nested allocations on top of stack: std::vector nested_cur_blocks_; std::vector nested_next_locs_; std::vector nested_cur_block_ends_; /** * Moves us to the next block of memory, allocating that block * if necessary, and allocates len bytes of memory within that * block. * * @param len Number of bytes to allocate. * @return A pointer to the allocated memory. */ char* move_to_next_block(size_t len) { char* result; ++cur_block_; // Find the next block (if any) containing at least len bytes. while ((cur_block_ < blocks_.size()) && (sizes_[cur_block_] < len)) { ++cur_block_; } // Allocate a new block if necessary. if (unlikely(cur_block_ >= blocks_.size())) { // New block should be max(2*size of last block, len) bytes. size_t newsize = sizes_.back() * 2; if (newsize < len) { newsize = len; } blocks_.push_back(internal::eight_byte_aligned_malloc(newsize)); if (!blocks_.back()) { throw std::bad_alloc(); } sizes_.push_back(newsize); } result = blocks_[cur_block_]; // Get the object's state back in order. next_loc_ = result + len; cur_block_end_ = result + sizes_[cur_block_]; return result; } public: /** * Construct a resizable stack allocator initially holding the * specified number of bytes. * * @param initial_nbytes Initial number of bytes for the * allocator. Defaults to (1 << 16) = 64KB initial bytes. * @throws std::runtime_error if the underlying malloc is not 8-byte * aligned. 
*/ explicit stack_alloc(size_t initial_nbytes = internal::DEFAULT_INITIAL_NBYTES) : blocks_(1, internal::eight_byte_aligned_malloc(initial_nbytes)), sizes_(1, initial_nbytes), cur_block_(0), cur_block_end_(blocks_[0] + initial_nbytes), next_loc_(blocks_[0]) { if (!blocks_[0]) { throw std::bad_alloc(); // no msg allowed in bad_alloc ctor } } /** * Destroy this memory allocator. * * This is implemented as a no-op as there is no destruction * required. */ ~stack_alloc() { // free ALL blocks for (auto& block : blocks_) { if (block) { free(block); } } } /** * Return a newly allocated block of memory of the appropriate * size managed by the stack allocator. * * The allocated pointer will be 8-byte aligned. * * This function may call C++'s malloc() function, * with any exceptions percolated through this function. * * @param len Number of bytes to allocate. * @return A pointer to the allocated memory. */ inline void* alloc(size_t len) { size_t pad = len % 8 == 0 ? 0 : 8 - len % 8; // Typically, just return and increment the next location. char* result = next_loc_; next_loc_ += len + pad; // Occasionally, we have to switch blocks. if (unlikely(next_loc_ >= cur_block_end_)) { result = move_to_next_block(len); } return reinterpret_cast(result); } /** * Allocate an array on the arena of the specified size to hold * values of the specified template parameter type. * * @tparam T type of entries in allocated array. * @param[in] n size of array to allocate. * @return new array allocated on the arena. */ template inline T* alloc_array(size_t n) { return static_cast(alloc(n * sizeof(T))); } /** * Recover all the memory used by the stack allocator. The stack * of memory blocks allocated so far will be available for further * allocations. To free memory back to the system, use the * function free_all(). 
*/ inline void recover_all() { cur_block_ = 0; next_loc_ = blocks_[0]; cur_block_end_ = next_loc_ + sizes_[0]; } /** * Store current positions before doing nested operation so can * recover back to start. */ inline void start_nested() { nested_cur_blocks_.push_back(cur_block_); nested_next_locs_.push_back(next_loc_); nested_cur_block_ends_.push_back(cur_block_end_); } /** * recover memory back to the last start_nested call. */ inline void recover_nested() { if (unlikely(nested_cur_blocks_.empty())) { recover_all(); } cur_block_ = nested_cur_blocks_.back(); nested_cur_blocks_.pop_back(); next_loc_ = nested_next_locs_.back(); nested_next_locs_.pop_back(); cur_block_end_ = nested_cur_block_ends_.back(); nested_cur_block_ends_.pop_back(); } /** * Free all memory used by the stack allocator other than the * initial block allocation back to the system. Note: the * destructor will free all memory. */ inline void free_all() { // frees all BUT the first (index 0) block for (size_t i = 1; i < blocks_.size(); ++i) { if (blocks_[i]) { free(blocks_[i]); } } sizes_.resize(1); blocks_.resize(1); recover_all(); } /** * Return number of bytes allocated to this instance by the heap. * This is not the same as the number of bytes allocated through * calls to memalloc_. The latter number is not calculatable * because space is wasted at the end of blocks if the next * alloc request doesn't fit. (Perhaps we could trim down to * what is actually used?) * * @return number of bytes allocated to this instance */ inline size_t bytes_allocated() const { size_t sum = 0; for (size_t i = 0; i <= cur_block_; ++i) { sum += sizes_[i]; } return sum; } /** * Indicates whether the memory in the pointer * is in the stack. * * @param[in] ptr memory location * @return true if the pointer is in the stack, * false otherwise. 
*/ inline bool in_stack(const void* ptr) const { for (size_t i = 0; i < cur_block_; ++i) { if (ptr >= blocks_[i] && ptr < blocks_[i] + sizes_[i]) { return true; } } if (ptr >= blocks_[cur_block_] && ptr < next_loc_) { return true; } return false; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/0000755000176200001440000000000014645161272017730 5ustar liggesusersStanHeaders/inst/include/stan/math/prim/meta.hpp0000644000176200001440000001163114645137106021370 0ustar liggesusers#ifndef STAN_MATH_PRIM_META_HPP #define STAN_MATH_PRIM_META_HPP /** * \defgroup type_trait Type Traits * The type traits in Stan math are a mix of custom traits for detecting * value and container types of Eigen matrices, standard vectors, standard * complex numbers, and backports of C++17 traits. */ /** * \defgroup require_meta Available requires<> for overloading. * * See @ref require_meta_doc for an overview of their use and for how to * build your own custom requires for functions. * * The modules below group the different possible require constraints * available for overloading functions. */ /** * \ingroup require_meta * \defgroup require_stan_scalar Scalar types * `require` type traits for types that are either `arithmetic`, `var`, `fvar`, * or `Complex`. */ /** * \ingroup require_stan_scalar * \defgroup require_stan_scalar_real Real types * `require` type traits for types that are either `arithmetic`, `var`, or * `fvar`. */ /** * \ingroup require_stan_scalar * \defgroup require_stan_scalar_complex Complex types * `require` type traits for types that are `Complex`. */ /** * \ingroup require_meta * \defgroup require_std Standard library types and traits * `require` type traits that come from the standard library. */ /** * \ingroup require_meta * \defgroup require_eigens_types Eigen * `require` type traits to detect Eigen types. */ /** * \ingroup require_meta * \defgroup general_types General Types * `require` type traits for general types. 
*/ /** * \ingroup require_meta * \defgroup macro_helpers Require Macro Generators * These macros are used on type traits to define the set of `requires` */ /** * \ingroup require_meta * \defgroup require_opencl_types OpenCL * `require` type traits to detect types used with OpenCL. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/prim/fun/0000755000176200001440000000000014645161272020520 5ustar liggesusersStanHeaders/inst/include/stan/math/prim/fun/gamma_q.hpp0000644000176200001440000000424514645137106022637 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GAMMA_Q_HPP #define STAN_MATH_PRIM_FUN_GAMMA_Q_HPP #include #include #include namespace stan { namespace math { /** * \f[ \mbox{gamma\_q}(a, z) = \begin{cases} \textrm{error} & \mbox{if } a\leq 0 \textrm{ or } z < 0\\ Q(a, z) & \mbox{if } a > 0, z \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } a = \textrm{NaN or } z = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{gamma\_q}(a, z)}{\partial a} = \begin{cases} \textrm{error} & \mbox{if } a\leq 0 \textrm{ or } z < 0\\ \frac{\partial\, Q(a, z)}{\partial a} & \mbox{if } a > 0, z \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } a = \textrm{NaN or } z = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{gamma\_q}(a, z)}{\partial z} = \begin{cases} \textrm{error} & \mbox{if } a\leq 0 \textrm{ or } z < 0\\ \frac{\partial\, Q(a, z)}{\partial z} & \mbox{if } a > 0, z \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } a = 
\textrm{NaN or } z = \textrm{NaN} \end{cases} \f] \f[ Q(a, z)=\frac{1}{\Gamma(a)}\int_z^\infty t^{a-1}e^{-t}dt \f] \f[ \frac{\partial \, Q(a, z)}{\partial a} = -\frac{\Psi(a)}{\Gamma^2(a)}\int_z^\infty t^{a-1}e^{-t}dt + \frac{1}{\Gamma(a)}\int_z^\infty (a-1)t^{a-2}e^{-t}dt \f] \f[ \frac{\partial \, Q(a, z)}{\partial z} = -\frac{z^{a-1}e^{-z}}{\Gamma(a)} \f] * @throws domain_error if x is at pole */ inline double gamma_q(double x, double a) { return boost::math::gamma_q(x, a); } /** * Enables the vectorized application of the gamma_q function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return gamma_q function applied to the two inputs. */ template * = nullptr> inline auto gamma_q(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return gamma_q(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/atan2.hpp0000644000176200001440000000252414645137106022240 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ATAN2_HPP #define STAN_MATH_PRIM_FUN_ATAN2_HPP #include #include #include #include namespace stan { namespace math { /** * Computes the arc tangent of y/x using the signs of * arguments to determine the correct quadrant. * * @tparam T1 type of first argument (must be arithmetic) * @tparam T2 type of second argument (must be arithmetic) * @param y first argument * @param x second argument * @return arctangent of `y / x` */ template * = nullptr> double atan2(T1 y, T2 x) { return std::atan2(y, x); } /** * Enables the vectorized application of the atan2 function, when * the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Returns the atan2 function applied to the two inputs. 
*/ template * = nullptr, require_all_not_var_matrix_t* = nullptr> inline auto atan2(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [](const auto& c, const auto& d) { return atan2(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/segment.hpp0000644000176200001440000000252414645137106022675 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SEGMENT_HPP #define STAN_MATH_PRIM_FUN_SEGMENT_HPP #include #include #include namespace stan { namespace math { /** * Return the specified number of elements as a row/column vector starting * from the specified element - 1 of the specified row/column vector. * * @tparam T type of the vector */ template * = nullptr> inline auto segment(const Vec& v, size_t i, size_t n) { check_greater("segment", "n", i, 0.0); check_less_or_equal("segment", "n", i, static_cast(v.size())); if (n != 0) { check_greater("segment", "n", i + n - 1, 0.0); check_less_or_equal("segment", "n", i + n - 1, static_cast(v.size())); } return v.segment(i - 1, n); } template std::vector segment(const std::vector& sv, size_t i, size_t n) { check_greater("segment", "i", i, 0.0); check_less_or_equal("segment", "i", i, sv.size()); if (n != 0) { check_greater("segment", "i+n-1", i + n - 1, 0.0); check_less_or_equal("segment", "i+n-1", i + n - 1, static_cast(sv.size())); } std::vector s; for (size_t j = 0; j < n; ++j) { s.push_back(sv[i + j - 1]); } return s; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/tan.hpp0000644000176200001440000000402614645137106022014 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TAN_HPP #define STAN_MATH_PRIM_FUN_TAN_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `tan()` so that it can be vectorized. * * @tparam T type of argument * @param x angle in radians * @return Tangent of x. 
*/ struct tan_fun { template static inline auto fun(const T& x) { using std::tan; return tan(x); } }; /** * Vectorized version of `tan()`. * * @tparam Container type of container * @param x angles in radians * @return Tangent of each value in x. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto tan(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `tan()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Tangent of each value in x. */ template * = nullptr> inline auto tan(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().tan(); }); } namespace internal { /** * Return the tangent of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return tangent of the argument */ template inline std::complex complex_tan(const std::complex& z) { return neg_i_times(tanh(i_times(z))); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/svd_V.hpp0000644000176200001440000000153314645137106022313 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SVD_V_HPP #define STAN_MATH_PRIM_FUN_SVD_V_HPP #include #include namespace stan { namespace math { /** * Given input matrix m, return matrix V where `m = UDV^{T}` * * @tparam EigMat type of the matrix * @param m MxN input matrix * @return Orthogonal matrix V */ template * = nullptr, require_not_st_var* = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> svd_V( const EigMat& m) { using MatType = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return MatType(0, 0); } return Eigen::JacobiSVD(m, Eigen::ComputeThinV).matrixV(); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/fun/hypergeometric_2F2.hpp0000644000176200001440000000224614645137106024673 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_2F2_HPP #define STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_2F2_HPP #include #include namespace stan { namespace math { /** * Returns the generalized hypergeometric function applied to the * input arguments: * \f$_2F_2(a_1,a_2;b_1,b_2;z)\f$ * * See 'grad_pFq.hpp' for the derivatives wrt each parameter * * @param[in] a Vector of 'a' arguments to function * @param[in] b Vector of 'b' arguments to function * @param[in] z Scalar z argument * @return Generalized hypergeometric function */ template * = nullptr, require_stan_scalar_t* = nullptr> return_type_t hypergeometric_2F2(const Ta& a, const Tb& b, const Tz& z) { if (a.size() != 2 || b.size() != 2) { std::stringstream msg; msg << "Inputs to hypergeometric 2F2 do not contain two values" << "a: " << a << ", b: " << b; throw std::domain_error(msg.str()); } return hypergeometric_pFq(a, b, z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/grad_pFq.hpp0000644000176200001440000003230714645137106022760 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GRAD_PFQ_HPP #define STAN_MATH_PRIM_FUN_GRAD_PFQ_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Returns the gradient of generalized hypergeometric function wrt to the * input arguments: * \f$ _pF_q(a_1,...,a_p;b_1,...,b_q;z) \f$ * * The derivatives wrt a and b are defined using a Kampé de Fériet function, * (https://en.wikipedia.org/wiki/Kamp%C3%A9_de_F%C3%A9riet_function). * This is implemented below as an infinite sum (on the log scale) until * convergence. 
* * \f$ \frac{\partial}{\partial a_1} = * \frac{z\prod_{j=2}^pa_j}{\prod_{j=1}^qb_j} * F_{q+1\:0\;1}^{p\:1\:2}\left(\begin{array}&a_1+1,...,a_p+1;1;1,a_1\\ * 2, b_1+1,...,b_1+1;;a_1+1\end{array};z,z\right) \f$ * * \f$ \frac{\partial}{\partial b_1}= * -\frac{z\prod_{j=1}^pa_j}{b_1\prod_{j=1}^qb_j} * F_{q+1\:0\;1}^{p\:1\:2}\left(\begin{array}&a_1+1,...,a_p+1;1;1,b_1\\ * 2, b_1+1,...,b_1+1;;b_1+1\end{array};z,z\right) \f$ * * \f$ \frac{\partial}{\partial z}= \frac{\prod_{j=1}^pa_j}{\prod_{j=1}^qb_j} * {}_pF_q(a_1 + 1,...,a_p + 1; b_1 +1,...,b_q+1;z) \f$ * * @tparam calc_a Boolean for whether to calculate derivatives wrt to 'a' * @tparam calc_b Boolean for whether to calculate derivatives wrt to 'b' * @tparam calc_z Boolean for whether to calculate derivatives wrt to 'z' * @tparam TupleT Type of tuple containing objects to evaluate gradients into * @tparam Ta Eigen type with either one row or column at compile time * @tparam Tb Eigen type with either one row or column at compile time * @tparam Tz Scalar type * @param[in] grad_tuple Tuple of references to evaluate gradients into * @param[in] a Vector of 'a' arguments to function * @param[in] b Vector of 'b' arguments to function * @param[in] z Scalar z argument * @param[in] precision Convergence criteria for infinite sum * @param[in] outer_steps Maximum number of iterations for infinite sum * @param[in] inner_steps Maximum number of iterations for infinite sum * @return Generalized hypergeometric function */ template * = nullptr, require_stan_scalar_t* = nullptr> void grad_pFq_impl(TupleT&& grad_tuple, const Ta& a, const Tb& b, const Tz& z, double precision, int outer_steps, int inner_steps) { using std::max; using scalar_t = return_type_t; using Ta_plain = plain_type_t; using Tb_plain = plain_type_t; using T_vec = Eigen::Matrix; ref_type_t a_ref_in = a; ref_type_t b_ref_in = b; // Replace any zero inputs with the smallest number representable, so that // taking log and aggregating does not return -Inf Ta_plain a_ref = 
(a_ref_in.array() == 0).select(EPSILON, a_ref_in.array()).matrix(); Tb_plain b_ref = (b_ref_in.array() == 0).select(EPSILON, b_ref_in.array()).matrix(); int a_size = a.size(); int b_size = b.size(); // Convergence criteria for the gradients are the same as for the // Hypergeometric pFq itself bool condition_1 = (a_size > (b_size + 1)) && (z != 0); bool condition_2 = (a_size == (b_size + 1)) && (fabs(z) > 1); if (condition_1 || condition_2) { std::stringstream msg; msg << "hypergeometric pFq gradient does not meet convergence " << "conditions with given arguments. " << "a: " << a_ref << ", b: " << b_ref << ", " << "z: " << z; throw std::domain_error(msg.str()); } // As the gradients will be aggregating on the log scale, we will track the // the values and the signs separately - to avoid taking the log of a // negative input Eigen::VectorXi a_signs = sign(value_of_rec(a_ref)); Eigen::VectorXi b_signs = sign(value_of_rec(b_ref)); int z_sign = sign(value_of_rec(z)); Ta_plain ap1 = (a.array() + 1).matrix(); Tb_plain bp1 = (b.array() + 1).matrix(); scalar_type_t log_z = log(fabs(z)); scalar_type_t a_prod = prod(a_ref); scalar_type_t b_prod = prod(b_ref); // Only need the infinite sum for partials wrt a & b if (calc_a || calc_b) { double log_precision = log(precision); T_vec da_mn = T_vec::Constant(a_size, NEGATIVE_INFTY); T_vec db_mn = T_vec::Constant(b_size, NEGATIVE_INFTY); T_vec da = T_vec::Constant(a_size, 0.0); T_vec db = T_vec::Constant(b_size, 0.0); int m = 0; int n_iter = 2; double lgamma_mp1 = 0; double log_phammer_1m = 0; double log_phammer_2m = 0; T_vec log_phammer_ap1_m = T_vec::Zero(ap1.size()); T_vec log_phammer_bp1_m = T_vec::Zero(bp1.size()); Tz log_z_m = 0; Ta_plain ap1m = ap1; Tb_plain bp1m = bp1; Ta_plain ap1n = ap1; Tb_plain bp1n = bp1; Ta_plain an = a_ref; Tb_plain bn = b_ref; Ta_plain ap1mn = ap1m; Tb_plain bp1mn = bp1m; double log_phammer_1n; double log_phammer_2_mpn; double lgamma_np1; T_vec log_phammer_an(a_size); T_vec log_phammer_ap1_n(a_size); 
T_vec log_phammer_bp1_n(b_size); T_vec log_phammer_bn(b_size); T_vec log_phammer_ap1_mpn(a_size); T_vec log_phammer_bp1_mpn(b_size); int z_pow_m_sign = 1; Eigen::VectorXi curr_signs_da(a_size); Eigen::VectorXi curr_signs_db(b_size); Eigen::VectorXi log_phammer_an_sign(a_size); Eigen::VectorXi log_phammer_ap1n_sign(a_size); Eigen::VectorXi log_phammer_bp1n_sign(b_size); Eigen::VectorXi log_phammer_bn_sign(b_size); Eigen::VectorXi log_phammer_ap1mpn_sign(a_size); Eigen::VectorXi log_phammer_bp1mpn_sign(b_size); Eigen::VectorXi log_phammer_ap1m_sign = Eigen::VectorXi::Ones(a_size); Eigen::VectorXi log_phammer_bp1m_sign = Eigen::VectorXi::Ones(b_size); // If the inner loop converges in 1 iteration, then the sum has coverged // and another iteration of the outer loop is not needed while ((n_iter > 1) && (m < outer_steps)) { ap1n = ap1; bp1n = bp1; an = a_ref; bn = b_ref; ap1mn = ap1m; bp1mn = bp1m; int n = 0; Tz log_z_mn = log_z_m; int z_pow_mn_sign = z_pow_m_sign; scalar_t inner_diff = 0; lgamma_np1 = 0; log_phammer_1n = 0; log_phammer_an.setZero(); log_phammer_ap1_n.setZero(); log_phammer_bp1_n.setZero(); log_phammer_bn.setZero(); log_phammer_ap1_mpn = log_phammer_ap1_m; log_phammer_bp1_mpn = log_phammer_bp1_m; log_phammer_2_mpn = log_phammer_2m; log_phammer_an_sign.setOnes(); log_phammer_ap1n_sign.setOnes(); log_phammer_bp1n_sign.setOnes(); log_phammer_bn_sign.setOnes(); log_phammer_ap1mpn_sign = log_phammer_ap1m_sign; log_phammer_bp1mpn_sign = log_phammer_bp1m_sign; while ((inner_diff > log_precision) && (n < inner_steps)) { // Numerator term scalar_t term1_mn = log_z_mn + sum(log_phammer_ap1_mpn) + log_phammer_1m + log_phammer_1n; // Denominator term scalar_t term2_mn = lgamma_mp1 + lgamma_np1 + sum(log_phammer_bp1_mpn) + log_phammer_2_mpn; int base_sign = z_pow_mn_sign * log_phammer_ap1mpn_sign.prod() * log_phammer_bp1mpn_sign.prod(); if (calc_a) { // Division (on log scale) for the a & b partials // Determine signs of each element curr_signs_da = (base_sign * 
log_phammer_an_sign.array() * log_phammer_ap1n_sign.array()) .matrix(); da_mn = (term1_mn + log_phammer_an.array()) - (term2_mn + log_phammer_ap1_n.array()); // Aggregate the sums on the natural scale, so that the sign can be // applied before aggregation da += exp(da_mn).cwiseProduct(curr_signs_da); } if (calc_b) { curr_signs_db = (base_sign * log_phammer_bn_sign.array() * log_phammer_bp1n_sign.array()) .matrix(); db_mn = (term1_mn + log_phammer_bn.array()) - (term2_mn + log_phammer_bp1_n.array()); db += exp(db_mn).cwiseProduct(curr_signs_db); } // Series convergence assessed by whether the maximum term is // smaller than the specified criteria (precision) inner_diff = max(da_mn.maxCoeff(), db_mn.maxCoeff()); // Increment the input arguments and rising factorials log_z_mn += log_z; log_phammer_1n += log1p(n); log_phammer_2_mpn += log(2 + m + n); log_phammer_ap1_n.array() += log(math::fabs((ap1n.array() == 0).select(1.0, ap1n.array()))); log_phammer_bp1_n.array() += log(math::fabs((bp1n.array() == 0).select(1.0, bp1n.array()))); log_phammer_an.array() += log(math::fabs((an.array() == 0).select(1.0, an.array()))); log_phammer_bn.array() += log(math::fabs((bn.array() == 0).select(1.0, bn.array()))); log_phammer_ap1_mpn.array() += log(math::fabs((ap1mn.array() == 0).select(1.0, ap1mn.array()))); log_phammer_bp1_mpn.array() += log(math::fabs((bp1mn.array() == 0).select(1.0, bp1mn.array()))); z_pow_mn_sign *= z_sign; log_phammer_ap1n_sign.array() *= sign(value_of_rec(ap1n)).array(); log_phammer_bp1n_sign.array() *= sign(value_of_rec(bp1n)).array(); log_phammer_an_sign.array() *= sign(value_of_rec(an)).array(); log_phammer_bn_sign.array() *= sign(value_of_rec(bn)).array(); log_phammer_ap1mpn_sign.array() *= sign(value_of_rec(ap1mn)).array(); log_phammer_bp1mpn_sign.array() *= sign(value_of_rec(bp1mn)).array(); n += 1; lgamma_np1 += log(n); ap1n.array() += 1; bp1n.array() += 1; an.array() += 1; bn.array() += 1; ap1mn.array() += 1; bp1mn.array() += 1; } z_pow_m_sign *= 
z_sign; n_iter = n; log_z_m += log_z; log_phammer_1m += log1p(m); log_phammer_2m += log(2 + m); log_phammer_ap1_m += log(math::fabs(ap1m)); log_phammer_ap1m_sign.array() *= sign(value_of_rec(ap1m)).array(); log_phammer_bp1_m += log(math::fabs(bp1m)); log_phammer_bp1m_sign.array() *= sign(value_of_rec(bp1m)).array(); m += 1; lgamma_mp1 += log(m); ap1m.array() += 1; bp1m.array() += 1; ap1mn.array() += 1; bp1mn.array() += 1; } if (m == outer_steps) { throw_domain_error("grad_pFq", "k (internal counter)", outer_steps, "exceeded ", " iterations, hypergeometric function gradient " "did not converge."); } if (calc_a) { auto pre_mult_a = (z * a_prod / a_ref.array() / b_prod).matrix(); std::get<0>(grad_tuple) = std::move(pre_mult_a.cwiseProduct(da)); } if (calc_b) { auto pre_mult_b = ((z * a_prod) / (b.array() * b_prod)).matrix(); std::get<1>(grad_tuple) = std::move(-pre_mult_b.cwiseProduct(db)); } } if (calc_z) { std::get<2>(grad_tuple) = std::move((a_prod / b_prod) * hypergeometric_pFq(ap1, bp1, z)); } } } // namespace internal /** * Wrapper function for calculating gradients for the generalized * hypergeometric function. The function always returns a tuple with * three elements (gradients wrt a, b, and z, respectively), but the * elements will only be defined/calculated when the respective parameter * is not a primitive type. 
* * @tparam Ta Eigen type with either one row or column at compile time * @tparam Tb Eigen type with either one row or column at compile time * @tparam Tz Scalar type * @param[in] a Vector of 'a' arguments to function * @param[in] b Vector of 'b' arguments to function * @param[in] z Scalar z argument * @param[in] precision Convergence criteria for infinite sum * @param[in] outer_steps Maximum number of iterations for outer_steps * @param[in] inner_steps Maximum number of iterations for inner_steps * @return Tuple of gradients */ template auto grad_pFq(const Ta& a, const Tb& b, const Tz& z, double precision = 1e-10, int outer_steps = 1e6, int inner_steps = 1e6) { using partials_t = partials_return_t; std::tuple>, promote_scalar_t>, promote_scalar_t>> ret_tuple; internal::grad_pFq_impl::value, !is_constant::value, !is_constant::value>( ret_tuple, value_of(a), value_of(b), value_of(z), precision, outer_steps, inner_steps); return ret_tuple; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cholesky_corr_free.hpp0000644000176200001440000000273314645137106025104 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CHOLESKY_CORR_FREE_HPP #define STAN_MATH_PRIM_FUN_CHOLESKY_CORR_FREE_HPP #include #include #include #include #include namespace stan { namespace math { template * = nullptr> auto cholesky_corr_free(const T& x) { using Eigen::Dynamic; using Eigen::Matrix; check_square("cholesky_corr_free", "x", x); // should validate lower-triangular, unit lengths const auto& x_ref = to_ref(x); int K = (x.rows() * (x.rows() - 1)) / 2; Matrix, Dynamic, 1> z(K); int k = 0; for (int i = 1; i < x.rows(); ++i) { z.coeffRef(k++) = corr_free(x_ref.coeff(i, 0)); double sum_sqs = square(x_ref.coeff(i, 0)); for (int j = 1; j < i; ++j) { z.coeffRef(k++) = corr_free(x_ref.coeff(i, j) / std::sqrt(1.0 - sum_sqs)); sum_sqs += square(x_ref.coeff(i, j)); } } return z; } /** * Overload of `cholesky_corr_free()` to untransform each matrix * in a standard vector. 
* @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase`. * @param x The standard vector to untransform. */ template * = nullptr> auto cholesky_corr_free(const T& x) { return apply_vector_unary::apply( x, [](auto&& v) { return cholesky_corr_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/singular_values.hpp0000644000176200001440000000205714645137106024437 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SINGULAR_VALUES_HPP #define STAN_MATH_PRIM_FUN_SINGULAR_VALUES_HPP #include #include #include namespace stan { namespace math { /** * Return the vector of the singular values of the specified matrix * in decreasing order of magnitude. *

See the documentation for svd() for * information on the singular values. * * @tparam EigMat type of the matrix * @param m Specified matrix. * @return Singular values of the matrix. */ template * = nullptr, require_not_st_var* = nullptr> auto singular_values(const EigMat& m) { if (unlikely(m.size() == 0)) { return Eigen::Matrix, Eigen::Dynamic, 1>(0, 1); } return Eigen::JacobiSVD, Eigen::Dynamic, Eigen::Dynamic> >(m) .singularValues(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/select.hpp0000644000176200001440000001527114645137106022515 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SELECT_HPP #define STAN_MATH_PRIM_FUN_SELECT_HPP #include #include #include namespace stan { namespace math { /** * If first argument is true return the second argument, * else return the third argument. * * `select(c, y1, y0) = c ? y1 : y0`. * * @tparam T_true A stan `Scalar` type * @tparam T_false A stan `Scalar` type * @param c Boolean condition value. * @param y_true Value to return if condition is true. * @param y_false Value to return if condition is false. */ template , require_all_stan_scalar_t* = nullptr> inline ReturnT select(const bool c, const T_true y_true, const T_false y_false) { return c ? ReturnT(y_true) : ReturnT(y_false); } /** * If first argument is true return the second argument, * else return the third argument. Eigen expressions are * evaluated so that the return type is the same for both branches. * * Both containers must have the same plain type. The scalar type * of the return is determined by the return_type_t<> type trait. * * Overload for use with two containers. * * @tparam T_true A container of stan `Scalar` types * @tparam T_false A container of stan `Scalar` types * @param c Boolean condition value. * @param y_true Value to return if condition is true. * @param y_false Value to return if condition is false. 
*/ template < typename T_true, typename T_false, typename T_return = return_type_t, typename T_true_plain = promote_scalar_t>, typename T_false_plain = promote_scalar_t>, require_all_container_t* = nullptr, require_all_same_t* = nullptr> inline T_true_plain select(const bool c, T_true&& y_true, T_false&& y_false) { check_matching_dims("select", "left hand side", y_true, "right hand side", y_false); return c ? T_true_plain(std::forward(y_true)) : T_true_plain(std::forward(y_false)); } /** * If first argument is true return the second argument, * else return the third argument. * * Overload for use when the 'true' return is a container and the 'false' * return is a scalar * * If chosen, the scalar is returned as a container of the same size and * plain type as the provided argument. Consequently, any Eigen expressions are * evaluated. * * @tparam T_true A container of stan `Scalar` types * @tparam T_false A stan `Scalar` type * @param c Boolean condition value. * @param y_true Value to return if condition is true. * @param y_false Value to return if condition is false. */ template , plain_type_t>, require_container_t* = nullptr, require_stan_scalar_t* = nullptr> inline ReturnT select(const bool c, const T_true& y_true, const T_false& y_false) { if (c) { return y_true; } else { return apply_scalar_binary( y_true, y_false, [](const auto& y_true_inner, const auto& y_false_inner) { return y_false_inner; }); } } /** * If first argument is true return the second argument, * else return the third argument. * * Overload for use when the 'true' return is a scalar and the 'false' * return is a container * * If chosen, the scalar is returned as a container of the same size and * plain type as the provided argument. Consequently, any Eigen expressions are * evaluated. * * @tparam T_true A stan `Scalar` type * @tparam T_false A container of stan `Scalar` types * @param c Boolean condition value. * @param y_true Value to return if condition is true. 
* @param y_false Value to return if condition is false. */ template , plain_type_t>, require_stan_scalar_t* = nullptr, require_container_t* = nullptr> inline ReturnT select(const bool c, const T_true y_true, const T_false y_false) { if (c) { return apply_scalar_binary( y_true, y_false, [](const auto& y_true_inner, const auto& y_false_inner) { return y_true_inner; }); } else { return y_false; } } /** * If first argument is true return the second argument, * else return the third argument. Overload for use with an Eigen * object of booleans, and two scalars. * * The chosen scalar is returned as an Eigen object of the same dimension * as the input Eigen argument * * @tparam T_bool type of Eigen boolean object * @tparam T_true A stan `Scalar` type * @tparam T_false A stan `Scalar` type * @param c Eigen object of boolean condition values. * @param y_true Value to return if condition is true. * @param y_false Value to return if condition is false. */ template * = nullptr, require_all_stan_scalar_t* = nullptr> inline auto select(const T_bool c, const T_true y_true, const T_false y_false) { using ret_t = return_type_t; return c .unaryExpr([y_true, y_false](bool cond) { return cond ? ret_t(y_true) : ret_t(y_false); }) .eval(); } /** * If first argument is true return the second argument, * else return the third argument. Overload for use with an Eigen * array of booleans, one Eigen array and a scalar as input. * * @tparam T_bool type of Eigen boolean object * @tparam T_true A stan `Scalar` type or Eigen Array type * @tparam T_false A stan `Scalar` type or Eigen Array type * @param c Eigen object of boolean condition values. * @param y_true Value to return if condition is true. * @param y_false Value to return if condition is false. 
*/ template * = nullptr, require_any_eigen_array_t* = nullptr> inline auto select(const T_bool c, const T_true y_true, const T_false y_false) { check_consistent_sizes("select", "boolean", c, "left hand side", y_true, "right hand side", y_false); using ret_t = return_type_t; return c.select(y_true, y_false).template cast().eval(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/welford_var_estimator.hpp0000644000176200001440000000174214645137106025635 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_WELFORD_VAR_ESTIMATOR_HPP #define STAN_MATH_PRIM_FUN_WELFORD_VAR_ESTIMATOR_HPP #include #include namespace stan { namespace math { class welford_var_estimator { public: explicit welford_var_estimator(int n) : m_(Eigen::VectorXd::Zero(n)), m2_(Eigen::VectorXd::Zero(n)) { restart(); } void restart() { num_samples_ = 0; m_.setZero(); m2_.setZero(); } void add_sample(const Eigen::VectorXd& q) { ++num_samples_; Eigen::VectorXd delta(q - m_); m_ += delta / num_samples_; m2_ += delta.cwiseProduct(q - m_); } int num_samples() { return num_samples_; } void sample_mean(Eigen::VectorXd& mean) { mean = m_; } void sample_variance(Eigen::VectorXd& var) { if (num_samples_ > 1) { var = m2_ / (num_samples_ - 1.0); } } protected: double num_samples_; Eigen::VectorXd m_; Eigen::VectorXd m2_; }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/append_row.hpp0000644000176200001440000000630214645137106023367 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_APPEND_ROW_HPP #define STAN_MATH_PRIM_FUN_APPEND_ROW_HPP #include #include #include #include namespace stan { namespace math { /** * Return the result of stacking the rows of the first argument * matrix on top of the second argument matrix. * * Given input types result in following outputs: * (matrix, matrix) -> matrix, * (matrix, row_vector) -> matrix, * (row_vector, matrix) -> matrix, * (row_vector, row_vector) -> matrix, * (vector, vector) -> vector. 
* * @tparam T1 type of the first matrix * @tparam T2 type of the second matrix * * @param A First matrix. * @param B Second matrix. * @return Result of stacking first matrix on top of second. */ template * = nullptr> inline auto append_row(const T1& A, const T2& B) { using Eigen::Dynamic; using Eigen::Matrix; using T_return = return_type_t; constexpr int col_type = (T1::ColsAtCompileTime == 1 && T2::ColsAtCompileTime == 1) ? 1 : Eigen::Dynamic; int Arows = A.rows(); int Brows = B.rows(); int Acols = A.cols(); int Bcols = B.cols(); check_size_match("append_row", "columns of A", Acols, "columns of B", Bcols); Matrix result(Arows + Brows, Acols); result.topRows(Arows) = A.template cast(); result.bottomRows(Brows) = B.template cast(); return result; } /** * Return the result of stacking an scalar on top of the * a vector, with the result being a vector. * * This function applies to (scalar, vector) and returns a vector. * * @tparam Scal type of the scalar * @tparam ColVec type of the vector * * @param A scalar. * @param B vector. * @return Result of stacking the scalar on top of the vector. */ template * = nullptr, require_t>* = nullptr> inline Eigen::Matrix, Eigen::Dynamic, 1> append_row( const Scal& A, const ColVec& B) { using Eigen::Dynamic; using Eigen::Matrix; using T_return = return_type_t; Matrix result(B.size() + 1); result << A, B.template cast(); return result; } /** * Return the result of stacking a vector on top of the * an scalar, with the result being a vector. * * This function applies to (vector, scalar) and returns a vector. * * @tparam ColVec type of the vector * @tparam Scal type of the scalar * * @param A vector. * @param B scalar. * @return Result of stacking the vector on top of the scalar. 
*/ template >* = nullptr, require_stan_scalar_t* = nullptr> inline Eigen::Matrix, Eigen::Dynamic, 1> append_row( const ColVec& A, const Scal& B) { using Eigen::Dynamic; using Eigen::Matrix; using T_return = return_type_t; Matrix result(A.size() + 1); result << A.template cast(), B; return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/atan.hpp0000644000176200001440000000426114645137106022156 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ATAN_HPP #define STAN_MATH_PRIM_FUN_ATAN_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap \c atan() so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Arctan of x in radians. */ struct atan_fun { template static inline auto fun(const T& x) { using std::atan; return atan(x); } }; /** * Returns the elementwise \c atan() of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x container * @return Arctan of each value in x, in radians. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto atan(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of atan() that accepts std::vectors, Eigen Matrix/Array objects, * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Elementwise atan of members of container. */ template * = nullptr> inline auto atan(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().atan(); }); } namespace internal { /** * Return the arc tangent of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return arc tangent of the argument */ template inline std::complex complex_atan(const std::complex& z) { return neg_i_times(atanh(i_times(z))); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_matrix.hpp0000644000176200001440000001566514645137106023253 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_MATRIX_HPP #define STAN_MATH_PRIM_FUN_TO_MATRIX_HPP #include #include #include #include namespace stan { namespace math { /** * Returns a matrix with dynamic dimensions constructed from an * Eigen matrix. * * @tparam EigMat type of the matrix * * @param x matrix * @return the matrix representation of the input */ template * = nullptr> inline EigMat to_matrix(EigMat&& x) { return std::forward(x); } /** * Returns a matrix with dynamic dimensions constructed from an * Eigen row or column vector. * The runtime dimensions will be the same as the input. * * @tparam EigMat type of the vector/row vector * * @param matrix input vector/row vector * @return the matrix representation of the input */ template * = nullptr> inline auto to_matrix(EigVec&& matrix) { return Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>( std::forward(matrix)); } /** * Returns a matrix representation of a standard vector of Eigen * row vectors with the same dimensions and indexing order. * * @tparam T type of the elements in the vector * @param x Eigen vector of vectors of scalar values * @return the matrix representation of the input */ template inline Eigen::Matrix to_matrix( const std::vector>& x) { int rows = x.size(); if (rows == 0) { return {}; } int cols = x[0].size(); Eigen::Matrix result(rows, cols); for (int i = 0, ij = 0; i < cols; i++) { for (int j = 0; j < rows; j++, ij++) { result.coeffRef(ij) = x[j][i]; } } return result; } /** * Returns a matrix representation of the standard vector of * standard vectors with the same dimensions and indexing order. 
* * @tparam T type of elements in the vector * @param x vector of vectors of scalar values * @return the matrix representation of the input */ template inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> to_matrix(const std::vector>& x) { size_t rows = x.size(); if (rows == 0) { return {}; } size_t cols = x[0].size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> result(rows, cols); for (size_t i = 0, ij = 0; i < cols; i++) { for (size_t j = 0; j < rows; j++, ij++) { result.coeffRef(ij) = x[j][i]; } } return result; } /** * Returns a matrix representation of the vector in column-major * order with the specified number of rows and columns. * * @tparam EigMat type of the matrix * * @param x matrix * @param m rows * @param n columns * @return Reshaped inputted matrix * @throw std::invalid_argument if the sizes * do not match */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> to_matrix(EigMat&& x, int m, int n) { static const char* function = "to_matrix(matrix)"; check_size_match(function, "rows * columns", m * n, "vector size", x.size()); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> y = std::forward(x); y.resize(m, n); return y; } /** * Returns a matrix representation of the vector in column-major * order with the specified number of rows and columns. * * @tparam T type of elements in the vector * @param x vector of values * @param m rows * @param n columns * @return the matrix representation of the input * @throw std::invalid_argument * if the sizes do not match */ template inline auto to_matrix(const std::vector& x, int m, int n) { static const char* function = "to_matrix(array)"; check_size_match(function, "rows * columns", m * n, "vector size", x.size()); return Eigen::Map>( &x[0], m, n); } /** * Returns a matrix representation of the vector in column-major * order with the specified number of rows and columns. 
* * @param x vector of values * @param m rows * @param n columns * @return the matrix representation of the input * @throw std::invalid_argument * if the sizes do not match */ inline Eigen::Matrix to_matrix( const std::vector& x, int m, int n) { static const char* function = "to_matrix(array)"; int x_size = x.size(); check_size_match(function, "rows * columns", m * n, "vector size", x_size); Eigen::Matrix result(m, n); for (int i = 0; i < x_size; i++) { result.coeffRef(i) = x[i]; } return result; } /** * Returns a matrix representation of the vector in column-major * order with the specified number of rows and columns. * * @tparam EigMat type of the matrix * * @param x matrix * @param m rows * @param n columns * @param col_major column-major indicator: * if 1, output matrix is transversed in column-major order, * if 0, output matrix is transversed in row-major order, * otherwise function throws std::invalid_argument * @return Reshaped inputted matrix * @throw std::invalid_argument * if the sizes do not match */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> to_matrix(EigMat&& x, int m, int n, bool col_major) { if (col_major) { return to_matrix(std::forward(x), m, n); } else { Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> res = to_matrix(std::forward(x), n, m); res.transposeInPlace(); return res; } } /** * Returns a matrix representation of the vector in column-major * order with the specified number of rows and columns. 
* * @tparam T type of elements in the vector * @param x vector of values * @param m rows * @param n columns * @param col_major column-major indicator: * if 1, output matrix is transversed in column-major order, * if 0, output matrix is transversed in row-major order, * otherwise function throws std::invalid_argument * @return the matrix representation of the input * @throw std::invalid_argument * if the sizes do not match */ template inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> to_matrix(const std::vector& x, int m, int n, bool col_major) { if (col_major) { return to_matrix(x, m, n); } check_size_match("to_matrix", "rows * columns", m * n, "matrix size", x.size()); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> result(m, n); for (int i = 0, ij = 0; i < m; i++) { for (int j = 0; j < n; j++, ij++) { result.coeffRef(i, j) = x[ij]; } } return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/num_elements.hpp0000644000176200001440000000232314645137106023723 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_NUM_ELEMENTS_HPP #define STAN_MATH_PRIM_FUN_NUM_ELEMENTS_HPP #include #include #include namespace stan { namespace math { /** * Returns 1, the number of elements in a primitive type. * * @tparam T scalar type * @param x Argument of primitive type. * @return 1 */ template * = nullptr> inline int num_elements(const T& x) { return 1; } /** * Returns the size of the specified matrix. * * @tparam T type of the matrix * * @param m argument matrix * @return size of matrix */ template * = nullptr> inline int num_elements(const T& m) { return m.size(); } /** * Returns the number of elements in the specified vector. * This assumes it is not ragged and that each of its contained * elements has the same number of elements. 
* * @tparam T type of elements in the vector * @param v argument vector * @return number of contained arguments */ template inline int num_elements(const std::vector& v) { if (v.size() == 0) { return 0; } return v.size() * num_elements(v[0]); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_modified_bessel_first_kind.hpp0000644000176200001440000002306414645137106027427 0ustar liggesusers// Original code derived from Boost and is distributed here // under the Boost license (licenses/boost-license.txt) // Copyright (c) 2006 Xiaogang Zhang // Copyright (c) 2007, 2017 John Maddock // Secondary code copyright by its author and is distributed here // under the BSD-3 license (LICENSE.md) #ifndef STAN_MATH_PRIM_FUN_LOG_MODIFIED_BESSEL_FIRST_KIND_HPP #define STAN_MATH_PRIM_FUN_LOG_MODIFIED_BESSEL_FIRST_KIND_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /* Log of the modified Bessel function of the first kind, * which is better known as the Bessel I function. See * modified_bessel_first_kind.hpp for the function definition. * The derivatives are known to be incorrect for v = 0 because a * simple constant 0 is returned. 
* * @tparam T1 type of the order (v) * @tparam T2 type of argument (z) * @param v Order, can be a non-integer but must be at least -1 * @param z Real non-negative number * @throws std::domain_error if either v or z is NaN, z is * negative, or v is less than -1 * @return log of Bessel I function */ template * = nullptr> inline return_type_t log_modified_bessel_first_kind( const T1 v, const T2 z) { check_not_nan("log_modified_bessel_first_kind", "first argument (order)", v); check_not_nan("log_modified_bessel_first_kind", "second argument (variable)", z); check_nonnegative("log_modified_bessel_first_kind", "second argument (variable)", z); check_greater_or_equal("log_modified_bessel_first_kind", "first argument (order)", v, -1); using boost::math::tools::evaluate_polynomial; using std::log; using std::pow; using std::sqrt; using T = return_type_t; if (z == 0) { if (v == 0) { return 0.0; } if (v > 0) { return NEGATIVE_INFTY; } return INFTY; } if (is_inf(z)) { return z; } if (v == 0) { // WARNING: will not autodiff for v = 0 correctly // modified from Boost's bessel_i0_imp in the double precision case, // which refers to: // Modified Bessel function of the first kind of order zero // we use the approximating forms derived in: // "Rational Approximations for the Modified Bessel Function of the // First Kind -- I0(x) for Computations with Double Precision" // by Pavel Holoborodko, see // http://www.advanpix.com/2015/11/11/rational-approximations-for-the-modified-bessel-function-of-the-first-kind-i0-computations-double-precision // The actual coefficients used are [Boost's] own, and extend // Pavel's work to precisions other than double. 
if (z < 7.75) { // Bessel I0 over[10 ^ -16, 7.75] // Max error in interpolated form : 3.042e-18 // Max Error found at double precision = Poly : 5.106609e-16 // Cheb : 5.239199e-16 static const double P[] = {1.00000000000000000e+00, 2.49999999999999909e-01, 2.77777777777782257e-02, 1.73611111111023792e-03, 6.94444444453352521e-05, 1.92901234513219920e-06, 3.93675991102510739e-08, 6.15118672704439289e-10, 7.59407002058973446e-12, 7.59389793369836367e-14, 6.27767773636292611e-16, 4.34709704153272287e-18, 2.63417742690109154e-20, 1.13943037744822825e-22, 9.07926920085624812e-25}; return log1p_exp(multiply_log(2, z) - log(4.0) + log(evaluate_polynomial(P, 0.25 * square(z)))); } if (z < 500) { // Max error in interpolated form : 1.685e-16 // Max Error found at double precision = Poly : 2.575063e-16 // Cheb : 2.247615e+00 static const double P[] = {3.98942280401425088e-01, 4.98677850604961985e-02, 2.80506233928312623e-02, 2.92211225166047873e-02, 4.44207299493659561e-02, 1.30970574605856719e-01, -3.35052280231727022e+00, 2.33025711583514727e+02, -1.13366350697172355e+04, 4.24057674317867331e+05, -1.23157028595698731e+07, 2.80231938155267516e+08, -5.01883999713777929e+09, 7.08029243015109113e+10, -7.84261082124811106e+11, 6.76825737854096565e+12, -4.49034849696138065e+13, 2.24155239966958995e+14, -8.13426467865659318e+14, 2.02391097391687777e+15, -3.08675715295370878e+15, 2.17587543863819074e+15}; return z + log(evaluate_polynomial(P, inv(z))) - multiply_log(0.5, z); } // Max error in interpolated form : 2.437e-18 // Max Error found at double precision = Poly : 1.216719e-16 static const double P[] = {3.98942280401432905e-01, 4.98677850491434560e-02, 2.80506308916506102e-02, 2.92179096853915176e-02, 4.53371208762579442e-02}; return z + log(evaluate_polynomial(P, inv(z))) - multiply_log(0.5, z); } if (v == 1) { // WARNING: will not autodiff for v = 1 correctly // modified from Boost's bessel_i1_imp in the double precision case // see credits above in the v == 0 case if (z < 
7.75) { // Bessel I0 over[10 ^ -16, 7.75] // Max error in interpolated form: 5.639e-17 // Max Error found at double precision = Poly: 1.795559e-16 static const double P[] = {8.333333333333333803e-02, 6.944444444444341983e-03, 3.472222222225921045e-04, 1.157407407354987232e-05, 2.755731926254790268e-07, 4.920949692800671435e-09, 6.834657311305621830e-11, 7.593969849687574339e-13, 6.904822652741917551e-15, 5.220157095351373194e-17, 3.410720494727771276e-19, 1.625212890947171108e-21, 1.332898928162290861e-23}; T a = square(z) * 0.25; T Q[3] = {1, 0.5, evaluate_polynomial(P, a)}; return log(z) + log(evaluate_polynomial(Q, a)) - LOG_TWO; } if (z < 500) { // Max error in interpolated form: 1.796e-16 // Max Error found at double precision = Poly: 2.898731e-16 static const double P[] = {3.989422804014406054e-01, -1.496033551613111533e-01, -4.675104253598537322e-02, -4.090895951581637791e-02, -5.719036414430205390e-02, -1.528189554374492735e-01, 3.458284470977172076e+00, -2.426181371595021021e+02, 1.178785865993440669e+04, -4.404655582443487334e+05, 1.277677779341446497e+07, -2.903390398236656519e+08, 5.192386898222206474e+09, -7.313784438967834057e+10, 8.087824484994859552e+11, -6.967602516005787001e+12, 4.614040809616582764e+13, -2.298849639457172489e+14, 8.325554073334618015e+14, -2.067285045778906105e+15, 3.146401654361325073e+15, -2.213318202179221945e+15}; return z + log(evaluate_polynomial(P, inv(z))) - multiply_log(0.5, z); } // Max error in interpolated form: 1.320e-19 // Max Error found at double precision = Poly: 7.065357e-17 static const double P[] = {3.989422804014314820e-01, -1.496033551467584157e-01, -4.675105322571775911e-02, -4.090421597376992892e-02, -5.843630344778927582e-02}; return z + log(evaluate_polynomial(P, inv(z))) - multiply_log(0.5, z); } if (z > 100) { // Boost does something like this in asymptotic_bessel_i_large_x T lim = pow((square(v) + 2.5) / (2 * z), 3) / 24; if (lim < (EPSILON * 10)) { T s = 1; T mu = 4 * square(v); T ex = 8 * z; T num = 
mu - 1; T denom = ex; s -= num / denom; num *= mu - 9; denom *= ex * 2; s += num / denom; num *= mu - 25; denom *= ex * 3; s -= num / denom; s = z - log(sqrt(z * TWO_PI)) + log(s); return s; } } return_type_t log_half_z = log(0.5 * z); return_type_t lgam = v > -1 ? lgamma(v + 1.0) : 0; T lcons = (2.0 + v) * log_half_z; T out; if (v > -1) { out = log_sum_exp(v * log_half_z - lgam, lcons - lgamma(v + 2)); lgam += log1p(v); } else { out = lcons; } double m = 2; double lfac = 0; T old_out; do { lfac += log(m); lgam += log(v + m); lcons += 2 * log_half_z; old_out = out; out = log_sum_exp(out, lcons - lfac - lgam); // underflows eventually m++; } while (out > old_out || out < old_out); return out; } /** * Enables the vectorized application of the log_modified_bessel_first_kind * function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return log_modified_bessel_first_kind function applied to the two inputs. */ template * = nullptr> inline auto log_modified_bessel_first_kind(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return log_modified_bessel_first_kind(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/atanh.hpp0000644000176200001440000000514714645137106022332 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ATANH_HPP #define STAN_MATH_PRIM_FUN_ATANH_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the inverse hyperbolic tangent of the specified value. * An argument of -1 returns negative infinity and an argument of 1 * returns infinity. * Returns nan for nan argument. * * @param[in] x Argument. * @return Inverse hyperbolic tangent of the argument. * @throw std::domain_error If argument is not in [-1, 1]. 
*/ inline double atanh(double x) { if (is_nan(x)) { return x; } else { check_bounded("atanh", "x", x, -1.0, 1.0); return std::atanh(x); } } /** * Integer version of atanh. * * @param[in] x Argument. * @return Inverse hyperbolic tangent of the argument. * @throw std::domain_error If argument is less than 1. */ inline double atanh(int x) { check_bounded("atanh", "x", x, -1, 1); return std::atanh(x); } /** * Structure to wrap atanh() so it can be vectorized. */ struct atanh_fun { /** * Return the inverse hyperbolic tangent of the specified argument. * * @tparam T type of argument * @param x argument * @return Inverse hyperbolic tangent of the argument. */ template static inline auto fun(const T& x) { return atanh(x); } }; /** * Return the elementwise application of atanh() to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. * * @tparam T type of container * @param x container * @return Elementwise atanh of members of container. */ template < typename T, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto atanh(const T& x) { return apply_scalar_unary::apply(x); } namespace internal { /** * Return the hyperbolic arc tangent of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return hyperbolic arc tangent of the argument */ template inline std::complex complex_atanh(const std::complex& z) { std::complex y_d = atanh(value_of_rec(z)); V one(1); auto y = 0.5 * (log(one + z) - log(one - z)); return copysign(y, y_d); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/csr_extract.hpp0000644000176200001440000000405714645137106023557 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CSR_EXTRACT_HPP #define STAN_MATH_PRIM_FUN_CSR_EXTRACT_HPP #include #include namespace stan { namespace math { /** \addtogroup csr_format * @{ */ /** * Extract the non-zero values, column indexes for non-zero values, and * the NZE index for each entry from a sparse matrix. * * @tparam T type of elements in the matrix * @param[in] A sparse matrix. * @return a tuple W,V,U. */ template const std::tuple, std::vector, std::vector> csr_extract(const Eigen::SparseMatrix& A) { auto a_nonzeros = A.nonZeros(); Eigen::Matrix w = Eigen::Matrix::Zero(a_nonzeros); std::vector v(a_nonzeros); for (int nze = 0; nze < a_nonzeros; ++nze) { w[nze] = *(A.valuePtr() + nze); v[nze] = *(A.innerIndexPtr() + nze) + stan::error_index::value; } std::vector u(A.outerSize() + 1); // last entry is garbage. for (int nze = 0; nze <= A.outerSize(); ++nze) { u[nze] = *(A.outerIndexPtr() + nze) + stan::error_index::value; } return std::make_tuple(std::move(w), std::move(v), std::move(u)); } /* Extract the non-zero values from a dense matrix by converting * to sparse and calling the sparse matrix extractor. * * @tparam T type of elements in the matrix * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic * * @param[in] A dense matrix. * @return a tuple W,V,U. 
*/ template * = nullptr> const std::tuple, Eigen::Dynamic, 1>, std::vector, std::vector> csr_extract(const T& A) { // conversion to sparse seems to touch data twice, so we need to call to_ref Eigen::SparseMatrix, Eigen::RowMajor> B = to_ref(A).sparseView(); return csr_extract(B); } /** @} */ // end of csr_format group } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_inv_logit_diff.hpp0000644000176200001440000000352614645137106025061 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_INV_LOGIT_DIFF_HPP #define STAN_MATH_PRIM_FUN_LOG_INV_LOGIT_DIFF_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the natural logarithm of the difference of the * inverse logits of the specified arguments. * \f[ \mathrm{log\_inv\_logit\_diff}(x,y) = \ln\left(\frac{1}{1+\exp(-x)}-\frac{1}{1+\exp(-y)}\right) \f] \f[ \frac{\partial }{\partial x} = -\frac{e^x}{e^y-e^x}-\frac{e^x}{e^x+1} \f] \f[ \frac{\partial }{\partial x} = -\frac{e^y}{e^x-e^y}-\frac{e^y}{e^y+1} \f] * * @tparam T1 type of x argument * @tparam T2 type of y argument * @param x first argument * @param y second argument * @return Result of log difference of inverse logits of arguments. */ template * = nullptr> inline return_type_t log_inv_logit_diff(const T1& x, const T2& y) { if (is_inf(x) && x >= 0) { return -log1p_exp(y); } return x - log1p_exp(x) + log1m_exp(y - x) - log1p_exp(y); } /** * Enables the vectorized application of the log_inv_logit_diff function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return log_inv_logit_diff function applied to the two inputs. 
*/ template * = nullptr> inline auto log_inv_logit_diff(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return log_inv_logit_diff(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/eigenvectors.hpp0000644000176200001440000000376414645137106023737 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGENVECTORS_HPP #define STAN_MATH_PRIM_FUN_EIGENVECTORS_HPP #include #include namespace stan { namespace math { /** * Return the eigenvectors of a (real-valued) matrix * * @tparam EigMat type of real matrix argument * @param[in] m matrix to find the eigenvectors of. Must be square and have a * non-zero size. * @return a complex-valued matrix with columns representing the eigenvectors of * `m` */ template * = nullptr, require_not_vt_complex* = nullptr> inline Eigen::Matrix>, -1, -1> eigenvectors(const EigMat& m) { if (unlikely(m.size() == 0)) { return Eigen::Matrix>, -1, -1>(0, 0); } check_square("eigenvectors", "m", m); using PlainMat = plain_type_t; const PlainMat& m_eval = m; Eigen::EigenSolver solver(m_eval); return solver.eigenvectors(); } /** * Return the eigenvectors of a (complex-valued) matrix * * @tparam EigCplxMat type of complex matrix argument * @param[in] m matrix to find the eigenvectors of. Must be square and have a * non-zero size. 
* @return a complex-valued matrix with columns representing the eigenvectors of * `m` */ template * = nullptr> inline Eigen::Matrix>, -1, -1> eigenvectors(const EigCplxMat& m) { if (unlikely(m.size() == 0)) { return Eigen::Matrix>, -1, -1>(0, 0); } check_square("eigenvectors", "m", m); using PlainMat = Eigen::Matrix, -1, -1>; const PlainMat& m_eval = m; Eigen::ComplexEigenSolver solver(m_eval); return solver.eigenvectors(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/erf.hpp0000644000176200001440000000175714645137106022016 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ERF_HPP #define STAN_MATH_PRIM_FUN_ERF_HPP #include #include #include namespace stan { namespace math { /** * Structure to wrap `erf()` so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Error function of x. */ struct erf_fun { template static inline auto fun(const T& x) { using std::erf; return erf(x); } }; /** * Returns the elementwise `erf()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam T type of container * @param x container * @return Error function applied to each value in x. */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto erf(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/as_bool.hpp0000644000176200001440000000075614645137106022656 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AS_BOOL_HPP #define STAN_MATH_PRIM_FUN_AS_BOOL_HPP #include namespace stan { namespace math { /** * Return true if the argument is not equal to zero (in the * != operator sense) and false otherwise. 
* * @tparam T type of scalar * @param x value * @return true if value is not equal to zero */ template inline bool as_bool(const T& x) { return x != 0; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/identity_constrain.hpp0000644000176200001440000000160214645137106025140 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IDENTITY_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_IDENTITY_CONSTRAIN_HPP #include namespace stan { namespace math { /** * Returns the result of applying the identity constraint * transform to the input. This promotes the input to the least upper * bound of the input types. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T type of value to promote * @tparam Types Other types. * @param[in] x object * @return transformed input */ template * = nullptr> inline auto identity_constrain(T&& x, Types&&... /* args */) { return promote_scalar_t, T>(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/Eigen.hpp0000644000176200001440000000414714645137106022265 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGEN_HPP #define STAN_MATH_PRIM_FUN_EIGEN_HPP #ifdef EIGEN_MATRIXBASE_PLUGIN #ifndef EIGEN_STAN_MATRIXBASE_PLUGIN #error "Stan uses Eigen's EIGEN_MATRIXBASE_PLUGIN macro. To use your own " "plugin add the eigen_plugin.h file to your plugin." #endif #else #define EIGEN_MATRIXBASE_PLUGIN "stan/math/prim/eigen_plugins.h" #endif #ifdef EIGEN_ARRAYBASE_PLUGIN #ifndef EIGEN_STAN_ARRAYBASE_PLUGIN #error "Stan uses Eigen's EIGEN_ARRAYBASE_PLUGIN macro. To use your own " "plugin add the eigen_plugin.h file to your plugin." #endif #else #define EIGEN_ARRAYBASE_PLUGIN "stan/math/prim/eigen_plugins.h" #endif #include #include #include #include #include namespace Eigen { /** * Traits specialization for Eigen binary operations for `int` * and `double` arguments. 
* * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits { using ReturnType = double; }; /** * Traits specialization for Eigen binary operations for `double` * and `int` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits { using ReturnType = double; }; /** * Traits specialization for Eigen binary operations for `int` * and complex `double` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, BinaryOp> { using ReturnType = std::complex; }; /** * Traits specialization for Eigen binary operations for complex * `double` and `int` arguments. * * @tparam BinaryOp type of binary operation for which traits are * defined */ template struct ScalarBinaryOpTraits, int, BinaryOp> { using ReturnType = std::complex; }; } // namespace Eigen #endif StanHeaders/inst/include/stan/math/prim/fun/lgamma.hpp0000644000176200001440000000714014645137106022470 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LGAMMA_HPP #define STAN_MATH_PRIM_FUN_LGAMMA_HPP /** * The lgamma implementation in stan-math is based on either the * reentrant safe lgamma_r implementation from C or the * boost::math::lgamma implementation. The reentrant safe lgamma_r * implementation is preferred as it is faster when compared to the * boost version. However, the reentrant safe lgamma_r C function is * not available with MinGW compilers used on Windows. Therefore, the * boost version is used on Windows with MinGW compilers as fall * back. For details on the speed evaluations, please refer to * https://github.com/stan-dev/math/pull/1255 . */ #if !__MINGW32__ && !_BOOST_LGAMMA // _REENTRANT must be defined during compilation to ensure that cmath // exports the reentrant safe lgamma_r version. #if !_REENTRANT #error \ "stan-math requires _REENTRANT being defined during compilation" \ "to make lgamma_r available." 
#endif #include #else // MinGW compilers on Windows do not provide the reentrant lgamma_r // such that we fall back to boost whenever we are on MinGW. #include #include #include #endif #include #include namespace stan { namespace math { /** * Return the natural logarithm of the gamma function applied to * the specified argument. * \f[ \mbox{lgamma}(x) = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \ln\Gamma(x) & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{lgamma}(x)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \Psi(x) & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param x argument * @return natural logarithm of the gamma function applied to * argument */ inline double lgamma(double x) { #if !__MINGW32__ && !_BOOST_LGAMMA int sign = 1; return ::lgamma_r(x, &sign); #else if (unlikely(x == 0.0)) return std::numeric_limits::infinity(); return boost::math::lgamma(x, boost_policy_t<>()); #endif } /** * Return the natural logarithm of the gamma function applied * to the specified argument. * * @param x argument * @return natural logarithm of the gamma function applied to * argument */ inline double lgamma(int x) { #if !__MINGW32__ && !_BOOST_LGAMMA int sign = 1; return ::lgamma_r(x, &sign); #else if (unlikely(x == 0.0)) return std::numeric_limits::infinity(); return boost::math::lgamma(x, boost_policy_t<>()); #endif } /** * Structure to wrap lgamma() so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Natural log of the gamma function applied to x. * @throw std::domain_error if x is a negative integer or 0. */ struct lgamma_fun { template static inline auto fun(const T& x) { return lgamma(x); } }; /** * Vectorized version of lgamma(). 
* * @tparam T type of container * @param x container * @return Natural log of the gamma function * applied to each value in x. * @throw std::domain_error if any value is a negative integer or 0. */ template * = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto lgamma(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/norm1.hpp0000644000176200001440000000217014645137106022264 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_NORM1_HPP #define STAN_MATH_PRIM_FUN_NORM1_HPP #include #include #include namespace stan { namespace math { /** * Returns L1 norm of a vector. For vectors that equals the * sum of magnitudes of its individual elements. * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase) * @param v Vector. * @return L1 norm of v. */ template * = nullptr> inline double norm1(const T& v) { ref_type_t v_ref = v; return v_ref.template lpNorm<1>(); } /** * Returns L1 norm of a vector. For vectors that equals the * sum of magnitudes of its individual elements. * * @tparam Container type of the vector (must be derived from \c std::Vector) * @param v Vector. * @return L1 norm of v. 
*/ template * = nullptr> inline auto norm1(const Container& x) { return apply_vector_unary::reduce( x, [](const auto& v) { return norm1(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/read_cov_L.hpp0000644000176200001440000000311314645137106023263 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_READ_COV_L_HPP #define STAN_MATH_PRIM_FUN_READ_COV_L_HPP #include #include #include #include #include namespace stan { namespace math { /** * This is the function that should be called prior to evaluating * the density of any elliptical distribution * * @tparam T_CPCs type of \c T_CPCs vector (must be derived from \c * Eigen::ArrayBase and have one compile-time dimension equal to 1) * @tparam T_sds type of \c sds vector (must be derived from \c Eigen::ArrayBase * and have one compile-time dimension equal to 1) * @param CPCs on (-1, 1) * @param sds on (0, inf) * @param log_prob the log probability value to increment with the Jacobian * @return Cholesky factor of covariance matrix for specified * partial correlations. */ template * = nullptr, require_vt_same* = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> read_cov_L( const T_CPCs& CPCs, const T_sds& sds, value_type_t& log_prob) { size_t K = sds.rows(); // adjust due to transformation from correlations to covariances log_prob += (sum(log(sds)) + LOG_TWO) * K; return make_holder( [](const auto& b, const auto& sds) { return sds.matrix().asDiagonal() * b; }, read_corr_L(CPCs, K, log_prob), to_ref(sds)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/is_nonpositive_integer.hpp0000644000176200001440000000110314645137106026010 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IS_NONPOSITIVE_INTEGER_HPP #define STAN_MATH_PRIM_FUN_IS_NONPOSITIVE_INTEGER_HPP #include #include #include namespace stan { namespace math { /** * Returns true if the input is a nonpositive integer and false otherwise. * * @param x Value to test. 
* @return true if the value is an integer */ template inline bool is_nonpositive_integer(T x) { using std::floor; return x <= 0.0 && floor(x) == x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv_erfc.hpp0000644000176200001440000000272414645137106023030 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_ERFC_HPP #define STAN_MATH_PRIM_FUN_INV_ERFC_HPP #include #include #include namespace stan { namespace math { /** * The inverse complementary error function for variables. * * @tparam T type of scalar * @param x scalar * @return Inverse complementary error function applied to x. */ template * = nullptr> inline auto inv_erfc(const T& x) { return boost::math::erfc_inv(x); } /** * Structure to wrap the `inv_erfc() function` * so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Inverse of the complementary error function applied to x. */ struct inv_erfc_fun { template static inline auto fun(const T& x) { return inv_erfc(x); } }; /** * Returns the elementwise `inv_erfc()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam T type of container * @param x container * @return Inverse complementary error function applied to each value in x. 
*/ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr, require_not_arithmetic_t* = nullptr> inline auto inv_erfc(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fabs.hpp0000644000176200001440000000372314645137106022150 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FABS_HPP #define STAN_MATH_PRIM_FUN_FABS_HPP #include #include #include #include #include namespace stan { namespace math { template * = nullptr> inline auto fabs(T x) { return std::abs(x); } template * = nullptr> inline auto fabs(T x) { return hypot(x.real(), x.imag()); } /** * Structure to wrap `fabs()` so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Absolute value of x. */ struct fabs_fun { template static inline auto fun(const T& x) { return fabs(x); } }; /** * Returns the elementwise `fabs()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x container * @return Absolute value of each value in x. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr, require_not_stan_scalar_t* = nullptr> inline auto fabs(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `fabs()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Absolute value of each value in x. 
*/ template * = nullptr> inline auto fabs(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().abs(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/grad_inc_beta.hpp0000644000176200001440000000237314645137106023776 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GRAD_INC_BETA_HPP #define STAN_MATH_PRIM_FUN_GRAD_INC_BETA_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // Gradient of the incomplete beta function beta(a, b, z) // with respect to the first two arguments, using the // equivalence to a hypergeometric function. // See http://dlmf.nist.gov/8.17#ii inline void grad_inc_beta(double& g1, double& g2, double a, double b, double z) { using std::exp; using std::log; double c1 = log(z); double c2 = log1m(z); double c3 = beta(a, b) * inc_beta(a, b, z); double C = exp(a * c1 + b * c2) / a; double dF1 = 0; double dF2 = 0; double dF3 = 0; double dFz = 0; if (C) { std::forward_as_tuple(dF1, dF2, dF3, dFz) = grad_2F1(a + b, 1.0, a + 1, z); } g1 = fma((c1 - inv(a)), c3, C * (dF1 + dF3)); g2 = fma(c2, c3, C * dF1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lgamma_stirling_diff.hpp0000644000176200001440000000467114645137106025401 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LGAMMA_STIRLING_DIFF_HPP #define STAN_MATH_PRIM_FUN_LGAMMA_STIRLING_DIFF_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { constexpr double lgamma_stirling_diff_useful = 10; /** * Return the difference between log of the gamma function and its Stirling * approximation. * This is useful to stably compute log of ratios of gamma functions with large * arguments where the Stirling approximation allows for analytic solution * and the (small) differences can be added afterwards. 
* This is for example used in the implementation of lbeta. * * The function will return correct value for all arguments, but using it can * lead to a loss of precision when x < lgamma_stirling_diff_useful. * \f[ \mbox{lgamma_stirling_diff}(x) = \log(\Gamma(x)) - \frac{1}{2} \log(2\pi) + (x-\frac{1}{2})*\log(x) - x \f] * * @tparam T type of value * @param x value * @return Difference between lgamma(x) and its Stirling approximation. */ template return_type_t lgamma_stirling_diff(const T x) { using T_ret = return_type_t; if (is_nan(value_of_rec(x))) { return NOT_A_NUMBER; } check_nonnegative("lgamma_stirling_diff", "argument", x); if (x == 0) { return INFTY; } if (value_of(x) < lgamma_stirling_diff_useful) { return lgamma(x) - lgamma_stirling(x); } // Using the Stirling series as expressed in formula 5.11.1. at // https://dlmf.nist.gov/5.11 constexpr double stirling_series[]{ 0.0833333333333333333333333, -0.00277777777777777777777778, 0.000793650793650793650793651, -0.000595238095238095238095238, 0.000841750841750841750841751, -0.00191752691752691752691753, 0.00641025641025641025641026, -0.0295506535947712418300654}; constexpr int n_stirling_terms = 6; T_ret result(0.0); T_ret multiplier = inv(x); T_ret inv_x_squared = square(multiplier); for (int n = 0; n < n_stirling_terms; n++) { if (n > 0) { multiplier *= inv_x_squared; } result += stirling_series[n] * multiplier; } return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lub_constrain.hpp0000644000176200001440000003474114645137106024103 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LUB_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_LUB_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the lower and upper-bounded scalar derived by * transforming the specified free scalar given the specified * lower and upper bounds. 
* *

The transform is the transformed and scaled inverse logit, * *

\f$f(x) = L + (U - L) \mbox{logit}^{-1}(x)\f$ * * @tparam T Scalar. * @tparam L Scalar. * @tparam U Scalar. * @param[in] x Free scalar to transform. * @param[in] lb Lower bound. * @param[in] ub Upper bound. * @return Lower- and upper-bounded scalar derived from transforming * the free scalar. * @throw std::domain_error if ub <= lb */ template * = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(T&& x, L&& lb, U&& ub) { const bool is_lb_inf = value_of(lb) == NEGATIVE_INFTY; const bool is_ub_inf = value_of(ub) == INFTY; if (unlikely(is_ub_inf && is_lb_inf)) { return identity_constrain(x, lb, ub); } else if (unlikely(is_ub_inf)) { return lb_constrain(identity_constrain(x, ub), lb); } else if (unlikely(is_lb_inf)) { return ub_constrain(identity_constrain(x, lb), ub); } else { check_less("lub_constrain", "lb", value_of(lb), value_of(ub)); return (ub - lb) * inv_logit(x) + lb; } } /** * Return the lower- and upper-bounded scalar derived by * transforming the specified free scalar given the specified * lower and upper bounds and increment the specified log * density with the log absolute Jacobian determinant. * *

The transform is as defined in * lub_constrain(T, double, double). The log absolute * Jacobian determinant is given by * *

\f$\log \left| \frac{d}{dx} \left( * L + (U-L) \mbox{logit}^{-1}(x) \right) * \right|\f$ * *

\f$ {} = \log | * (U-L) * \, (\mbox{logit}^{-1}(x)) * \, (1 - \mbox{logit}^{-1}(x)) |\f$ * *

\f$ {} = \log (U - L) + \log (\mbox{logit}^{-1}(x)) * + \log (1 - \mbox{logit}^{-1}(x))\f$ * * @tparam T Scalar. * @tparam L Scalar. * @tparam U Scalar. * @param[in] x Free scalar to transform. * @param[in] lb Lower bound. * @param[in] ub Upper bound. * @param[in,out] lp Log probability scalar reference. * @return Lower- and upper-bounded scalar derived from transforming * the free scalar. * @throw std::domain_error if ub <= lb */ template * = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(T&& x, L&& lb, U&& ub, return_type_t& lp) { const bool is_lb_inf = value_of(lb) == NEGATIVE_INFTY; const bool is_ub_inf = value_of(ub) == INFTY; if (unlikely(is_ub_inf && is_lb_inf)) { return identity_constrain(x, ub, lb); } else if (unlikely(is_ub_inf)) { return lb_constrain(identity_constrain(x, ub), lb, lp); } else if (unlikely(is_lb_inf)) { return ub_constrain(identity_constrain(x, lb), ub, lp); } else { check_less("lub_constrain", "lb", value_of(lb), value_of(ub)); const auto diff = ub - lb; lp += add(log(diff), subtract(-abs(x), multiply(2.0, log1p_exp(-abs(x))))); return diff * inv_logit(x) + lb; } } /** * Overload for Eigen matrix and scalar bounds. */ template * = nullptr, require_all_stan_scalar_t* = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { return eval( x.unaryExpr([ub, lb](auto&& xx) { return lub_constrain(xx, lb, ub); })); } /** * Overload for Eigen matrix and scalar bounds plus lp. */ template * = nullptr, require_all_stan_scalar_t* = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { return eval(x.unaryExpr( [lb, ub, &lp](auto&& xx) { return lub_constrain(xx, lb, ub, lp); })); } /** * Overload for Eigen matrix with matrix lower bound and scalar upper * bound. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { check_matching_dims("lub_constrain", "x", x, "lb", lb); return eval(x.binaryExpr( lb, [ub](auto&& x, auto&& lb) { return lub_constrain(x, lb, ub); })); } /** * Overload for Eigen matrix with matrix lower bound and scalar upper * bound plus lp. */ template * = nullptr, require_stan_scalar_t* = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { check_matching_dims("lub_constrain", "x", x, "lb", lb); return eval(x.binaryExpr(lb, [ub, &lp](auto&& x, auto&& lb) { return lub_constrain(x, lb, ub, lp); })); } /** * Overload for Eigen matrix with scalar lower bound and matrix upper * bound. */ template * = nullptr, require_stan_scalar_t* = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { check_matching_dims("lub_constrain", "x", x, "ub", ub); return eval(x.binaryExpr( ub, [lb](auto&& x, auto&& ub) { return lub_constrain(x, lb, ub); })); } /** * Overload for Eigen matrix with scalar lower bound and matrix upper * bound plus lp. */ template * = nullptr, require_stan_scalar_t* = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { check_matching_dims("lub_constrain", "x", x, "ub", ub); return eval(x.binaryExpr(ub, [lb, &lp](auto&& x, auto&& ub) { return lub_constrain(x, lb, ub, lp); })); } /** * Overload for Eigen matrix and matrix bounds. 
*/ template * = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub) { check_matching_dims("lub_constrain", "x", x, "lb", lb); check_matching_dims("lub_constrain", "x", x, "ub", ub); auto x_ref = to_ref(x); auto lb_ref = to_ref(lb); auto ub_ref = to_ref(ub); promote_scalar_t, T> x_ret(x.rows(), x.cols()); for (Eigen::Index j = 0; j < x_ref.cols(); ++j) { for (Eigen::Index i = 0; i < x_ref.rows(); ++i) { x_ret.coeffRef(i, j) = lub_constrain( x_ref.coeff(i, j), lb_ref.coeff(i, j), ub_ref.coeff(i, j)); } } return x_ret; } /** * Overload for Eigen matrix and matrix bounds plus lp. */ template * = nullptr, require_not_var_t>* = nullptr> inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { check_matching_dims("lub_constrain", "x", x, "lb", lb); check_matching_dims("lub_constrain", "x", x, "ub", ub); auto x_ref = to_ref(x); auto lb_ref = to_ref(lb); auto ub_ref = to_ref(ub); promote_scalar_t, T> x_ret(x.rows(), x.cols()); for (Eigen::Index j = 0; j < x_ref.cols(); ++j) { for (Eigen::Index i = 0; i < x_ref.rows(); ++i) { x_ret.coeffRef(i, j) = lub_constrain( x_ref.coeff(i, j), lb_ref.coeff(i, j), ub_ref.coeff(i, j), lp); } } return x_ret; } /** * Overload for array of x and non-array lb and ub */ template * = nullptr> inline auto lub_constrain(const std::vector& x, const L& lb, const U& ub) { std::vector> ret( x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb, ub); } return ret; } /** * Overload for array of x and non-array lb and ub with lp */ template * = nullptr> inline auto lub_constrain(const std::vector& x, const L& lb, const U& ub, return_type_t& lp) { std::vector> ret( x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb, ub, lp); } return ret; } /** * Overload for array of x and ub and non-array lb */ template * = nullptr> inline auto lub_constrain(const std::vector& x, const L& lb, const std::vector& ub) { 
check_matching_dims("lub_constrain", "x", x, "ub", ub); std::vector> ret( x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb, ub[i]); } return ret; } /** * Overload for array of x and ub and non-array lb with lp */ template * = nullptr> inline auto lub_constrain(const std::vector& x, const L& lb, const std::vector& ub, return_type_t& lp) { check_matching_dims("lub_constrain", "x", x, "ub", ub); std::vector> ret( x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb, ub[i], lp); } return ret; } /** * Overload for array of x and lb and non-array ub */ template * = nullptr> inline auto lub_constrain(const std::vector& x, const std::vector& lb, const U& ub) { check_matching_dims("lub_constrain", "x", x, "lb", lb); std::vector> ret( x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb[i], ub); } return ret; } /** * Overload for array of x and lb and non-array ub with lp */ template * = nullptr> inline auto lub_constrain(const std::vector& x, const std::vector& lb, const U& ub, return_type_t& lp) { check_matching_dims("lub_constrain", "x", x, "lb", lb); std::vector> ret( x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb[i], ub, lp); } return ret; } /** * Overload for array of x, lb, and ub with lp */ template inline auto lub_constrain(const std::vector& x, const std::vector& lb, const std::vector& ub) { check_matching_dims("lub_constrain", "x", x, "lb", lb); check_matching_dims("lub_constrain", "x", x, "ub", ub); std::vector> ret( x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb[i], ub[i]); } return ret; } /** * Overload for array of x, lb, and ub */ template inline auto lub_constrain(const std::vector& x, const std::vector& lb, const std::vector& ub, return_type_t& lp) { check_matching_dims("lub_constrain", "x", x, "lb", lb); check_matching_dims("lub_constrain", "x", x, "ub", ub); std::vector> ret( x.size()); for 
(size_t i = 0; i < x.size(); ++i) { ret[i] = lub_constrain(x[i], lb[i], ub[i], lp); } return ret; } /** * Return the lower and upper-bounded scalar derived by * transforming the specified free scalar given the specified * lower and upper bounds. If the `Jacobian` parameter is `true`, the log * density accumulator is incremented with the log absolute Jacobian determinant * of the transform. All of the transforms are specified with their Jacobians * in the *Stan Reference Manual* chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @tparam L A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @tparam U A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @param[in] x Free scalar to transform * @param[in] lb Lower bound * @param[in] ub Upper bound * @param[in, out] lp log density accumulator * @return Lower- and upper-bounded scalar derived from transforming the free * scalar * @throw std::domain_error if `ub <= lb` */ template inline auto lub_constrain(const T& x, const L& lb, const U& ub, return_type_t& lp) { if (Jacobian) { return lub_constrain(x, lb, ub, lp); } else { return lub_constrain(x, lb, ub); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cols.hpp0000644000176200001440000000105614645137106022172 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COLS_HPP #define STAN_MATH_PRIM_FUN_COLS_HPP #include #include namespace stan { namespace math { /** * Return the number of columns in the specified * matrix, vector, or row vector. 
* * @tparam T type of the matrix * @param[in] m Input matrix, vector, or row vector. * @return Number of columns. */ template * = nullptr> inline Eigen::Index cols(const T& m) { return m.cols(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/norm2.hpp0000644000176200001440000000220214645137106022261 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_NORM2_HPP #define STAN_MATH_PRIM_FUN_NORM2_HPP #include #include #include namespace stan { namespace math { /** * Returns L2 norm of a vector. For vectors that equals the square-root of the * sum of squares of the elements. * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase) * @param v Vector. * @return L2 norm of v. */ template * = nullptr> inline double norm2(const T& v) { ref_type_t v_ref = v; return v_ref.template lpNorm<2>(); } /** * Returns L2 norm of a vector. For vectors that equals the square-root of the * sum of squares of the elements. * * @tparam Container type of the vector (must be derived from \c std::vector) * @param v Vector. * @return L2 norm of v. */ template * = nullptr> inline auto norm2(const Container& x) { return apply_vector_unary::reduce( x, [](const auto& v) { return norm2(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sinh.hpp0000644000176200001440000000375514645137106022203 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SINH_HPP #define STAN_MATH_PRIM_FUN_SINH_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap sinh() so that it can be vectorized. * * @tparam T type of argument * @param x angle in radians * @return Hyperbolic sine of x. */ struct sinh_fun { template static inline auto fun(const T& x) { using std::sinh; return sinh(x); } }; /** * Vectorized version of sinh(). * * @tparam Container type of container * @param x container * @return Hyperbolic sine of each variable in x. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto sinh(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of sinh() that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Hyperbolic sine of each variable in x. */ template * = nullptr> inline auto sinh(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().sinh(); }); } namespace internal { /** * Return the hyperbolic sine of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return hyperbolic sine of the argument */ template inline std::complex complex_sinh(const std::complex& z) { return 0.5 * (exp(z) - exp(-z)); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/distance.hpp0000644000176200001440000000330114645137106023017 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DISTANCE_HPP #define STAN_MATH_PRIM_FUN_DISTANCE_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the distance between two scalars. * * @tparam T1 type of first scalar. * @tparam T2 type of second scalar * @param x1 First scalar. * @param x2 Second scalar. * @return Distance between two scalars * @throw std::domain_error If the arguments are not finite. */ template * = nullptr> inline return_type_t distance(const T1& x1, const T2& x2) { check_finite("distance", "x1", x1); check_finite("distance", "x2", x2); return abs(x1 - x2); } /** * Returns the distance between the specified vectors. 
* * @tparam T1 type of the first vector (must be derived from \c * Eigen::MatrixBase and have one compile time dimension equal to 1) * @tparam T2 type of the second vector (must be derived from \c * Eigen::MatrixBase and have one compile time dimension equal to 1) * @param x1 First vector. * @param x2 Second vector. * @return Distance between the vectors. * @throw std::domain_error If the vectors are not the same * size. */ template * = nullptr> inline return_type_t distance(const T1& x1, const T2& x2) { using std::sqrt; check_matching_sizes("distance", "x1", x1, "x2", x2); return sqrt(squared_distance(x1, x2)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lbeta.hpp0000644000176200001440000001035514645137106022323 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LBETA_HPP #define STAN_MATH_PRIM_FUN_LBETA_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the log of the beta function applied to the specified * arguments. * * The beta function is defined for \f$a > 0\f$ and \f$b > 0\f$ by * * \f$\mbox{B}(a, b) = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a+b)}\f$. * * This function returns its log, * * \f$\log \mbox{B}(a, b) = \log \Gamma(a) + \log \Gamma(b) - \log \Gamma(a+b)\f$. * * See stan::math::lgamma() for the double-based and stan::math for the * variable-based log Gamma function. * This function is numerically more stable than naive evaluation via lgamma. 
* \f[ \mbox{lbeta}(\alpha, \beta) = \begin{cases} \ln\int_0^1 u^{\alpha - 1} (1 - u)^{\beta - 1} \, du & \mbox{if } \alpha, \beta>0 \\[6pt] \textrm{NaN} & \mbox{if } \alpha = \textrm{NaN or } \beta = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{lbeta}(\alpha, \beta)}{\partial \alpha} = \begin{cases} \Psi(\alpha)-\Psi(\alpha+\beta) & \mbox{if } \alpha, \beta>0 \\[6pt] \textrm{NaN} & \mbox{if } \alpha = \textrm{NaN or } \beta = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{lbeta}(\alpha, \beta)}{\partial \beta} = \begin{cases} \Psi(\beta)-\Psi(\alpha+\beta) & \mbox{if } \alpha, \beta>0 \\[6pt] \textrm{NaN} & \mbox{if } \alpha = \textrm{NaN or } \beta = \textrm{NaN} \end{cases} \f] * * @tparam T1 type of first value * @tparam T2 type of second value * @param a First value * @param b Second value * @return Log of the beta function applied to the two values. */ template * = nullptr> return_type_t lbeta(const T1 a, const T2 b) { using T_ret = return_type_t; if (is_any_nan(a, b)) { return NOT_A_NUMBER; } static const char* function = "lbeta"; check_nonnegative(function, "first argument", a); check_nonnegative(function, "second argument", b); T_ret x; // x is the smaller of the two T_ret y; if (a < b) { x = a; y = b; } else { x = b; y = a; } // Special cases if (x == 0) { return INFTY; } if (is_inf(y)) { return NEGATIVE_INFTY; } // For large x or y, separate the lgamma values into Stirling approximations // and appropriate corrections. The Stirling approximations allow for // analytic simplification and the corrections are added later. // // The overall approach is inspired by the code in R, where the algorithm is // credited to W. 
Fullerton of Los Alamos Scientific Laboratory if (y < lgamma_stirling_diff_useful) { // both small return lgamma(x) + lgamma(y) - lgamma(x + y); } T_ret x_over_xy = x / (x + y); if (x < lgamma_stirling_diff_useful) { // y large, x small T_ret stirling_diff = lgamma_stirling_diff(y) - lgamma_stirling_diff(x + y); T_ret stirling = (y - 0.5) * log1m(x_over_xy) + x * (1 - log(x + y)); return stirling + lgamma(x) + stirling_diff; } // both large T_ret stirling_diff = lgamma_stirling_diff(x) + lgamma_stirling_diff(y) - lgamma_stirling_diff(x + y); T_ret stirling = (x - 0.5) * log(x_over_xy) + y * log1m(x_over_xy) + HALF_LOG_TWO_PI - 0.5 * log(y); return stirling + stirling_diff; } /** * Enables the vectorized application of the lbeta function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return lbeta function applied to the two inputs. */ template * = nullptr> inline auto lbeta(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return lbeta(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rows.hpp0000644000176200001440000000103714645137106022223 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ROWS_HPP #define STAN_MATH_PRIM_FUN_ROWS_HPP #include #include namespace stan { namespace math { /** * Return the number of rows in the specified * matrix, vector, or row vector. * * @tparam T type of the matrix * @param[in] m Input matrix, vector, or row vector. * @return Number of rows. 
*/ template * = nullptr> inline int rows(const T& m) { return m.rows(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cholesky_corr_constrain.hpp0000644000176200001440000001131214645137106026154 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CHOLESKY_CORR_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_CHOLESKY_CORR_CONSTRAIN_HPP #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cholesky_corr_constrain(const EigVec& y, int K) { using Eigen::Dynamic; using Eigen::Matrix; using std::sqrt; using T_scalar = value_type_t; int k_choose_2 = (K * (K - 1)) / 2; check_size_match("cholesky_corr_constrain", "constrain size", y.size(), "k_choose_2", k_choose_2); Matrix z = corr_constrain(y); Matrix x(K, K); if (K == 0) { return x; } x.setZero(); x.coeffRef(0, 0) = 1; int k = 0; for (int i = 1; i < K; ++i) { x.coeffRef(i, 0) = z.coeff(k++); T_scalar sum_sqs = square(x.coeff(i, 0)); for (int j = 1; j < i; ++j) { x.coeffRef(i, j) = z.coeff(k++) * sqrt(1.0 - sum_sqs); sum_sqs += square(x.coeff(i, j)); } x.coeffRef(i, i) = sqrt(1.0 - sum_sqs); } return x; } // FIXME to match above after debugged template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cholesky_corr_constrain(const EigVec& y, int K, return_type_t& lp) { using Eigen::Dynamic; using Eigen::Matrix; using std::sqrt; using T_scalar = value_type_t; int k_choose_2 = (K * (K - 1)) / 2; check_size_match("cholesky_corr_constrain", "y.size()", y.size(), "k_choose_2", k_choose_2); Matrix z = corr_constrain(y, lp); Matrix x(K, K); if (K == 0) { return x; } x.setZero(); x.coeffRef(0, 0) = 1; int k = 0; for (int i = 1; i < K; ++i) { x.coeffRef(i, 0) = z.coeff(k++); T_scalar sum_sqs = square(x.coeff(i, 0)); for (int j = 1; j < i; ++j) { lp += 0.5 * log1m(sum_sqs); x.coeffRef(i, j) = z.coeff(k++) * sqrt(1.0 - sum_sqs); sum_sqs += square(x.coeff(i, j)); } 
x.coeffRef(i, i) = sqrt(1.0 - sum_sqs); } return x; } /** * Return The cholesky of a `KxK` correlation matrix. If the `Jacobian` * parameter is `true`, the log density accumulator is incremented with the log * absolute Jacobian determinant of the transform. All of the transforms are * specified with their Jacobians in the *Stan Reference Manual* chapter * Constraint Transforms. * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time dynamic rows * and 1 column * @param y Linearly Serialized vector of size `(K * (K - 1))/2` holding the * column major order elements of the lower triangurlar * @param K The size of the matrix to return * @param[in,out] lp log density accumulator */ template * = nullptr> inline auto cholesky_corr_constrain(const T& y, int K, return_type_t& lp) { if (Jacobian) { return cholesky_corr_constrain(y, K, lp); } else { return cholesky_corr_constrain(y, K); } } /** * Return The cholesky of a `KxK` correlation matrix. If the `Jacobian` * parameter is `true`, the log density accumulator is incremented with the log * absolute Jacobian determinant of the transform. All of the transforms are * specified with their Jacobians in the *Stan Reference Manual* chapter * Constraint Transforms. 
* @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time dynamic rows and 1 column * @param y Linearly Serialized vector of size `(K * (K - 1))/2` holding the * column major order elements of the lower triangurlar * @param K The size of the matrix to return * @param[in,out] lp log density accumulator */ template * = nullptr> inline auto cholesky_corr_constrain(const T& y, int K, return_type_t& lp) { return apply_vector_unary::apply(y, [&lp, K](auto&& v) { return cholesky_corr_constrain(v, K, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/as_array_or_scalar.hpp0000644000176200001440000000522614645137106025063 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AS_ARRAY_OR_SCALAR_HPP #define STAN_MATH_PRIM_FUN_AS_ARRAY_OR_SCALAR_HPP #include #include #include namespace stan { namespace math { /** * Returns specified input value. * * @tparam T Type of element. * @param v Specified value. * @return Same value. */ template * = nullptr> inline T as_array_or_scalar(T&& v) { return std::forward(v); } /** * Returns a reference to rvalue specified input value. * * @tparam T Type of element. * @param v Specified value. * @return Same value. */ template * = nullptr> inline T& as_array_or_scalar(T& v) { return v; } /** * Returns specified input value. * * @tparam T Type of element. * @param v Specified value. * @return Same value. */ template * = nullptr> inline T as_array_or_scalar(T&& v) { return std::forward(v); } /** * Converts a matrix type to an array. * * @tparam T Type of \c Eigen \c Matrix or expression * @param v Specified \c Eigen \c Matrix or expression. * @return Matrix converted to an array. 
*/ template , require_not_eigen_array_t* = nullptr> inline auto as_array_or_scalar(T&& v) { return make_holder([](auto& x) { return x.array(); }, std::forward(v)); } /** * Converts a std::vector type to an array. * * @tparam T Type of scalar element. * @param v Specified vector. * @return Matrix converted to an array. */ template * = nullptr, require_not_std_vector_t>* = nullptr> inline auto as_array_or_scalar(T&& v) { using T_map = Eigen::Map, Eigen::Dynamic, 1>>; return make_holder([](auto& x) { return T_map(x.data(), x.size()); }, std::forward(v)); } /** * Converts an std::vector to an Eigen Array. * @tparam T A standard vector with inner container of a standard vector * with an inner stan scalar. * @param v specified vector of vectorized * @return An Eigen Array with dynamic rows and columns. */ template * = nullptr, require_std_vector_vt>* = nullptr> inline auto as_array_or_scalar(T&& v) { Eigen::Array, -1, -1> ret(v.size(), v[0].size()); for (size_t i = 0; i < v.size(); ++i) { ret.row(i) = Eigen::Map, 1, -1>>( v[i].data(), v[i].size()); } return ret; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/MatrixExponential.h0000644000176200001440000002035314645137106024346 0ustar liggesusers// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009, 2010, 2013 Jitse Niesen // Copyright (C) 2011, 2013 Chen-Pang He // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // // This file was edited for to the Stan math library to create // the matrix exponential function (matrix_exp), 2016. #ifndef STAN_MATH_PRIM_FUN_MATRIXEXPONENTIAL_H #define STAN_MATH_PRIM_FUN_MATRIXEXPONENTIAL_H #include #include namespace Eigen { template struct MatrixExponentialScalingOp { /** \brief Constructor. 
* * \param[in] squarings The integer \f$ s \f$ in this document. */ MatrixExponentialScalingOp(int squarings) : m_squarings(squarings) { } /** \brief Scale a matrix coefficient. * * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. */ inline const RealScalar operator() (const RealScalar& x) const { using std::ldexp; return ldexp(x, -m_squarings); } using ComplexScalar = std::complex; /** \brief Scale a matrix coefficient. * * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. */ inline const ComplexScalar operator() (const ComplexScalar& x) const { using std::ldexp; return ComplexScalar(ldexp(x.real(), -m_squarings), ldexp(x.imag(), -m_squarings)); } private: int m_squarings; }; /** \brief Compute the (3,3)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade3(const MatrixType &A, MatrixType &U, MatrixType &V) { using Eigen::internal::traits; using RealScalar = typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {120.L, 60.L, 12.L, 1.L}; const MatrixType A2 = A * A; const MatrixType tmp = b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (5,5)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. 
*/ template void matrix_exp_pade5(const MatrixType &A, MatrixType &U, MatrixType &V) { using RealScalar = typename Eigen::NumTraits< typename Eigen::internal::traits::Scalar>::Real; const RealScalar b[] = {30240.L, 15120.L, 3360.L, 420.L, 30.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType tmp = b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (7,7)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade7(const MatrixType &A, MatrixType &U, MatrixType &V) { using Eigen::internal::traits; using RealScalar = typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {17297280.L, 8648640.L, 1995840.L, 277200.L, 25200.L, 1512.L, 56.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType A6 = A4 * A2; const MatrixType tmp = b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (9,9)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. 
*/ template void matrix_exp_pade9(const MatrixType &A, MatrixType &U, MatrixType &V) { using Eigen::internal::traits; using RealScalar = typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {17643225600.L, 8821612800.L, 2075673600.L, 302702400.L, 30270240.L, 2162160.L, 110880.L, 3960.L, 90.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType A6 = A4 * A2; const MatrixType A8 = A6 * A2; const MatrixType tmp = b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; V = b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } /** \brief Compute the (13,13)-Padé approximant to the exponential. * * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. */ template void matrix_exp_pade13(const MatrixType &A, MatrixType &U, MatrixType &V) { using Eigen::internal::traits; using RealScalar = typename Eigen::NumTraits::Scalar>::Real; const RealScalar b[] = {64764752532480000.L, 32382376266240000.L, 7771770303897600.L, 1187353796428800.L, 129060195264000.L, 10559470521600.L, 670442572800.L, 33522128640.L, 1323241920.L, 40840800.L, 960960.L, 16380.L, 182.L, 1.L}; const MatrixType A2 = A * A; const MatrixType A4 = A2 * A2; const MatrixType A6 = A4 * A2; V = b[13] * A6 + b[11] * A4 + b[9] * A2; // used for temporary storage MatrixType tmp = A6 * V; tmp += b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); U.noalias() = A * tmp; tmp = b[12] * A6 + b[10] * A4 + b[8] * A2; V.noalias() = A6 * tmp; V += b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); } template ::Scalar>::Real> struct matrix_exp_computeUV { /** \brief Compute Padé approximant to the exponential. 
* * Computes \c U, \c V and \c squarings such that \f$ (V+U)(V-U)^{-1} \f$ is a Padé * approximant of \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$, where \f$ M \f$ * denotes the matrix \c arg. The degree of the Padé approximant and the value of squarings * are chosen such that the approximation error is no more than the round-off error. * *

Edit for Stan: template ComputeUV::run so that it may used on * autodiff variables (var and fvar). This required adding the scalar_type * argument, which tells the function the type of elements used in the * matrix. */ static void run(const MatrixType& arg, MatrixType& U, MatrixType& V, int& squarings); }; template struct matrix_exp_computeUV { template static void run(const MatrixType& arg, MatrixType& U, MatrixType& V, int& squarings, T scalar_type) { using std::frexp; using std::pow; using stan::math::value_of_rec; const T l1norm = arg.cwiseAbs().colwise().sum().maxCoeff(); squarings = 0; if (l1norm < 1.495585217958292e-002) { matrix_exp_pade3(arg, U, V); } else if (l1norm < 2.539398330063230e-001) { matrix_exp_pade5(arg, U, V); } else if (l1norm < 9.504178996162932e-001) { matrix_exp_pade7(arg, U, V); } else if (l1norm < 2.097847961257068e+000) { matrix_exp_pade9(arg, U, V); } else { const double maxnorm = 5.371920351148152; frexp(value_of_rec(l1norm) / value_of_rec(maxnorm), &squarings); if (squarings < 0) { squarings = 0; } MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); matrix_exp_pade13(A, U, V); } } }; } #endif StanHeaders/inst/include/stan/math/prim/fun/eigenvalues.hpp0000644000176200001440000000374714645137106023552 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGENVALUES_HPP #define STAN_MATH_PRIM_FUN_EIGENVALUES_HPP #include #include namespace stan { namespace math { /** * Return the eigenvalues of a (real-valued) matrix * * @tparam EigMat type of real matrix argument * @param[in] m matrix to find the eigenvectors of. Must be square and have a * non-zero size. 
* @return a complex-valued column vector with entries the eigenvectors of `m` */ template * = nullptr, require_not_vt_complex* = nullptr> inline Eigen::Matrix>, -1, 1> eigenvalues( const EigMat& m) { if (unlikely(m.size() == 0)) { return Eigen::Matrix>, -1, 1>(0, 1); } check_square("eigenvalues", "m", m); using PlainMat = plain_type_t; const PlainMat& m_eval = m; Eigen::EigenSolver solver(m_eval, false); return solver.eigenvalues(); } /** * Return the eigenvalues of a (complex-valued) matrix * * @tparam EigCplxMat type of complex matrix argument * @param[in] m matrix to find the eigenvectors of. Must be square and have a * non-zero size. * @return a complex-valued column vector with entries the eigenvectors of `m` */ template * = nullptr> inline Eigen::Matrix>, -1, 1> eigenvalues(const EigCplxMat& m) { if (unlikely(m.size() == 0)) { return Eigen::Matrix>, -1, 1>(0, 1); } check_square("eigenvalues", "m", m); using PlainMat = Eigen::Matrix, -1, -1>; const PlainMat& m_eval = m; Eigen::ComplexEigenSolver solver(m_eval, false); return solver.eigenvalues(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fmod.hpp0000644000176200001440000000241614645137106022160 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FMOD_HPP #define STAN_MATH_PRIM_FUN_FMOD_HPP #include #include #include namespace stan { namespace math { /** * Returns the floating-point remainder of * numerator/denominator (rounded towards zero). * * @tparam T1 type of the numerator (must be arithmetic) * @tparam T2 type of the denominator (must be arithmetic) * @param a the numerator * @param b the denominator * @return the floating-point remainder of a/b. */ template * = nullptr> inline double fmod(const T1& a, const T2& b) { return std::fmod(a, b); } /** * Enables the vectorized application of the fmod function, * when the first and/or second arguments are containers. 
* * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return fmod function applied to the two inputs. */ template * = nullptr> inline auto fmod(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { using std::fmod; return fmod(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/prob_free.hpp0000644000176200001440000000165714645137106023204 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PROB_FREE_HPP #define STAN_MATH_PRIM_FUN_PROB_FREE_HPP #include #include #include namespace stan { namespace math { /** * Return the free scalar that when transformed to a probability * produces the specified scalar. * *

The function that reverses the constraining transform * specified in prob_constrain(T) is the logit * function, * *

\f$f^{-1}(y) = \mbox{logit}(y) = \frac{1 - y}{y}\f$. * * @tparam T type of constrained value * @param y constrained value * @return corresponding unconstrained value * @throw std::domain_error if y is not in (0, 1) */ template inline T prob_free(const T& y) { check_bounded("prob_free", "Probability variable", y, 0, 1); return logit(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/size_mvt.hpp0000644000176200001440000000217614645137106023076 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SIZE_MVT_HPP #define STAN_MATH_PRIM_FUN_SIZE_MVT_HPP #include #include #include #include namespace stan { namespace math { /** \ingroup type_trait * Provides the size of a multivariate argument. * * This is the default template function. For any scalar type, this * will throw an std::invalid_argument exception since a scalar is not * a multivariate structure. * * @tparam T type to take size of. The default template function should * only match scalars. * @throw std::invalid_argument since the type is a scalar. */ template * = nullptr> size_t size_mvt(const ScalarT& /* unused */) { throw std::invalid_argument("size_mvt passed to an unrecognized type."); } template * = nullptr> size_t size_mvt(const MatrixT& /* unused */) { return 1U; } template * = nullptr> size_t size_mvt(const std::vector& x) { return x.size(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/gamma_p.hpp0000644000176200001440000000562414645137106022640 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GAMMA_P_HPP #define STAN_MATH_PRIM_FUN_GAMMA_P_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the value of the normalized, lower-incomplete gamma function * applied to the specified argument. * *

This function is defined, including error conditions, as follows \f[ \mbox{gamma\_p}(a, z) = \begin{cases} \textrm{error} & \mbox{if } a\leq 0 \textrm{ or } z < 0\\ P(a, z) & \mbox{if } a > 0, z \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } a = \textrm{NaN or } z = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{gamma\_p}(a, z)}{\partial a} = \begin{cases} \textrm{error} & \mbox{if } a\leq 0 \textrm{ or } z < 0\\ \frac{\partial\, P(a, z)}{\partial a} & \mbox{if } a > 0, z \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } a = \textrm{NaN or } z = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{gamma\_p}(a, z)}{\partial z} = \begin{cases} \textrm{error} & \mbox{if } a\leq 0 \textrm{ or } z < 0\\ \frac{\partial\, P(a, z)}{\partial z} & \mbox{if } a > 0, z \geq 0 \\[6pt] \textrm{NaN} & \mbox{if } a = \textrm{NaN or } z = \textrm{NaN} \end{cases} \f] \f[ P(a, z)=\frac{1}{\Gamma(a)}\int_0^zt^{a-1}e^{-t}dt \f] \f[ \frac{\partial \, P(a, z)}{\partial a} = -\frac{\Psi(a)}{\Gamma^2(a)}\int_0^zt^{a-1}e^{-t}dt + \frac{1}{\Gamma(a)}\int_0^z (a-1)t^{a-2}e^{-t}dt \f] \f[ \frac{\partial \, P(a, z)}{\partial z} = \frac{z^{a-1}e^{-z}}{\Gamma(a)} \f] * * @param z first argument * @param a second argument * @return value of the normalized, lower-incomplete gamma function * applied to z and a * @throws std::domain_error if either argument is not positive or * if z is at a pole of the function */ inline double gamma_p(double z, double a) { if (is_nan(z)) { return not_a_number(); } if (is_nan(a)) { return not_a_number(); } check_positive("gamma_p", "first argument (z)", z); check_nonnegative("gamma_p", "second argument (a)", a); return boost::math::gamma_p(z, a, boost_policy_t<>()); } /** * Enables the vectorized application of the gamma_p function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return gamma_p function applied to the two inputs. 
*/ template * = nullptr> inline auto gamma_p(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return gamma_p(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lub_free.hpp0000644000176200001440000001426414645137106023022 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LUB_FREE_HPP #define STAN_MATH_PRIM_FUN_LUB_FREE_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** lub_free * Return the unconstrained variable that transforms to the * y given the specified bounds. * *

The transform in `lub_constrain`, * is reversed by a transformed and scaled logit, * *

\f$f^{-1}(y) = \mbox{logit}(\frac{y - L}{U - L})\f$ * * where \f$U\f$ and \f$L\f$ are the lower and upper bounds. * * @tparam T type of bounded object * @tparam L type of lower bound * @tparam U type of upper bound * @param y constrained value * @param lb lower bound * @param ub upper bound * @return the free object that transforms to the input * given the bounds * @throw std::invalid_argument if the lower bound is greater than * the upper bound, y is less than the lower bound, or y is * greater than the upper bound */ ///@{ /** * Overload for all scalar arguments */ template * = nullptr, require_all_stan_scalar_t* = nullptr> inline auto lub_free(T&& y, L&& lb, U&& ub) { const bool is_lb_inf = value_of(lb) == NEGATIVE_INFTY; const bool is_ub_inf = value_of(ub) == INFTY; if (unlikely(is_ub_inf && is_lb_inf)) { return identity_free(y, lb, ub); } else if (unlikely(is_ub_inf)) { return lb_free(identity_free(y, ub), lb); } else if (unlikely(is_lb_inf)) { return ub_free(identity_free(y, lb), ub); } else { auto&& y_ref = to_ref(std::forward(y)); check_bounded("lub_free", "Bounded variable", value_of(y_ref), value_of(lb), value_of(ub)); return eval(logit(divide(subtract(std::forward(y_ref), lb), subtract(ub, lb)))); } } /** * Overload for matrix constrained variable, matrix lower bound, scalar upper * bound */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto lub_free(T&& y, L&& lb, U&& ub) { check_matching_dims("lub_free", "y", y, "lb", lb); auto&& y_ref = to_ref(std::forward(y)); auto&& lb_ref = to_ref(std::forward(lb)); promote_scalar_t, T> ret(y.rows(), y.cols()); for (Eigen::Index j = 0; j < y.cols(); ++j) { for (Eigen::Index i = 0; i < y.rows(); ++i) { ret.coeffRef(i, j) = lub_free(y_ref.coeff(i, j), lb_ref.coeff(i, j), ub); } } return ret; } /** * Overload for matrix constrained variable, matrix upper bound, scalar lower * bound */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto lub_free(T&& y, L&& lb, U&& ub) { 
check_matching_dims("lub_free", "y", y, "ub", ub); auto&& y_ref = to_ref(std::forward(y)); auto&& ub_ref = to_ref(std::forward(ub)); promote_scalar_t, T> ret(y.rows(), y.cols()); for (Eigen::Index j = 0; j < y.cols(); ++j) { for (Eigen::Index i = 0; i < y.rows(); ++i) { ret.coeffRef(i, j) = lub_free(y_ref.coeff(i, j), lb, ub_ref.coeff(i, j)); } } return ret; } /** * Overload for matrix constrained variable, matrix upper bound, matrix lower * bound */ template * = nullptr> inline auto lub_free(T&& y, L&& lb, U&& ub) { check_matching_dims("lub_free", "y", y, "lb", lb); check_matching_dims("lub_free", "y", y, "ub", ub); auto&& y_ref = to_ref(std::forward(y)); auto&& lb_ref = to_ref(std::forward(lb)); auto&& ub_ref = to_ref(std::forward(ub)); promote_scalar_t, T> ret(y.rows(), y.cols()); for (Eigen::Index j = 0; j < y.cols(); ++j) { for (Eigen::Index i = 0; i < y.rows(); ++i) { ret.coeffRef(i, j) = lub_free(y_ref.coeff(i, j), lb_ref.coeff(i, j), ub_ref.coeff(i, j)); } } return ret; } /** * Overload for `std::vector` constrained variable */ template * = nullptr> inline auto lub_free(const std::vector y, const L& lb, const U& ub) { std::vector ret(y.size()); for (Eigen::Index i = 0; i < y.size(); ++i) { ret[i] = lub_free(y[i], lb, ub); } return ret; } /** * Overload for `std::vector` constrained variable and `std::vector` upper bound */ template * = nullptr> inline auto lub_free(const std::vector y, const L& lb, const std::vector& ub) { check_matching_dims("lub_free", "y", y, "ub", ub); std::vector ret(y.size()); for (Eigen::Index i = 0; i < y.size(); ++i) { ret[i] = lub_free(y[i], lb, ub[i]); } return ret; } /** * Overload for `std::vector` constrained variable and `std::vector` lower bound */ template * = nullptr> inline auto lub_free(const std::vector y, const std::vector& lb, const U& ub) { check_matching_dims("lub_free", "y", y, "lb", lb); std::vector ret(y.size()); for (Eigen::Index i = 0; i < y.size(); ++i) { ret[i] = lub_free(y[i], lb[i], ub); } return ret; } /** 
* Overload for `std::vector` constrained variable and `std::vector` constraints */ template inline auto lub_free(const std::vector y, const std::vector& lb, const std::vector& ub) { check_matching_dims("lub_free", "y", y, "lb", lb); check_matching_dims("lub_free", "y", y, "ub", ub); std::vector ret(y.size()); for (Eigen::Index i = 0; i < y.size(); ++i) { ret[i] = lub_free(y[i], lb[i], ub[i]); } return ret; } ///@} } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/linspaced_vector.hpp0000644000176200001440000000221514645137106024554 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LINSPACED_VECTOR_HPP #define STAN_MATH_PRIM_FUN_LINSPACED_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a vector of linearly spaced elements. * * This produces a vector from low to high (inclusive) with elements spaced * as (high - low) / (K - 1). For K=1, the vector will contain the high value; * for K=0 it returns an empty vector. * * @param K size of the vector * @param low smallest value * @param high largest value * @return A vector of size K with elements linearly spaced between * low and high. * @throw std::domain_error if K is negative, if low is nan or infinite, * if high is nan or infinite, or if high is less than low. 
*/ inline auto linspaced_vector(int K, double low, double high) { static const char* function = "linspaced_vector"; check_nonnegative(function, "size", K); check_finite(function, "low", low); check_finite(function, "high", high); check_greater_or_equal(function, "high", high, low); return Eigen::VectorXd::LinSpaced(K, low, high); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cosh.hpp0000644000176200001440000000414614645137106022171 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COSH_HPP #define STAN_MATH_PRIM_FUN_COSH_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `cosh()` so it can be vectorized. * * @tparam T type of argument * @param x angle in radians * @return Hyperbolic cosine of x. */ struct cosh_fun { template static inline auto fun(const T& x) { using std::cosh; return cosh(x); } }; /** * Returns the elementwise `cosh()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x angles in radians * @return Hyberbolic cosine of x. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto cosh(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `cosh()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Hyberbolic cosine of x. */ template * = nullptr> inline auto cosh(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().cosh(); }); } namespace internal { /** * Return the hyperbolic cosine of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return hyperbolic cosine of the argument */ template inline std::complex complex_cosh(const std::complex& z) { return 0.5 * (exp(z) + exp(-z)); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/isnan.hpp0000644000176200001440000000113514645137106022340 0ustar liggesusers#ifndef STAN_MATH_PRIM_SCAL_FUN_ISNAN_HPP #define STAN_MATH_PRIM_SCAL_FUN_ISNAN_HPP #include #include namespace stan { namespace math { /** * Return true if specified argument is not-a-number. * * Overloads `std::isnan` from `` for argument-dependent * lookup. * * @tparam ADType type of argument * @param[in] x argument * @return true if argument is not-a-number */ template > inline bool isnan(const T& x) { return is_nan(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/hypot.hpp0000644000176200001440000000274114645137106022377 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_HYPOT_HPP #define STAN_MATH_PRIM_FUN_HYPOT_HPP #include #include #include namespace stan { namespace math { /** * Return the length of the hypotenuse of a right triangle with * opposite and adjacent side lengths given by the specified * arguments (C++11). In symbols, if the arguments are * x and y, the result is sqrt(x * * x + y * y). * * @param x First argument. * @param y Second argument. * @return Length of hypotenuse of right triangle with opposite * and adjacent side lengths x and y. */ template * = nullptr> inline double hypot(T1 x, T2 y) { using std::hypot; return hypot(x, y); } /** * Enables the vectorized application of the hypot function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return hypot function applied to the two inputs. 
*/ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< T1, T2>* = nullptr> inline auto hypot(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return hypot(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/corr_free.hpp0000644000176200001440000000162114645137106023176 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CORR_FREE_HPP #define STAN_MATH_PRIM_FUN_CORR_FREE_HPP #include #include #include namespace stan { namespace math { /** * Return the unconstrained scalar that when transformed to * a valid correlation produces the specified value. * *

This function inverts the transform defined for * corr_constrain(T), which is the inverse hyperbolic * tangent, * *

\f$ f^{-1}(y) * = \mbox{atanh}\, y * = \frac{1}{2} \log \frac{y + 1}{y - 1}\f$. * * @tparam T Type of correlation * @param[in] y correlation * @return free scalar that transforms to the specified input */ template inline T corr_free(const T& y) { check_bounded("lub_free", "Correlation variable", y, -1.0, 1.0); return atanh(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_lt.hpp0000644000176200001440000000107714645137106023346 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_LT_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_LT_HPP namespace stan { namespace math { /** * Return 1 if the first argument is strictly less than the second. * Equivalent to x1 < x2. * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true if x1 < x2 */ template inline bool logical_lt(T1 x1, T2 x2) { return x1 < x2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/autocorrelation.hpp0000644000176200001440000001255514645137106024452 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AUTOCORRELATION_HPP #define STAN_MATH_PRIM_FUN_AUTOCORRELATION_HPP #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Find the optimal next size for the FFT so that * a minimum number of zeros are padded. */ inline size_t fft_next_good_size(size_t N) { if (N <= 2) { return 2; } while (true) { size_t m = N; while ((m % 2) == 0) { m /= 2; } while ((m % 3) == 0) { m /= 3; } while ((m % 5) == 0) { m /= 5; } if (m <= 1) { return N; } N++; } } } // namespace internal /** * Write autocorrelation estimates for every lag for the specified * input sequence into the specified result using the specified * FFT engine. The return vector be resized to the same length as * the input sequence with lags given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

An FFT engine can be created for reuse for type double with: * *

 *     Eigen::FFT fft;
 * 
* * @tparam T Scalar type. * @param y Input sequence. * @param ac Autocorrelations. * @param fft FFT engine instance. */ template void autocorrelation(const std::vector& y, std::vector& ac, Eigen::FFT& fft) { using std::complex; using std::vector; size_t N = y.size(); size_t M = internal::fft_next_good_size(N); size_t Mt2 = 2 * M; vector > freqvec; // centered_signal = y-mean(y) followed by N zeroes vector centered_signal(y); centered_signal.insert(centered_signal.end(), Mt2 - N, 0.0); T mean = stan::math::mean(y); for (size_t i = 0; i < N; i++) { centered_signal[i] -= mean; } fft.fwd(freqvec, centered_signal); for (size_t i = 0; i < Mt2; ++i) { freqvec[i] = complex(norm(freqvec[i]), 0.0); } fft.inv(ac, freqvec); ac.resize(N); for (size_t i = 0; i < N; ++i) { ac[i] /= (N - i); } T var = ac[0]; for (size_t i = 0; i < N; ++i) { ac[i] /= var; } } /** * Write autocorrelation estimates for every lag for the specified * input sequence into the specified result using the specified * FFT engine. The return vector be resized to the same length as * the input sequence with lags given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

An FFT engine can be created for reuse for type double with: * *

 *     Eigen::FFT fft;
 * 
* * @tparam T Scalar type. * @param y Input sequence. * @param ac Autocorrelations. * @param fft FFT engine instance. */ template void autocorrelation(const Eigen::MatrixBase& y, Eigen::MatrixBase& ac, Eigen::FFT& fft) { size_t N = y.size(); size_t M = internal::fft_next_good_size(N); size_t Mt2 = 2 * M; // centered_signal = y-mean(y) followed by N zeros Eigen::Matrix centered_signal(Mt2); centered_signal.setZero(); centered_signal.head(N) = y.array() - y.mean(); Eigen::Matrix, Eigen::Dynamic, 1> freqvec(Mt2); fft.SetFlag(fft.HalfSpectrum); fft.fwd(freqvec, centered_signal); // cwiseAbs2 == norm freqvec = freqvec.cwiseAbs2(); Eigen::Matrix ac_tmp(Mt2); fft.inv(ac_tmp, freqvec); fft.ClearFlag(fft.HalfSpectrum); for (size_t i = 0; i < N; ++i) { ac_tmp(i) /= (N - i); } ac = ac_tmp.head(N).array() / ac_tmp(0); } /** * Write autocorrelation estimates for every lag for the specified * input sequence into the specified result. The return vector be * resized to the same length as the input sequence with lags * given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

This method is just a light wrapper around the three-argument * autocorrelation function. * * @tparam T Scalar type. * @param y Input sequence. * @param ac Autocorrelations. */ template void autocorrelation(const std::vector& y, std::vector& ac) { Eigen::FFT fft; size_t N = y.size(); ac.resize(N); const Eigen::Map > y_map(&y[0], N); Eigen::Map > ac_map(&ac[0], N); autocorrelation(y_map, ac_map, fft); } /** * Write autocorrelation estimates for every lag for the specified * input sequence into the specified result. The return vector be * resized to the same length as the input sequence with lags * given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

This method is just a light wrapper around the three-argument * autocorrelation function * * @tparam T Scalar type. * @param y Input sequence. * @param ac Autocorrelations. */ template void autocorrelation(const Eigen::MatrixBase& y, Eigen::MatrixBase& ac) { Eigen::FFT fft; autocorrelation(y, ac, fft); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/gp_exponential_cov.hpp0000644000176200001440000002464714645137106025130 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GP_EXPONENTIAL_COV_HPP #define STAN_MATH_PRIM_FUN_GP_EXPONENTIAL_COV_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a Matern exponential covariance Matrix. * * \f[ k(x, x') = \sigma^2 exp(-\frac{d(x, x')^2}{2l^2}) \f] * * where d(x, x') is the Euclidean distance. * * @tparam T_x type for each scalar * @tparam T_s type of parameter sigma * @tparam T_l type of parameter length scale * * @param x std::vector of scalars that can be used in stan::math::distance * @param sigma standard deviation or magnitude * @param length_scale length scale * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf * */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exponential_cov(const std::vector &x, const T_s &sigma, const T_l &length_scale) { using std::exp; using std::pow; size_t x_size = stan::math::size(x); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } const char *function = "gp_exponential_cov"; size_t x_obs_size = stan::math::size(x[0]); for (size_t i = 0; i < x_size; ++i) { check_size_match(function, "x row", x_obs_size, "x's other row", stan::math::size(x[i])); } for (size_t i = 0; i < x_size; ++i) { check_not_nan(function, "x", x[i]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); T_s sigma_sq = square(sigma); T_l 
neg_inv_l = -1.0 / length_scale; size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov(j, j) = sigma_sq; size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * exp(neg_inv_l * distance(x[i], x[j])); } } } } return cov; } /** * Returns a Matern exponential covariance matrix. * * \f[ k(x, x') = \sigma^2 exp(-\sum_{k=1}^K\frac{d(x, x')}{l_k}) \f] * * where d(x, x') is the Euclidean distance. * * @tparam T_x type for each scalar * @tparam T_s type for each parameter sigma * @tparam T_l type for each length scale parameter * * @param x std::vector of Eigen::vectors of scalars * @param sigma standard deviation that can be used in stan::math::square * @param length_scale std::vector of length scales * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exponential_cov(const std::vector> &x, const T_s &sigma, const std::vector &length_scale) { using std::exp; using std::pow; size_t x_size = stan::math::size(x); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } const char *function = "gp_exponential_cov"; for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "x", x[n]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); size_t l_size = length_scale.size(); check_size_match(function, "x dimension", x[0].size(), "number of length scales", l_size); std::vector, -1, 1>> x_new = divide_columns(x, length_scale); T_s sigma_sq = square(sigma); size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = 
std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov(j, j) = sigma_sq; size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { return_type_t dist = distance(x_new[i], x_new[j]); cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * exp(-dist); } } } } return cov; } /** * Returns a Matern exponential cross covariance matrix * * \f[ k(x, x') = \sigma^2 exp(-\frac{d(x, x')}{l}) \f] * * where d(x, x') is the Euclidean distance * This function is for the cross covariance matrix needed to * compute the posterior predictive distribution * * @tparam T_x1 first type of scalars contained in vector x1 * @tparam T_x2 second type of scalars contained in vector x2 * @tparam T_s type of parameter sigma, marginal standard deviation * @tparam T_l type of parameter length scale * * @param x1 std::vector of scalars that can be used in squared_distance * @param x2 std::vector of scalars that can be used in squared_distance * @param length_scale length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x1, x2 are nan or inf */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exponential_cov(const std::vector &x1, const std::vector &x2, const T_s &sigma, const T_l &length_scale) { using std::exp; using std::pow; size_t x1_size = stan::math::size(x1); size_t x2_size = stan::math::size(x2); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } const char *function = "gp_exponential_cov"; size_t x1_obs_size = stan::math::size(x1[0]); for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x1's other row", stan::math::size(x1[i])); } for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x2's other row", stan::math::size(x2[i])); } for (size_t n = 0; n < x1_size; ++n) { 
check_not_nan(function, "x1", x1[n]); } for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); T_s sigma_sq = square(sigma); T_l neg_inv_l = -1.0 / length_scale; size_t block_size = 10; for (size_t ib = 0; ib < x1_size; ib += block_size) { for (size_t jb = 0; jb < x2_size; jb += block_size) { size_t j_end = std::min(x2_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1_size, ib + block_size); for (size_t i = ib; i < i_end; ++i) { cov(i, j) = sigma_sq * exp(neg_inv_l * distance(x1[i], x2[j])); } } } } return cov; } /** * Returns a Matern exponential cross covariance matrix * * \f[ k(x, x') = \sigma^2 exp(-\sum_{k=1}^K\frac{d(x, x')}{l_k}) \f] * * where \f$d(x, x')\f$ is the Euclidean distance * * This function is for the cross covariance matrix needed * to compute the posterior predictive density. * * @tparam T_x1 first type of std::vector of scalars * @tparam T_x2 second type of std::vector of scalars * @tparam T_s type of parameter sigma, marginal standard deviation * @tparam T_l type of parameter length scale * * @param x1 std::vector of Eigen vectors of scalars * @param x2 std::vector of Eigen vectors of scalars * @param length_scale parameter length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x1, x2 are nan or inf */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exponential_cov(const std::vector> &x1, const std::vector> &x2, const T_s &sigma, const std::vector &length_scale) { using std::exp; using std::pow; size_t x1_size = stan::math::size(x1); size_t x2_size = stan::math::size(x2); size_t l_size = stan::math::size(length_scale); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } const char *function = 
"gp_exponential_cov"; for (size_t n = 0; n < x1_size; ++n) { check_not_nan(function, "x1", x1[n]); } for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", stan::math::size(x1[i]), "number of length scales", l_size); } for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x2's row", stan::math::size(x2[i]), "number of length scales", l_size); } T_s sigma_sq = square(sigma); std::vector, -1, 1>> x1_new = divide_columns(x1, length_scale); std::vector, -1, 1>> x2_new = divide_columns(x2, length_scale); size_t block_size = 10; for (size_t ib = 0; ib < x1_size; ib += block_size) { for (size_t jb = 0; jb < x2_size; jb += block_size) { size_t j_end = std::min(x2_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1_size, ib + block_size); for (size_t i = ib; i < i_end; ++i) { cov(i, j) = sigma_sq * exp(-distance(x1_new[i], x2_new[j])); } } } } return cov; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/exp2.hpp0000644000176200001440000000222714645137106022111 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EXP2_HPP #define STAN_MATH_PRIM_FUN_EXP2_HPP #include #include #include namespace stan { namespace math { /** * Structure to wrap `exp2()` so it can be vectorized. */ struct exp2_fun { /** * Return the base two exponent of the specified argument. * * @tparam T type of argument * @param x argument * @return Base two exponent of the argument. */ template static inline auto fun(const T& x) { using std::exp2; return exp2(x); } }; /** * Return the elementwise `exp2()` of the specified argument, * which may be a scalar or any Stan container of numeric scalars. * The return type is the same as the argument type. 
* * @tparam T type of container * @param x container * @return Elementwise exp2 of members of container. */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto exp2(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/floor.hpp0000644000176200001440000000343114645137106022352 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FLOOR_HPP #define STAN_MATH_PRIM_FUN_FLOOR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `floor()` so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Greatest integer <= x. */ struct floor_fun { template static inline auto fun(const T& x) { using std::floor; return floor(x); } }; /** * Returns the elementwise `floor()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x container * @return Greatest integer <= each value in x. */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr, require_not_var_matrix_t* = nullptr> inline auto floor(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `floor()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Greatest integer <= each value in x. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto floor(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().floor(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/bessel_second_kind.hpp0000644000176200001440000000330714645137106025050 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_BESSEL_SECOND_KIND_HPP #define STAN_MATH_PRIM_FUN_BESSEL_SECOND_KIND_HPP #include #include #include namespace stan { namespace math { /** * \f[ \mbox{bessel\_second\_kind}(v, x) = \begin{cases} \textrm{error} & \mbox{if } x \leq 0 \\ Y_v(x) & \mbox{if } x > 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{bessel\_second\_kind}(v, x)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0 \\ \frac{\partial\, Y_v(x)}{\partial x} & \mbox{if } x > 0 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ Y_v(x)=\frac{J_v(x)\cos(v\pi)-J_{-v}(x)}{\sin(v\pi)} \f] \f[ \frac{\partial \, Y_v(x)}{\partial x} = \frac{v}{x}Y_v(x)-Y_{v+1}(x) \f] * */ template * = nullptr> inline T2 bessel_second_kind(int v, const T2 z) { return boost::math::cyl_neumann(v, z); } /** * Enables the vectorized application of the bessel second kind function, when * the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Bessel second kind function applied to the two inputs. 
*/ template * = nullptr> inline auto bessel_second_kind(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return bessel_second_kind(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/isinf.hpp0000644000176200001440000000120014645137106022331 0ustar liggesusers#ifndef STAN_MATH_PRIM_SCAL_FUN_ISINF_HPP #define STAN_MATH_PRIM_SCAL_FUN_ISINF_HPP #include #include #include namespace stan { namespace math { /** * Return true if specified argument is infinite (positive or * negative). * * Overloads `std::isinf` from `` for argument-dependent * lookup. * * @tparam ADType type of argument * @param[in] v argument * @return true if argument is infinite */ template > inline bool isinf(const T& v) { return is_inf(v); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/tcrossprod.hpp0000644000176200001440000000212414645137106023431 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TCROSSPROD_HPP #define STAN_MATH_PRIM_FUN_TCROSSPROD_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the result of post-multiplying a matrix by its * own transpose. * * @tparam T type of the matrix (must be derived from \c Eigen::MatrixBase) * @param M Matrix to multiply. * @return M times its transpose. 
*/ template * = nullptr> inline Eigen::Matrix, T::RowsAtCompileTime, T::RowsAtCompileTime> tcrossprod(const T& M) { if (M.rows() == 0) { return {}; } const auto& M_ref = to_ref(M); if (M.rows() == 1) { return M_ref * M_ref.transpose(); } Eigen::Matrix, T::RowsAtCompileTime, T::RowsAtCompileTime> result(M.rows(), M.rows()); return result.setZero().template selfadjointView().rankUpdate( M_ref); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_determinant_ldlt.hpp0000644000176200001440000000117114645137106025422 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_DETERMINANT_LDLT_HPP #define STAN_MATH_PRIM_FUN_LOG_DETERMINANT_LDLT_HPP #include namespace stan { namespace math { /** * Returns log(abs(det(A))) given a LDLT_factor of A * * @tparam T type of elements in the LDLT_factor * @param A LDLT_factor * @return the log(abs(det(A)) */ template * = nullptr> inline value_type_t log_determinant_ldlt(LDLT_factor& A) { if (A.matrix().size() == 0) { return 0; } return sum(log(A.ldlt().vectorD().array())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv.hpp0000644000176200001440000000320414645137106022023 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_HPP #define STAN_MATH_PRIM_FUN_INV_HPP #include #include #include #include namespace stan { namespace math { /** * Structure to wrap 1.0 / x so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return 1 / x. */ struct inv_fun { template static inline auto fun(const T& x) { return 1.0 / x; } }; /** * Return the elementwise 1.0 / x of the specified argument, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x container * @return 1 divided by each value in x. 
*/ template < typename T, require_not_container_st* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto inv(const T& x) { return apply_scalar_unary::apply(x); } /** * Version of \c inv() that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return 1 divided by each value in x. */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto inv(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().inverse(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/int_step.hpp0000644000176200001440000000124514645137106023057 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INT_STEP_HPP #define STAN_MATH_PRIM_FUN_INT_STEP_HPP #include namespace stan { namespace math { /** * The integer step, or Heaviside, function. * * For double NaN input, int_step(NaN) returns 0. * * \f[ \mbox{int\_step}(x) = \begin{cases} 0 & \mbox{if } x \leq 0 \\ 1 & \mbox{if } x > 0 \\[6pt] 0 & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @tparam T value type * @param[in] y value * @return 1 if value is greater than 0 and 0 otherwise */ template inline int int_step(const T& y) { return y > 0; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_lte.hpp0000644000176200001440000000112514645137106023505 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_LTE_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_LTE_HPP namespace stan { namespace math { /** * Return 1 if the first argument is less than or equal to the second. * Equivalent to x1 <= x2. 
* * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true iff x1 <= x2 */ template inline bool logical_lte(const T1 x1, const T2 x2) { return x1 <= x2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_array_1d.hpp0000644000176200001440000000251014645137106023432 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_ARRAY_1D_HPP #define STAN_MATH_PRIM_FUN_TO_ARRAY_1D_HPP #include #include #include namespace stan { namespace math { // real[] to_array_1d(matrix) // real[] to_array_1d(row_vector) // real[] to_array_1d(vector) template * = nullptr> inline std::vector> to_array_1d(const EigMat& matrix) { using T_val = value_type_t; std::vector result(matrix.size()); Eigen::Map>( result.data(), matrix.rows(), matrix.cols()) = matrix; return result; } // real[] to_array_1d(...) template inline std::vector to_array_1d(const std::vector& x) { return x; } // real[] to_array_1d(...) template inline std::vector::type> to_array_1d( const std::vector>& x) { size_t size1 = x.size(); size_t size2 = 0; if (size1 != 0) { size2 = x[0].size(); } std::vector y(size1 * size2); for (size_t i = 0, ij = 0; i < size1; i++) { for (size_t j = 0; j < size2; j++, ij++) { y[ij] = x[i][j]; } } return to_array_1d(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/is_nan.hpp0000644000176200001440000000165614645137106022507 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IS_NAN_HPP #define STAN_MATH_PRIM_FUN_IS_NAN_HPP #include #include namespace stan { namespace math { /** * Tests if the input is Not a Number (NaN) * * Integer specialization, always return false. * * @tparam T Integer type. * @param x Value to test. * @return false. */ template * = nullptr> inline bool is_nan(T x) { return false; } /** * Tests if the input is Not a Number (NaN) * * Delegates to std::isnan. * * @tparam T Floating point type. 
* @param x Value to test. * @return true if the value is NaN. */ template * = nullptr> inline bool is_nan(T x) { return std::isnan(x); } template * = nullptr> inline bool is_nan(const T& x) { return x.hasNan(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fma.hpp0000644000176200001440000000314314645137106021774 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FMA_HPP #define STAN_MATH_PRIM_FUN_FMA_HPP #include #include #include namespace stan { namespace math { /** * Return the product of the first two arguments plus the third * argument. * *

Warning: This does not delegate to the high-precision * platform-specific fma() implementation. * * @param x First argument. * @param y Second argument. * @param z Third argument. * @return The product of the first two arguments plus the third * argument. */ template * = nullptr> inline double fma(T1 x, T2 y, T3 z) { using std::fma; return fma(x, y, z); } template * = nullptr, require_not_var_t>* = nullptr> inline auto fma(T1&& x, T2&& y, T3&& z) { if (is_matrix::value && is_matrix::value) { check_matching_dims("fma", "x", x, "y", y); } if (is_matrix::value && is_matrix::value) { check_matching_dims("fma", "x", x, "z", z); } else if (is_matrix::value && is_matrix::value) { check_matching_dims("fma", "y", y, "z", z); } return make_holder( [](auto&& x, auto&& y, auto&& z) { return ((as_array_or_scalar(x) * as_array_or_scalar(y)) + as_array_or_scalar(z)) .matrix(); }, std::forward(x), std::forward(y), std::forward(z)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/is_inf.hpp0000644000176200001440000000072614645137106022504 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IS_INF_HPP #define STAN_MATH_PRIM_FUN_IS_INF_HPP #include #include namespace stan { namespace math { /** * Returns true if the input is infinite and false otherwise. * * Delegates to std::isinf. * * @param x Value to test. * @return true if the value is infinite. 
*/ inline bool is_inf(double x) { return std::isinf(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_array_2d.hpp0000644000176200001440000000162514645137106023441 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_ARRAY_2D_HPP #define STAN_MATH_PRIM_FUN_TO_ARRAY_2D_HPP #include #include #include namespace stan { namespace math { // real[, ] to_array_2d(matrix) template * = nullptr> inline std::vector>> to_array_2d( const EigMat& matrix) { using std::vector; using T = value_type_t; const Eigen::Ref>& mat_ref = matrix; int C = matrix.cols(); int R = matrix.rows(); vector> result(R, vector(C)); for (int i = 0, ij = 0; i < C; i++) { for (int j = 0; j < R; j++, ij++) { result[j][i] = mat_ref.data()[ij]; } } return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/chol2inv.hpp0000644000176200001440000000303614645137106022756 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CHOL2INV_HPP #define STAN_MATH_PRIM_FUN_CHOL2INV_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the inverse of the matrix whose Cholesky factor is L * * @tparam T type of elements in the matrix * @param L Matrix that is a Cholesky factor. 
* @return The matrix inverse of L * L' * @throw std::domain_error If the input matrix is not square or * lower triangular */ template * = nullptr> plain_type_t chol2inv(const T& L) { const Eigen::Ref>& L_ref = L; check_square("chol2inv", "L", L_ref); check_lower_triangular("chol2inv", "L", L_ref); int K = L.rows(); using T_result = plain_type_t; if (K == 0) { return L_ref; } if (K == 1) { T_result X(1, 1); X.coeffRef(0) = inv_square(L_ref.coeff(0, 0)); return X; } T_result L_inv = mdivide_left_tri_low(L_ref, T_result::Identity(K, K)); T_result X(K, K); for (int k = 0; k < K; ++k) { X.coeffRef(k, k) = dot_self(L_inv.col(k).tail(K - k).eval()); for (int j = k + 1; j < K; ++j) { int Kmj = K - j; X.coeffRef(k, j) = X.coeffRef(j, k) = dot_product( L_inv.col(k).tail(Kmj).eval(), L_inv.col(j).tail(Kmj).eval()); } } return X; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cov_matrix_constrain.hpp0000644000176200001440000001342414645137106025467 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COV_MATRIX_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_COV_MATRIX_CONSTRAIN_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the symmetric, positive-definite matrix of dimensions K * by K resulting from transforming the specified finite vector of * size K plus (K choose 2). * *

See cov_matrix_free() for the inverse transform. * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x The vector to convert to a covariance matrix. * @param K The number of rows and columns of the resulting * covariance matrix. * @throws std::invalid_argument if (x.size() != K + (K choose 2)). */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_matrix_constrain(const T& x, Eigen::Index K) { using Eigen::Dynamic; using Eigen::Matrix; using std::exp; Matrix, Dynamic, Dynamic> L(K, K); check_size_match("cov_matrix_constrain", "x.size()", x.size(), "K + (K choose 2)", (K * (K + 1)) / 2); const auto& x_ref = to_ref(x); int i = 0; for (Eigen::Index m = 0; m < K; ++m) { L.row(m).head(m) = x_ref.segment(i, m); i += m; L.coeffRef(m, m) = exp(x_ref.coeff(i++)); L.row(m).tail(K - m - 1).setZero(); } return multiply_lower_tri_self_transpose(L); } /** * Return the symmetric, positive-definite matrix of dimensions K * by K resulting from transforming the specified finite vector of * size K plus (K choose 2). * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x The vector to convert to a covariance matrix. * @param K The dimensions of the resulting covariance matrix. * @param lp Reference * @throws std::domain_error if (x.size() != K + (K choose 2)). 
*/ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_matrix_constrain(const T& x, Eigen::Index K, return_type_t& lp) { using Eigen::Dynamic; using Eigen::Matrix; using std::exp; using std::log; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "K + (K choose 2)", (K * (K + 1)) / 2); Matrix, Dynamic, Dynamic> L(K, K); const auto& x_ref = to_ref(x); int i = 0; for (Eigen::Index m = 0; m < K; ++m) { L.row(m).head(m) = x_ref.segment(i, m); i += m; L.coeffRef(m, m) = exp(x_ref.coeff(i++)); L.row(m).tail(K - m - 1).setZero(); } // Jacobian for complete transform, including exp() above lp += (K * LOG_TWO); // needless constant; want propto for (Eigen::Index k = 0; k < K; ++k) { lp += (K - k + 1) * log(L.coeff(k, k)); // only +1 because index from 0 } return multiply_lower_tri_self_transpose(L); } /** * Return the symmetric, positive-definite matrix of dimensions K by K resulting * from transforming the specified finite vector of size K plus (K choose 2). If * the `Jacobian` parameter is `true`, the log density accumulator is * incremented with the log absolute Jacobian determinant of the transform. All * of the transforms are specified with their Jacobians in the *Stan Reference * Manual* chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time dynamic rows * and 1 column * @param x The vector to convert to a covariance matrix * @param K The dimensions of the resulting covariance matrix * @param[in, out] lp log density accumulator * @throws std::domain_error if (x.size() != K + (K choose 2)). 
*/ template * = nullptr> inline auto cov_matrix_constrain(const T& x, Eigen::Index K, return_type_t& lp) { if (Jacobian) { return cov_matrix_constrain(x, K, lp); } else { return cov_matrix_constrain(x, K); } } /** * Return the symmetric, positive-definite matrix of dimensions K by K resulting * from transforming the specified finite vector of size K plus (K choose 2). If * the `Jacobian` parameter is `true`, the log density accumulator is * incremented with the log absolute Jacobian determinant of the transform. All * of the transforms are specified with their Jacobians in the *Stan Reference * Manual* chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time dynamic rows and 1 column * @param x The vector to convert to a covariance matrix * @param K The dimensions of the resulting covariance matrix * @param[in, out] lp log density accumulator * @throws std::domain_error if (x.size() != K + (K choose 2)). */ template * = nullptr> inline auto cov_matrix_constrain(const T& x, Eigen::Index K, return_type_t& lp) { return apply_vector_unary::apply(x, [&lp, K](auto&& v) { return cov_matrix_constrain(v, K, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/positive_free.hpp0000644000176200001440000000167214645137106024101 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_POSITIVE_FREE_HPP #define STAN_MATH_PRIM_FUN_POSITIVE_FREE_HPP #include #include #include #include namespace stan { namespace math { /** * Return the unconstrained value corresponding to the specified * positive-constrained value. * *

The transform is the inverse of the transform \f$f\f$ applied by * positive_constrain(T), namely * *

\f$f^{-1}(x) = \log(x)\f$. * *

The input is validated using check_positive(). * * @param y Input scalar. * @return Unconstrained value that produces the input when constrained. * @tparam T Type of scalar. * @throw std::domain_error if the variable is negative */ template inline T positive_free(const T& y) { check_positive("positive_free", "Positive variable", y); return log(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logit.hpp0000644000176200001440000000623314645137106022352 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGIT_HPP #define STAN_MATH_PRIM_FUN_LOGIT_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the log odds of the argument. * * The logit function is defined as for \f$x \in [0, 1]\f$ by * returning the log odds of \f$x\f$ treated as a probability, * * \f$\mbox{logit}(x) = \log \left( \frac{x}{1 - x} \right)\f$. * * The inverse to this function is inv_logit. * * \f[ \mbox{logit}(x) = \begin{cases} \textrm{NaN}& \mbox{if } x < 0 \textrm{ or } x > 1\\ \ln\frac{x}{1-x} & \mbox{if } 0\leq x \leq 1 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{logit}(x)}{\partial x} = \begin{cases} \textrm{NaN}& \mbox{if } x < 0 \textrm{ or } x > 1\\ \frac{1}{x-x^2}& \mbox{if } 0\leq x\leq 1 \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param u argument * @return log odds of argument */ inline double logit(double u) { using std::log; return log(u / (1 - u)); } /** * Return the log odds of the argument. * * @param u argument * @return log odds of argument */ inline double logit(int u) { return logit(static_cast(u)); } /** * Structure to wrap logit() so it can be vectorized. */ struct logit_fun { /** * Return the log odds of the specified argument. 
* * @tparam T type of argument * @param x argument * @return log odds of the argument */ template static inline auto fun(const T& x) { return logit(x); } }; /** * Return the elementwise application of logit() to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. * * @tparam Container type of container * @param x container * @return elementwise logit of container elements */ template < typename Container, require_not_container_st* = nullptr, require_not_var_matrix_t* = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto logit(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of logit() that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return the logit of each variable in the container. * * Note: The return must be evaluated otherwise the Ref object falls out * of scope */ template * = nullptr> inline auto logit(const Container& x) { return make_holder( [](const auto& v_ref) { return apply_vector_unary>::apply( v_ref, [](const auto& v) { return (v.array() / (1 - v.array())).log(); }); }, to_ref(x)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_or.hpp0000644000176200001440000000160414645137106023343 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_OR_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_OR_HPP #include namespace stan { namespace math { /** * The logical or function which returns 1 if either * argument is unequal to zero and 0 otherwise. Equivalent * to x1 != 0 || x2 != 0. 
* \f[ \mbox{operator||}(x, y) = \begin{cases} 0 & \mbox{if } x, y=0 \\ 1 & \mbox{if } x \neq 0 \textrm{ or } y\neq0\\[6pt] 1 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true if either x1 or x2 is not equal to 0. */ template inline int logical_or(T1 x1, T2 x2) { return (x1 != 0) || (x2 != 0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/head.hpp0000644000176200001440000000241114645137106022127 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_HEAD_HPP #define STAN_MATH_PRIM_FUN_HEAD_HPP #include #include #include namespace stan { namespace math { /** * Return the specified number of elements as a vector or row vector (same as * input) from the front of the specified vector or row vector. * * @tparam T type of the vector * @param v Vector input. * @param n Size of return. * @return The first n elements of v. * @throw std::out_of_range if n is out of range. */ template * = nullptr> inline auto head(const T& v, size_t n) { if (n != 0) { check_vector_index("head", "n", v, n); } return v.head(n); } /** * Return the specified number of elements as a standard vector * from the front of the specified standard vector. * * @tparam T type of elements in the vector * @param sv Standard vector. * @param n Size of return. * @return The first n elements of sv. * @throw std::out_of_range if n is out of range. 
*/ template std::vector head(const std::vector& sv, size_t n) { if (n != 0) { check_std_vector_index("head", "n", sv, n); } std::vector s(sv.begin(), sv.begin() + n); return s; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_neq.hpp0000644000176200001440000000114514645137106023506 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_NEQ_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_NEQ_HPP #include namespace stan { namespace math { /** * Return 1 if the first argument is unequal to the second. * Equivalent to x1 != x2. * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true iff x1 != x2 */ template inline int logical_neq(const T1 x1, const T2 x2) { return x1 != x2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/one_hot_vector.hpp0000644000176200001440000000155314645137106024251 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONE_HOT_VECTOR_HPP #define STAN_MATH_PRIM_FUN_ONE_HOT_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a vector with 1 in the k-th position and zero elsewhere * * @param K size of the vector * @param k position of the 1 (indexing from 1) * @return A vector of size K with all elements initialized to zero * and a 1 in the k-th position. * @throw std::domain_error if K is not positive, or if k is less than 1 or * greater than K. 
*/ inline Eigen::VectorXd one_hot_vector(int K, int k) { static const char* function = "one_hot_vector"; check_positive(function, "size", K); check_bounded(function, "k", k, 1, K); Eigen::VectorXd ret = Eigen::VectorXd::Zero(K); ret(k - 1) = 1; return ret; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_gt.hpp0000644000176200001440000000111614645137106023333 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_GT_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_GT_HPP namespace stan { namespace math { /** * Return 1 if the first argument is strictly greater than the second. * Equivalent to x1 < x2. * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true if x1 > x2 */ template inline bool logical_gt(const T1 x1, const T2 x2) { return x1 > x2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/signbit.hpp0000644000176200001440000000120014645137106022660 0ustar liggesusers#ifndef STAN_MATH_PRIM_SCAL_FUN_SIGNBIT_HPP #define STAN_MATH_PRIM_SCAL_FUN_SIGNBIT_HPP #include namespace stan { namespace math { /** * Return `true` if the specified argument is negative and `false` * otherwise. * * Overloads `std::signbit` from `` for argument-dependent * lookup. 
* * @tparam ADType type of argument * @param[in] v argument * @return `true` if the argument is negative */ template * = nullptr> inline bool signbit(ADType&& v) { using std::signbit; return signbit(v.val()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cholesky_factor_free.hpp0000644000176200001440000000366214645137106025417 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CHOLESKY_FACTOR_FREE_HPP #define STAN_MATH_PRIM_FUN_CHOLESKY_FACTOR_FREE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the unconstrained vector of parameters corresponding to * the specified Cholesky factor. A Cholesky factor must be lower * triangular and have positive diagonal elements. * * @tparam T type of the Cholesky factor (must be derived from \c * Eigen::MatrixBase) * @param y Cholesky factor. * @return Unconstrained parameters for Cholesky factor. * @throw std::domain_error If the matrix is not a Cholesky factor. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> cholesky_factor_free( const T& y) { using std::log; const auto& y_ref = to_ref(y); check_cholesky_factor("cholesky_factor_free", "y", y_ref); int M = y.rows(); int N = y.cols(); Eigen::Matrix, Eigen::Dynamic, 1> x((N * (N + 1)) / 2 + (M - N) * N); int pos = 0; for (int m = 0; m < N; ++m) { x.segment(pos, m) = y_ref.row(m).head(m); pos += m; x.coeffRef(pos++) = log(y_ref.coeff(m, m)); } for (int m = N; m < M; ++m) { x.segment(pos, N) = y_ref.row(m); pos += N; } return x; } /** * Overload of `cholesky_factor_free()` to untransform each matrix * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase`. * @param x The standard vector to untransform. 
*/ template * = nullptr> auto cholesky_factor_free(const T& x) { return apply_vector_unary::apply( x, [](auto&& v) { return cholesky_factor_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/csr_extract_w.hpp0000644000176200001440000000307314645137106024102 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CSR_EXTRACT_W_HPP #define STAN_MATH_PRIM_FUN_CSR_EXTRACT_W_HPP #include #include namespace stan { namespace math { /** \addtogroup csr_format * @{ */ /* Extract the non-zero values from a sparse matrix. * * @tparam T type of elements in the matrix * @param[in] A sparse matrix. * @return Vector of non-zero entries of A. */ template const Eigen::Matrix csr_extract_w( const Eigen::SparseMatrix& A) { auto a_nonzeros = A.nonZeros(); Eigen::Matrix w = Eigen::Matrix::Zero(a_nonzeros); for (int nze = 0; nze < a_nonzeros; ++nze) { w[nze] = *(A.valuePtr() + nze); } return w; } /* Extract the non-zero values from a dense matrix by converting * to sparse and calling the sparse matrix extractor. * * @tparam T type of elements in the matrix * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic * * @param[in] A dense matrix. * @return Vector of non-zero entries of A. */ template * = nullptr> const Eigen::Matrix, Eigen::Dynamic, 1> csr_extract_w( const T& A) { // conversion to sparse seems to touch data twice, so we need to call to_ref Eigen::SparseMatrix, Eigen::RowMajor> B = to_ref(A).sparseView(); return csr_extract_w(B); } /** @} */ // end of csr_format group } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/expm1.hpp0000644000176200001440000000213514645137106022263 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EXPM1_HPP #define STAN_MATH_PRIM_FUN_EXPM1_HPP #include #include #include namespace stan { namespace math { /** * Structure to wrap `expm1()` so that it can be vectorized. 
* * @tparam T type of variable * @param x variable * @return Natural exponential of x minus one. */ struct expm1_fun { template static inline auto fun(const T& x) { using std::expm1; return expm1(x); } }; /** * Return the elementwise `expm1()` of the specified argument, * which may be a scalar or any Stan container of numeric scalars. * The return type is the same as the argument type. * * @tparam T type of container * @param x container * @return Natural exponential of each value in x minus one. */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto expm1(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_diff_exp.hpp0000644000176200001440000000441314645137106023657 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_DIFF_EXP_HPP #define STAN_MATH_PRIM_FUN_LOG_DIFF_EXP_HPP #include #include #include #include namespace stan { namespace math { /** * The natural logarithm of the difference of the natural exponentiation * of x and the natural exponentiation of y * * This function is only defined for x >= y * \f[ \mbox{log\_diff\_exp}(x, y) = \begin{cases} \textrm{NaN} & \mbox{if } x < y\\ \ln(\exp(x)-\exp(y)) & \mbox{if } x \geq y \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_diff\_exp}(x, y)}{\partial x} = \begin{cases} \textrm{NaN} & \mbox{if } x \leq y\\ \frac{\exp(x)}{\exp(x)-\exp(y)} & \mbox{if } x > y \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_diff\_exp}(x, y)}{\partial y} = \begin{cases} \textrm{NaN} & \mbox{if } x \leq y\\ -\frac{\exp(y)}{\exp(x)-\exp(y)} & \mbox{if } x > y \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @tparam T1 type of the first argument * @tparam T2 type of the 
second argument * @param x first argument * @param y second argument */ template * = nullptr> inline return_type_t log_diff_exp(const T1 x, const T2 y) { if (x <= y) { return (x < INFTY && x == y) ? NEGATIVE_INFTY : NOT_A_NUMBER; } return x + log1m_exp(y - x); } /** * Enables the vectorized application of the log_diff_exp function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return log_diff_exp function applied to the two inputs. */ template * = nullptr> inline auto log_diff_exp(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return log_diff_exp(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rep_array.hpp0000644000176200001440000000262714645137106023223 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_REP_ARRAY_HPP #define STAN_MATH_PRIM_FUN_REP_ARRAY_HPP #include #include #include namespace stan { namespace math { template * = nullptr> inline std::vector> rep_array(const In& x, int n) { using T = plain_type_t; check_nonnegative("rep_array", "n", n); return std::vector(n, x); } template inline std::vector> rep_array(const In& x, int n) { return rep_array>>(x, n); } template inline std::vector>> rep_array(const In& x, int m, int n) { using std::vector; using T = plain_type_t; check_nonnegative("rep_array", "rows", m); check_nonnegative("rep_array", "cols", n); return vector>(m, vector(n, x)); } template inline std::vector>>> rep_array( const In& x, int k, int m, int n) { using std::vector; using T = plain_type_t; check_nonnegative("rep_array", "shelfs", k); check_nonnegative("rep_array", "rows", m); check_nonnegative("rep_array", "cols", n); return vector>>(k, vector>(m, vector(n, x))); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cbrt.hpp0000644000176200001440000000174214645137106022166 
0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CBRT_HPP #define STAN_MATH_PRIM_FUN_CBRT_HPP #include #include #include namespace stan { namespace math { /** * Structure to wrap `cbrt()` so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Cube root of x. */ struct cbrt_fun { template static inline auto fun(const T& x) { using std::cbrt; return cbrt(x); } }; /** * Returns the elementwise `cbrt()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam T type of container * @param x container * @return Cube root of each value in x. */ template < typename T, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto cbrt(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/positive_constrain.hpp0000644000176200001440000000660714645137106025163 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_POSITIVE_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_POSITIVE_CONSTRAIN_HPP #include #include #include #include namespace stan { namespace math { /** * Return the positive value for the specified unconstrained input. * *

The transform applied is * *

\f$f(x) = \exp(x)\f$. * * @param x Arbitrary input scalar or container. * @return Input transformed to be positive. */ template inline auto positive_constrain(const T& x) { return exp(x); } /** * Return the positive value for the specified unconstrained input, * incrementing the scalar reference with the log absolute * Jacobian determinant. * *

See positive_constrain(T) for details * of the transform. The log absolute Jacobian determinant is * *

\f$\log | \frac{d}{dx} \mbox{exp}(x) | * = \log | \mbox{exp}(x) | = x\f$. * * @tparam T type of unconstrained value * @param x unconstrained value or container * @param lp log density reference. * @return positive constrained version of unconstrained value(s) */ template inline auto positive_constrain(const T& x, S& lp) { lp += sum(x); return exp(x); } /** * Return the positive value for the specified unconstrained input. If the * `Jacobian` parameter is `true`, the log density accumulator is incremented * with the log absolute Jacobian determinant of the transform. All of the * transforms are specified with their Jacobians in the *Stan Reference Manual* * chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @param x unconstrained value or container * @param[in, out] lp log density accumulator * @return positive constrained version of unconstrained value(s) */ template * = nullptr> inline auto positive_constrain(const T& x, return_type_t& lp) { if (Jacobian) { return positive_constrain(x, lp); } else { return positive_constrain(x); } } /** * Return the positive value for the specified unconstrained input. If the * `Jacobian` parameter is `true`, the log density accumulator is incremented * with the log absolute Jacobian determinant of the transform. All of the * transforms are specified with their Jacobians in the *Stan Reference Manual* * chapter Constraint Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::EigenBase`, a `var_value` with inner type inheriting from * `Eigen::EigenBase`, a standard vector, or a scalar * @param x unconstrained value or container * @param[in, out] lp log density accumulator * @return positive constrained version of unconstrained value(s) */ template * = nullptr> inline auto positive_constrain(const T& x, return_type_t& lp) { return apply_vector_unary::apply( x, [&lp](auto&& v) { return positive_constrain(v, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/symmetrize_from_upper_tri.hpp0000644000176200001440000000141514645137106026555 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SYMMETRIZE_FROM_UPPER_TRI_HPP #define STAN_MATH_PRIM_FUN_SYMMETRIZE_FROM_UPPER_TRI_HPP #include #include namespace stan { namespace math { /** * Return a symmetric matrix using elements from the upper triangular part of * the input matrix. * * @tparam T type of elements in the matrix * @param m Matrix. * @throw std:invalid_argument if the matrix is not square. */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> symmetrize_from_upper_tri(const T& m) { check_square("symmetrize_from_upper_tri", "m", m); return m.template selfadjointView(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/tail.hpp0000644000176200001440000000244314645137106022164 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TAIL_HPP #define STAN_MATH_PRIM_FUN_TAIL_HPP #include #include #include #include namespace stan { namespace math { /** * Return the specified number of elements as a vector or row vector (same as * input) from the back of the specified vector or row vector. * * @tparam T type of the vector * @param v Vector input. * @param n Size of return. 
* @return The last n elements of v. * @throw std::out_of_range if n is out of range. */ template * = nullptr> inline auto tail(const T& v, size_t n) { if (n != 0) { check_vector_index("tail", "n", v, n); } return v.tail(n); } /** * Return the specified number of elements as a standard vector * from the back of the specified standard vector. * * @tparam T type of elements in the vector * @param sv Standard vector. * @param n Size of return. * @return The last n elements of sv. * @throw std::out_of_range if n is out of range. */ template std::vector tail(const std::vector& sv, size_t n) { if (n != 0) { check_std_vector_index("tail", "n", sv, n); } std::vector s(sv.end() - n, sv.end()); return s; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/binary_log_loss.hpp0000644000176200001440000000322714645137106024421 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_BINARY_LOG_LOSS_HPP #define STAN_MATH_PRIM_FUN_BINARY_LOG_LOSS_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the log loss function for binary classification * with specified reference and response values. * * The log loss function for prediction \f$\hat{y} \in [0, 1]\f$ * given outcome \f$y \in \{ 0, 1 \}\f$ is * * \f$\mbox{logloss}(1, \hat{y}) = -\log \hat{y} \f$, and * * \f$\mbox{logloss}(0, \hat{y}) = -\log (1 - \hat{y}) \f$. * * @tparam T value type * @param[in] y reference value, either 0 or 1 * @param[in] y_hat response value in [0, 1] * @return Log loss for response given reference value */ template * = nullptr> inline T binary_log_loss(int y, const T& y_hat) { using std::log; return y ? -log(y_hat) : -log1m(y_hat); } /** * Enables the vectorized application of the binary log loss function, when * the first and/or second arguments are containers. 
* * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Binary log loss function applied to the two inputs. */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto binary_log_loss(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return binary_log_loss(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/col.hpp0000644000176200001440000000144614645137106022012 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COL_HPP #define STAN_MATH_PRIM_FUN_COL_HPP #include #include namespace stan { namespace math { /** * Return the specified column of the specified matrix * using start-at-1 indexing. * * This is equivalent to calling m.col(i - 1) and * assigning the resulting template expression to a column vector. * * @tparam T type of the matrix * @param m Matrix. * @param j Column index (count from 1). * @return Specified column of the matrix. * @throw std::out_of_range if j is out of range. */ template * = nullptr> inline auto col(const T& m, size_t j) { check_column_index("col", "j", m, j); return m.col(j - 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/gp_matern52_cov.hpp0000644000176200001440000002703614645137106024232 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GP_MATERN52_COV_HPP #define STAN_MATH_PRIM_FUN_GP_MATERN52_COV_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a Matern 5/2 covariance matrix with one input vector * * \f[ k(x, x') = \sigma^2\bigg(1 + * \frac{\sqrt{5}d(x, x')}{l} + \frac{5d(x, x')^2}{3l^2}\bigg) * exp\bigg(-\frac{5 d(x, x')}{l}\bigg) * \f] * * where \f$ d(x, x') \f$ is the Euclidean distance. 
* * @tparam T_x type of elements contained in vector x * @tparam T_s type of element of sigma, the magnitude * @tparam T_l type of elements of length scale * * @param x std::vector of elements that can be used in stan::math::distance * @param length_scale length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf * */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern52_cov(const std::vector &x, const T_s &sigma, const T_l &length_scale) { using std::exp; using std::sqrt; size_t x_size = x.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } for (size_t n = 0; n < x_size; ++n) { check_not_nan("gp_matern52_cov", "x", x[n]); } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); T_s sigma_sq = square(sigma); T_l root_5_inv_l = sqrt(5.0) / length_scale; T_l inv_l_sq_5_3 = 5.0 / (3.0 * square(length_scale)); size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov.coeffRef(j, j) = sigma_sq; size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { return_type_t sq_distance = squared_distance(x[i], x[j]); return_type_t dist = sqrt(sq_distance); cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * (1.0 + root_5_inv_l * dist + inv_l_sq_5_3 * sq_distance) * exp(-root_5_inv_l * dist); } } } } return cov; } /** * Returns a Matern 5/2 covariance matrix with one input vector * with automatic relevance determination (ARD). 
* * \f[ k(x, x') = \sigma^2\bigg(1 + * \sqrt{5}\sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}} + * \frac{5}{3} \sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}}\bigg) * exp\bigg(-\frac{5}{3}\bigg(\sqrt{\sum_{k=1}^K{\frac{d(x, x')^2}{l_k^2}} * }\bigg)\bigg) * \f] * * where \f$ d(x, x') \f$ is the Euclidean distance. * * @tparam T_x type of elements contained in vector x * @tparam T_s type of element of sigma, the magnitude * @tparam T_l type of elements in vector of length scale * * @param x std::vector of elements that can be used in stan::math::distance * @param length_scale length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf * @throw std::invalid_argument if length scale size != dimension of x */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern52_cov(const std::vector> &x, const T_s &sigma, const std::vector &length_scale) { using std::exp; size_t x_size = x.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } size_t l_size = length_scale.size(); for (size_t n = 0; n < x_size; ++n) { check_not_nan("gp_matern52_cov", "x", x[n]); } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); check_size_match("gp_matern52_cov", "x dimension", x[0].size(), "number of length scales", l_size); T_s sigma_sq = square(sigma); double root_5 = sqrt(5.0); double five_thirds = 5.0 / 3.0; std::vector, -1, 1>> x_new = divide_columns(x, length_scale); size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov.coeffRef(j, j) = sigma_sq; size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { return_type_t sq_distance 
= squared_distance(x_new[i], x_new[j]); return_type_t dist = sqrt(sq_distance); cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * (1.0 + root_5 * dist + five_thirds * sq_distance) * exp(-root_5 * dist); } } } } return cov; } /** * Returns a Matern 5/2 covariance matrix with two different input vectors * * \f[ k(x, x') = \sigma^2\bigg(1 + * \frac{\sqrt{5}d(x, x')}{l} + \frac{5d(x, x')^2}{3l^2}\bigg) * exp\bigg(-\frac{5 d(x, x')}{l}\bigg) * \f] * * where \f$ d(x, x') \f$ is the Euclidean distance. * * @tparam T_x1 type of elements contained in vector x1 * @tparam T_x2 type of elements contained in vector x2 * @tparam T_s type of element of sigma, the magnitude * @tparam T_l type of elements of length scale * * @param x1 std::vector of elements that can be used in * stan::math::distance * @param x2 std::vector of elements that can be used in * stan::math::distance * @param length_scale length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf * */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern52_cov(const std::vector &x1, const std::vector &x2, const T_s &sigma, const T_l &length_scale) { using std::exp; size_t x1_size = x1.size(); size_t x2_size = x2.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } for (size_t n = 0; n < x1_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x1[n]); } for (size_t n = 0; n < x2_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x2[n]); } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); T_s sigma_sq = square(sigma); T_l root_5_inv_l = sqrt(5.0) / length_scale; T_l inv_l_sq_5_3 = 5.0 / (3.0 * square(length_scale)); size_t block_size = 10; for (size_t ib = 0; ib < x1_size; ib += block_size) { for (size_t jb = 0; jb < x2_size; jb += block_size) 
{ size_t j_end = std::min(x2_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1_size, ib + block_size); for (size_t i = ib; i < i_end; ++i) { return_type_t sq_distance = squared_distance(x1[i], x2[j]); return_type_t dist = sqrt(sq_distance); cov.coeffRef(i, j) = sigma_sq * (1.0 + root_5_inv_l * dist + inv_l_sq_5_3 * sq_distance) * exp(-root_5_inv_l * dist); } } } } return cov; } /** * Returns a Matern 5/2 covariance matrix with two input vectors * with automatic relevance determination (ARD). * * \f[ k(x, x') = \sigma^2\bigg(1 + * \sqrt{5}\sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}} + * \frac{5}{3} \sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}}\bigg) * exp\bigg(-\frac{5}{3}\bigg(\sqrt{\sum_{k=1}^K{\frac{d(x, x')^2}{l_k^2}} * }\bigg)\bigg) * \f] * * where \f$ d(x, x') \f$ is the Euclidean distance. * * @tparam T_x1 type of elements contained in vector x1 * @tparam T_x2 type of elements contained in vector x2 * @tparam T_s type of element of sigma, the magnitude * @tparam T_l type of elements in vector of length scale * * @param x1 std::vector of elements that can be used in * stan::math::distance * @param x2 std::vector of elements that can be used in * stan::math::distance * @param length_scale length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf * @throw std::invalid_argument if length scale size != dimension of x1 or * x2 * */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern52_cov(const std::vector> &x1, const std::vector> &x2, const T_s &sigma, const std::vector &length_scale) { using std::exp; size_t x1_size = x1.size(); size_t x2_size = x2.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } size_t l_size = length_scale.size(); for (size_t n = 0; n < x1_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x1[n]); } 
for (size_t n = 0; n < x2_size; ++n) { check_not_nan("gp_matern52_cov", "x1", x2[n]); } check_positive_finite("gp_matern52_cov", "magnitude", sigma); check_positive_finite("gp_matern52_cov", "length scale", length_scale); check_size_match("gp_matern52_cov", "x dimension", x1[0].size(), "number of length scales", l_size); check_size_match("gp_matern52_cov", "x dimension", x2[0].size(), "number of length scales", l_size); T_s sigma_sq = square(sigma); double root_5 = sqrt(5.0); double five_thirds = 5.0 / 3.0; std::vector, -1, 1>> x1_new = divide_columns(x1, length_scale); std::vector, -1, 1>> x2_new = divide_columns(x2, length_scale); size_t block_size = 10; for (size_t ib = 0; ib < x1_size; ib += block_size) { for (size_t jb = 0; jb < x2_size; jb += block_size) { size_t j_end = std::min(x2_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1_size, ib + block_size); for (size_t i = ib; i < i_end; ++i) { return_type_t sq_distance = squared_distance(x1_new[i], x2_new[j]); return_type_t dist = sqrt(sq_distance); cov.coeffRef(i, j) = sigma_sq * (1.0 + root_5 * dist + five_thirds * sq_distance) * exp(-root_5 * dist); } } } } return cov; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/get.hpp0000644000176200001440000000245614645137106022016 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GET_HPP #define STAN_MATH_PRIM_FUN_GET_HPP #include #include #include #include #include #include namespace stan { /** \ingroup type_trait * Returns the provided element. Scalar type overload * for the function to retrieve n-th element of a vector, * \c Eigen \c Matrix or expression * * @param x input scalar * @param n index of the element to return * @return input scalar */ template > inline T get(const T& x, size_t n) { return x; } /** \ingroup type_trait * Returns the n-th element of the provided std::vector. 
* * @param x input vector * @param n index of the element to return * @return n-th element of the input vector */ template inline T get(const std::vector& x, size_t n) { return x[n]; } /** \ingroup type_trait * Returns the n-th element of the provided Eigen matrix. * * @param m input \c Eigen \c Matrix or expression * @param n index of the element to return * @return n-th element of the \c Eigen \c Matrix or expression */ template > inline scalar_type_t get(const T& m, size_t n) { return m(static_cast(n)); } } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/linspaced_array.hpp0000644000176200001440000000236214645137106024373 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LINSPACED_ARRAY_HPP #define STAN_MATH_PRIM_FUN_LINSPACED_ARRAY_HPP #include #include #include namespace stan { namespace math { /** * Return an array of linearly spaced elements. * * This produces an array from low to high (inclusive) with elements spaced * as (high - low) / (K - 1). For K=1, the array will contain the high value; * for K=0 it returns an empty array. * * @param K size of the array * @param low smallest value * @param high largest value * @return An array of size K with elements linearly spaced between * low and high. * @throw std::domain_error if K is negative, if low is nan or infinite, * if high is nan or infinite, or if high is less than low. 
*/ inline std::vector linspaced_array(int K, double low, double high) { static const char* function = "linspaced_array"; check_nonnegative(function, "size", K); check_finite(function, "low", low); check_finite(function, "high", high); check_greater_or_equal(function, "high", high, low); if (K == 0) { return {}; } Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(K, low, high); return {&v[0], &v[0] + K}; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/svd_U.hpp0000644000176200001440000000153314645137106022312 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SVD_U_HPP #define STAN_MATH_PRIM_FUN_SVD_U_HPP #include #include namespace stan { namespace math { /** * Given input matrix m, return matrix U where `m = UDV^{T}` * * @tparam EigMat type of the matrix * @param m MxN input matrix * @return Orthogonal matrix U */ template * = nullptr, require_not_st_var* = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> svd_U( const EigMat& m) { using MatType = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return MatType(0, 0); } return Eigen::JacobiSVD(m, Eigen::ComputeThinU).matrixU(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/prob_constrain.hpp0000644000176200001440000000505314645137106024255 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PROB_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_PROB_CONSTRAIN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return a probability value constrained to fall between 0 and 1 * (inclusive) for the specified free scalar. * *

The transform is the inverse logit, * *

\f$f(x) = \mbox{logit}^{-1}(x) = \frac{1}{1 + \exp(x)}\f$. * * @tparam T type of scalar * @param[in] x unconstrained value * @return result constrained to fall in (0, 1) */ template inline T prob_constrain(const T& x) { return inv_logit(x); } /** * Return a probability value constrained to fall between 0 and 1 * (inclusive) for the specified free scalar and increment the * specified log probability reference with the log absolute Jacobian * determinant of the transform. * *

The transform is as defined for prob_constrain(T). * The log absolute Jacobian determinant is * *

The log absolute Jacobian determinant is * *

\f$\log | \frac{d}{dx} \mbox{logit}^{-1}(x) |\f$ *

\f$\log ((\mbox{logit}^{-1}(x)) (1 - \mbox{logit}^{-1}(x))\f$ *

\f$\log (\mbox{logit}^{-1}(x)) + \log (1 - \mbox{logit}^{-1}(x))\f$. * * @tparam T type of scalar * @param[in] x unconstrained value * @param[in, out] lp log density * @return result constrained to fall in (0, 1) */ template inline T prob_constrain(const T& x, T& lp) { using std::log; T inv_logit_x = inv_logit(x); lp += log(inv_logit_x) + log1m(inv_logit_x); return inv_logit_x; } /** * Return a probability value constrained to fall between 0 and 1 (inclusive) * for the specified free scalar. If the `Jacobian` parameter is `true`, the log * density accumulator is incremented with the log absolute Jacobian determinant * of the transform. All of the transforms are specified with their Jacobians * in the *Stan Reference Manual* chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T type of scalar * @param[in] x unconstrained value * @param[in, out] lp log density accumulator * @return result constrained to fall in (0, 1) */ template inline auto prob_constrain(const T& x, T& lp) { if (Jacobian) { return prob_constrain(x, lp); } else { return prob_constrain(x); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lambert_w.hpp0000644000176200001440000000573114645137106023212 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LAMBERT_W_HPP #define STAN_MATH_PRIM_FUN_LAMBERT_W_HPP #include #include #include #include namespace stan { namespace math { /** * Compute the Lambert W function on W0 branch for a value x. * * @tparam T type of value * @param x value * @return value of the W0 branch of the Lambert W function for x * @throw std::domain_error if x is less than or equal to `-e^(-1)` */ template * = nullptr> inline double lambert_w0(const T& x) { return boost::math::lambert_w0(x, boost_policy_t<>()); } /** * Compute the Lambert W function on W-1 branch for a value x. 
* * @tparam T type of value * @param x value * @return value of the W-1 branch of the Lambert W function for x * @throw std::domain_error if x is less than or equal to `-e^(-1)` or greater * than or equal to 0 */ template * = nullptr> inline double lambert_wm1(const T& x) { return boost::math::lambert_wm1(x, boost_policy_t<>()); } namespace internal { /** * Structure to wrap lambert_w0() so it can be vectorized. * * @tparam T type of variable * @param x variable * @return value of the W0 branch of the Lambert W function at x. * @throw std::domain_error if x is less than or equal to `-e^(-1)` */ struct lambert_w0_fun { template static inline auto fun(const T& x) { return lambert_w0(x); } }; /** * Structure to wrap lambert_wm1() so it can be vectorized. * * @tparam T type of variable * @param x variable * @return value of the W-1 branch of the Lambert W function at x. * @throw std::domain_error if x is less than or equal to `-e^(-1)` or greater * than or equal to 0 */ struct lambert_wm1_fun { template static inline auto fun(const T& x) { return lambert_wm1(x); } }; } // namespace internal /** * Vectorized version of lambert_w0(). * * @tparam T type of container * @param x container * @return value of the W0 branch of the Lambert W function for each value in x * @throw std::domain_error if x is less than or equal to `-e^(-1)` */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto lambert_w0(const T& x) { return apply_scalar_unary::apply(x); } /** * Vectorized version of lambert_wm1(). 
* * @tparam T type of container * @param x container * @return value of the W0 branch of the Lambert W function for each value in x * @throw std::domain_error if x is less than or equal to `-e^(-1)` or greater * than or equal to 0 */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto lambert_wm1(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/get_base1_lhs.hpp0000644000176200001440000003070514645137106023735 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GET_BASE1_LHS_HPP #define STAN_MATH_PRIM_FUN_GET_BASE1_LHS_HPP #include #include #include namespace stan { namespace math { /** * Return a reference to the value of the specified vector at the * specified base-one index. If the index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i Index into vector plus 1. * @param error_msg Error message if the index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at i - 1 * @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs(std::vector& x, size_t i, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i, idx, error_msg); return x[i - 1]; } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. 
* @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs(std::vector >& x, size_t i1, size_t i2, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1_lhs(x[i1 - 1], i2, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs(std::vector > >& x, size_t i1, size_t i2, size_t i3, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1_lhs(x[i1 - 1], i2, i3, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. 
*/ template inline T& get_base1_lhs( std::vector > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1_lhs(x[i1 - 1], i2, i3, i4, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs( std::vector > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1_lhs(x[i1 - 1], i2, i3, i4, i5, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param i6 Sixth index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. 
*/ template inline T& get_base1_lhs( std::vector > > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1_lhs(x[i1 - 1], i2, i3, i4, i5, i6, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param i6 Sixth index plus 1. * @param i7 Seventh index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs( std::vector > > > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, size_t i7, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1_lhs(x[i1 - 1], i2, i3, i4, i5, i6, i7, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param i6 Sixth index plus 1. * @param i7 Seventh index plus 1. * @param i8 Eigth index plus 1. * @param error_msg Error message if an index is out of range. 
* @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs( std::vector > > > > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, size_t i7, size_t i8, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1_lhs(x[i1 - 1], i2, i3, i4, i5, i6, i7, i8, error_msg, idx + 1); } /** * Return a copy of the row of the specified vector at the specified * base-one row index. If the index is out of range, throw a * std::out_of_range exception with the specified * error message and index indicated. * * Warning: Because a copy is involved, it is inefficient * to access element of matrices by first using this method * to get a row then using a second call to get the value at * a specified column. * * @tparam T type of value * @param x Matrix from which to get a row * @param m Index into matrix plus 1. * @param error_msg Error message if the index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Row of matrix at i - 1. * @throw std::out_of_range if idx is out of range. */ template inline Eigen::Block > get_base1_lhs(Eigen::Matrix& x, size_t m, const char* error_msg, size_t idx) { check_range("[]", "rows of x", x.rows(), m, idx, error_msg); return x.block(m - 1, 0, 1, x.cols()); } /** * Return a reference to the value of the specified matrix at the specified * base-one row and column indexes. If either index is out of range, * throw a std::out_of_range exception with the * specified error message and index indicated. * * @tparam T type of value * @param x Matrix from which to get a row * @param m Row index plus 1. * @param n Column index plus 1. * @param error_msg Error message if either index is out of range. 
* @param idx Nested index level to report in error message if * either index is out of range. * @return Value of matrix at row m - 1 and column * n - 1. * @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs(Eigen::Matrix& x, size_t m, size_t n, const char* error_msg, size_t idx) { check_range("[]", "rows of x", x.rows(), m, idx, error_msg); check_range("[]", "cols of x", x.cols(), n, idx + 1, error_msg); return x(m - 1, n - 1); } /** * Return a reference to the value of the specified column vector * at the specified base-one index. If the index is out of range, * throw a std::out_of_range exception with the * specified error message and index indicated. * * @tparam T type of value * @param x Column vector from which to get a value. * @param m Row index plus 1. * @param error_msg Error message if the index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of column vector at row m - 1. * @throw std::out_of_range if idx is out of range. */ template inline T& get_base1_lhs(Eigen::Matrix& x, size_t m, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), m, idx, error_msg); return x(m - 1); } /** * Return a reference to the value of the specified row vector * at the specified base-one index. If the index is out of range, * throw a std::out_of_range exception with the * specified error message and index indicated. * * @tparam T type of value * @param x Row vector from which to get a value. * @param n Column index plus 1. * @param error_msg Error message if the index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of row vector at column n - 1. * @throw std::out_of_range if idx is out of range. 
*/ template inline T& get_base1_lhs(Eigen::Matrix& x, size_t n, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), n, idx, error_msg); return x(n - 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/crossprod.hpp0000644000176200001440000000123414645137106023246 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CROSSPROD_HPP #define STAN_MATH_PRIM_FUN_CROSSPROD_HPP #include #include #include namespace stan { namespace math { /** * Returns the result of pre-multiplying a matrix by its * own transpose. * * @tparam EigMat type of the matrix (must be derived from \c Eigen::MatrixBase) * @param M Matrix to multiply. * @return Transpose of M times M */ template * = nullptr> inline auto crossprod(const EigMat& M) { return tcrossprod(M.transpose()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sort_asc.hpp0000644000176200001440000000235314645137106023050 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SORT_ASC_HPP #define STAN_MATH_PRIM_FUN_SORT_ASC_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the specified standard vector in ascending order. * * @tparam T Type of elements contained in vector. * @param xs Vector to order. * @return Vector in ascending order. * @throw std::domain_error If any of the values are NaN. */ template inline std::vector sort_asc(std::vector xs) { check_not_nan("sort_asc", "container argument", xs); std::sort(xs.begin(), xs.end()); return xs; } /** * Return the specified vector in ascending order. * * @tparam EigVec type of the vector * * @param xs Vector to order. * @return Vector in ascending order. * @throw std::domain_error If any of the values are NaN. 
*/ template * = nullptr> inline plain_type_t sort_asc(EigVec&& xs) { plain_type_t x = std::forward(xs); check_not_nan("sort_asc", "container argument", x); std::sort(x.data(), x.data() + x.size()); return x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/qr_thin_Q.hpp0000644000176200001440000000211214645137106023150 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QR_THIN_Q_HPP #define STAN_MATH_PRIM_FUN_QR_THIN_Q_HPP #include #include #include namespace stan { namespace math { /** * Returns the orthogonal factor of the thin QR decomposition * * @tparam EigMat type of the matrix * @param m Matrix. * @return Orthogonal matrix with minimal columns */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> qr_thin_Q( const EigMat& m) { using matrix_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return matrix_t(0, 0); } Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); const int min_size = std::min(m.rows(), m.cols()); matrix_t Q = qr.householderQ() * matrix_t::Identity(m.rows(), min_size); for (int i = 0; i < min_size; i++) { if (qr.matrixQR().coeff(i, i) < 0) { Q.col(i) *= -1.0; } } return Q; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/quad_form_diag.hpp0000644000176200001440000000145314645137106024174 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QUAD_FORM_DIAG_HPP #define STAN_MATH_PRIM_FUN_QUAD_FORM_DIAG_HPP #include #include #include namespace stan { namespace math { template * = nullptr, require_eigen_vector_t* = nullptr> inline auto quad_form_diag(const EigMat& mat, const EigVec& vec) { check_square("quad_form_diag", "mat", mat); check_size_match("quad_form_diag", "rows of mat", mat.rows(), "size of vec", vec.size()); return make_holder( [](const auto& v, const auto& x) { return v.asDiagonal() * x * v.asDiagonal(); }, to_ref(vec), to_ref(mat)); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/fun/csr_to_dense_matrix.hpp0000644000176200001440000000506214645137106025266 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CSR_TO_DENSE_MATRIX_HPP #define STAN_MATH_PRIM_FUN_CSR_TO_DENSE_MATRIX_HPP #include #include #include #include #include namespace stan { namespace math { /** \addtogroup csr_format * @{ */ /** * Construct a dense Eigen matrix from the CSR format components. * * @tparam T type of the matrix * @param[in] m Number of matrix rows. * @param[in] n Number of matrix columns. * @param[in] w Values of non-zero matrix entries. * @param[in] v Column index for each value in w. * @param[in] u Index of where each row starts in w. * @return Dense matrix defined by previous arguments. * @throw std::domain_error If the arguments do not define a matrix. * @throw std::invalid_argument if m/n/w/v/u are not internally * consistent, as defined by the indexing scheme. Extractors are * defined in Stan which guarantee a consistent set of m/n/w/v/u * for a given sparse matrix. * @throw std::out_of_range if any of the indices are out of range. 
*/ template inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> csr_to_dense_matrix(int m, int n, const T& w, const std::vector& v, const std::vector& u) { using Eigen::Dynamic; using Eigen::Matrix; check_positive("csr_to_dense_matrix", "m", m); check_positive("csr_to_dense_matrix", "n", n); check_size_match("csr_to_dense_matrix", "m", m, "u", u.size() - 1); check_size_match("csr_to_dense_matrix", "w", w.size(), "v", v.size()); check_size_match("csr_to_dense_matrix", "u/z", u[m - 1] + csr_u_to_z(u, m - 1) - 1, "v", v.size()); for (int i : v) { check_range("csr_to_dense_matrix", "v[]", n, i); } const auto& w_ref = to_ref(w); Matrix, Dynamic, Dynamic> result(m, n); result.setZero(); for (int row = 0; row < m; ++row) { int row_nnz = csr_u_to_z(u, row); if (row_nnz > 0) { int row_end_in_w = (u[row] - stan::error_index::value) + row_nnz; check_range("csr_to_dense_matrix", "w", w.size(), row_end_in_w); for (int nze = u[row] - stan::error_index::value; nze < row_end_in_w; ++nze) { // row is row index, v[nze] is column index. w[nze] is entry value. check_range("csr_to_dense_matrix", "j", n, v[nze]); result(row, v[nze] - stan::error_index::value) = w_ref.coeff(nze); } } } return result; } /** @} */ // end of csr_format group } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_right_tri_low.hpp0000644000176200001440000000224114645137106025604 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_TRI_LOW_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_TRI_LOW_HPP #include #include #include namespace stan { namespace math { /** * Returns the solution of the system x tri(A) = b when tri(A) is a * lower triangular view of the matrix A. * * @tparam EigMat1 type of the right-hand side matrix or vector * @tparam EigMat2 type of the second matrix * * @param b right-hand side matrix or vector * @param A matrix * @return x = b * tri(A)^-1, solution of the linear system. 
* @throws std::domain_error if A is not square or the rows of b don't * match the size of A. */ template * = nullptr, require_all_not_vt_fvar* = nullptr> inline Eigen::Matrix, EigMat1::RowsAtCompileTime, EigMat2::ColsAtCompileTime> mdivide_right_tri_low(const EigMat1& b, const EigMat2& A) { return mdivide_right_tri(b, A); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/trunc.hpp0000644000176200001440000000226614645137106022371 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRUNC_HPP #define STAN_MATH_PRIM_FUN_TRUNC_HPP #include #include #include namespace stan { namespace math { /** * Structure to wrap `trunc()` so it can be vectorized. */ struct trunc_fun { /** * Return the truncation of the specified argument to the * nearest value. * * @tparam T type of argument * @param x argument * @return truncation of the argument */ template static inline auto fun(const T& x) { using std::trunc; return trunc(x); } }; /** * Return the elementwise application of `trunc()` to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. * * @tparam T type of container * @param x container * @return elementwise trunc of container elements */ template * = nullptr> inline auto trunc(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/pow.hpp0000644000176200001440000000374514645137106022046 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_POW_HPP #define STAN_MATH_PRIM_FUN_POW_HPP #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Return the first argument raised to the power of the second * argument. At least one of the arguments must be a complex number. 
* * @tparam U type of base * @tparam V type of exponent * @param[in] x base * @param[in] y exponent * @return base raised to the power of the exponent */ template inline complex_return_t complex_pow(const U& x, const V& y) { return exp(y * log(x)); } } // namespace internal /** * Return the first argument raised to the power of the second * argument. * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param a first argument * @param b second argument * @return the first argument raised to the power of the second * argument. */ template , std::is_arithmetic>, disjunction, std::is_arithmetic>>* = nullptr> inline auto pow(const T1& a, const T2& b) { return std::pow(a, b); } /** * Returns the elementwise raising of the first argument to the power of the * second argument. * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param a first argument * @param b second argument * @return the elementwise raising of the first argument to the power of the * second argument. */ template * = nullptr, require_all_not_matrix_st* = nullptr> inline auto pow(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [](const auto& c, const auto& d) { using std::pow; return pow(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sub_row.hpp0000644000176200001440000000170214645137106022710 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SUB_ROW_HPP #define STAN_MATH_PRIM_FUN_SUB_ROW_HPP #include #include namespace stan { namespace math { /** * Return a 1 x ncols subrow starting at (i-1, j-1). * * @tparam T type of the matrix * @param m Matrix Input matrix. * @param i Starting row + 1. * @param j Starting column + 1. * @param ncols Number of columns in block. * @throw std::out_of_range if either index is out of range. 
*/ template < typename T, require_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto sub_row(const T& m, size_t i, size_t j, size_t ncols) { check_row_index("sub_row", "i", m, i); check_column_index("sub_row", "j", m, j); if (ncols > 0) { check_column_index("sub_col", "j+ncols-1", m, j + ncols - 1); } return m.row(i - 1).segment(j - 1, ncols); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ones_row_vector.hpp0000644000176200001440000000111014645137106024436 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONES_ROW_VECTOR_HPP #define STAN_MATH_PRIM_FUN_ONES_ROW_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a row vector of ones * * @param K size of the row vector * @return A row vector of size K with all elements initialized to 1. * @throw std::domain_error if K is negative. */ inline auto ones_row_vector(int K) { check_nonnegative("ones_row_vector", "size", K); return Eigen::RowVectorXd::Constant(K, 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_vector.hpp0000644000176200001440000000240314645137106023233 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_VECTOR_HPP #define STAN_MATH_PRIM_FUN_TO_VECTOR_HPP #include #include #include namespace stan { namespace math { // vector to_vector(matrix) // vector to_vector(row_vector) // vector to_vector(vector) template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, 1> to_vector( const EigMat& matrix) { using T = value_type_t; Eigen::Matrix res(matrix.size()); Eigen::Map< Eigen::Matrix> res_map(res.data(), matrix.rows(), matrix.cols()); res_map = matrix; return res; } // vector to_vector(real[]) template inline Eigen::Matrix to_vector( const std::vector& vec) { return Eigen::Matrix::Map(vec.data(), vec.size()); } // vector to_vector(int[]) inline Eigen::Matrix to_vector( const std::vector& vec) { int R = vec.size(); Eigen::Matrix result(R); for (int i 
= 0; i < R; i++) { result(i) = vec[i]; } return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/proj.hpp0000644000176200001440000000201214645137106022175 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PROJ_HPP #define STAN_MATH_PRIM_FUN_PROJ_HPP #include #include #include namespace stan { namespace math { /** * Return the projection of the complex argument onto the Riemann * sphere. * * @tparam V value type of argument * @param[in] z argument * @return projection of the argument onto the Riemann sphere */ template inline std::complex proj(const std::complex& z) { return std::proj(z); } namespace internal { /** * Return the projection of the complex argument onto the Riemann * sphere. * * @tparam V value type of argument * @param[in] z argument * @return projection of the argument onto the Riemann sphere */ template inline std::complex complex_proj(const std::complex& z) { if (is_inf(z.real()) || is_inf(z.imag())) { return {std::numeric_limits::infinity(), z.imag() < 0 ? -0.0 : 0.0}; } return z; } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/squared_distance.hpp0000644000176200001440000000414714645137106024554 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SQUARED_DISTANCE_HPP #define STAN_MATH_PRIM_FUN_SQUARED_DISTANCE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the squared distance. * * @tparam Scal1 Type of the first scalar. * @tparam Scal2 Type of the second scalar. * @param x1 First scalar. * @param x2 Second scalar. * @return Squared distance between scalars * @throw std::domain_error Any scalar is not finite. 
*/ template * = nullptr, require_all_not_var_t* = nullptr> inline return_type_t squared_distance(const Scal1& x1, const Scal2& x2) { check_finite("squared_distance", "x1", x1); check_finite("squared_distance", "x2", x2); return square(x1 - x2); } /** * Returns the squared distance between the specified vectors * of the same dimensions. * * @tparam EigVec1 type of the first vector (must be derived from \c * Eigen::MatrixBase and have one compile time dimension equal to 1) * @tparam EigVec2 type of the second vector (must be derived from \c * Eigen::MatrixBase and have one compile time dimension equal to 1) * @param v1 First vector. * @param v2 Second vector. * @return Square of distance between vectors. * @throw std::domain_error If the vectors are not the same * size. */ template * = nullptr, require_all_not_eigen_vt* = nullptr> inline return_type_t squared_distance(const EigVec1& v1, const EigVec2& v2) { check_matching_sizes("squared_distance", "v1", v1, "v2", v2); return (as_column_vector_or_scalar(v1) - as_column_vector_or_scalar(v2)) .squaredNorm(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_gte.hpp0000644000176200001440000000112714645137106023502 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_GTE_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_GTE_HPP namespace stan { namespace math { /** * Return 1 if the first argument is greater than or equal to the second. * Equivalent to x1 >= x2. 
* * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true if x1 >= x2 */ template inline bool logical_gte(const T1 x1, const T2 x2) { return x1 >= x2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/isnormal.hpp0000644000176200001440000000124114645137106023052 0ustar liggesusers#ifndef STAN_MATH_PRIM_SCAL_FUN_ISNORMAL_HPP #define STAN_MATH_PRIM_SCAL_FUN_ISNORMAL_HPP #include namespace stan { namespace math { /** * Return true if specified argument is normal. A number is normal if * it is finite, non-zero and not subnormal. * * Overloads `std::isnormal` from `` for argument-dependent * lookup. * * @tparam ADType type of argument * @param[in] v argument * @return true if argument is normal */ template * = nullptr> inline bool isnormal(ADType&& v) { using std::isnormal; return isnormal(v.val()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/read_corr_matrix.hpp0000644000176200001440000000455514645137106024565 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_READ_CORR_MATRIX_HPP #define STAN_MATH_PRIM_FUN_READ_CORR_MATRIX_HPP #include #include #include namespace stan { namespace math { /** * Return the correlation matrix of the specified dimensionality * corresponding to the specified canonical partial correlations. * *

See read_corr_matrix(Array, size_t, T) * for more information. * * @tparam T_CPCs type of the array (must be derived from \c Eigen::ArrayBase * and have one compile-time dimension equal to 1) * @param CPCs The (K choose 2) canonical partial correlations in (-1, 1). * @param K Dimensionality of correlation matrix. * @return Cholesky factor of correlation matrix for specified * canonical partial correlations. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> read_corr_matrix(const T_CPCs& CPCs, size_t K) { if (K == 0) { return {}; } return multiply_lower_tri_self_transpose(read_corr_L(CPCs, K)); } /** * Return the correlation matrix of the specified dimensionality * corresponding to the specified canonical partial correlations, * incrementing the specified scalar reference with the log * absolute determinant of the Jacobian of the transformation. * * It is usually preferable to utilize the version that returns * the Cholesky factor of the correlation matrix rather than the * correlation matrix itself in statistical calculations. * * @tparam T_CPCs type of the array (must be derived from \c Eigen::ArrayBase * and have one compile-time dimension equal to 1) * @param CPCs The (K choose 2) canonical partial correlations in * (-1, 1). * @param K Dimensionality of correlation matrix. * @param log_prob Reference to variable to increment with the log * Jacobian determinant. * @return Correlation matrix for specified partial correlations. 
*/ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> read_corr_matrix(const T_CPCs& CPCs, size_t K, value_type_t& log_prob) { if (K == 0) { return {}; } return multiply_lower_tri_self_transpose(read_corr_L(CPCs, K, log_prob)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/scalar_seq_view.hpp0000644000176200001440000001164414645137106024405 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SCALAR_SEQ_VIEW_HPP #define STAN_MATH_PRIM_FUN_SCALAR_SEQ_VIEW_HPP #include #include #include #include namespace stan { /** * scalar_seq_view provides a uniform sequence-like wrapper around either a * scalar or a sequence of scalars. * * @tparam C the container type; will be the scalar type if wrapping a scalar * @tparam T the scalar type */ template class scalar_seq_view; template class scalar_seq_view> { public: template , plain_type_t>> explicit scalar_seq_view(T&& c) : c_(std::forward(c)) {} /** * Segfaults if out of bounds. * @param i index * @return the element at the specified position in the container */ inline auto operator[](size_t i) const { return c_.coeff(i); } inline auto size() const noexcept { return c_.size(); } inline const value_type_t* data() const noexcept { return c_.data(); } inline value_type_t* data() noexcept { return c_.data(); } template * = nullptr> inline decltype(auto) val(size_t i) const { return c_.coeffRef(i); } template * = nullptr> inline decltype(auto) val(size_t i) const { return c_.coeffRef(i).val(); } private: ref_type_t c_; }; template class scalar_seq_view> { public: template , plain_type_t>> explicit scalar_seq_view(T&& c) : c_(std::forward(c)) {} /** * Segfaults if out of bounds. 
* @param i index * @return the element at the specified position in the container */ inline auto operator[](size_t i) const { return c_.coeff(i); } inline const auto* data() const noexcept { return c_.vi_; } inline auto* data() noexcept { return c_.vi_; } inline auto size() const noexcept { return c_.size(); } template * = nullptr> inline auto val(size_t i) const { return c_.val().coeff(i); } template * = nullptr> inline auto& val(size_t i) { return c_.val().coeffRef(i); } private: std::decay_t c_; }; template class scalar_seq_view> { public: template , plain_type_t>> explicit scalar_seq_view(T&& c) : c_(std::forward(c)) {} /** * Segfaults if out of bounds. * @param i index * @return the element at the specified position in the container */ inline auto operator[](size_t i) const { return c_[i]; } inline auto size() const noexcept { return c_.size(); } inline const auto* data() const noexcept { return c_.data(); } template * = nullptr> inline decltype(auto) val(size_t i) const { return c_[i]; } template * = nullptr> inline decltype(auto) val(size_t i) const { return c_[i].val(); } private: const C& c_; }; template class scalar_seq_view>> { public: template , plain_type_t>> explicit scalar_seq_view(const T& c) : c_(c) {} /** * Segfaults if out of bounds. * @param i index * @return the element at the specified position in the container */ inline auto operator[](size_t i) const { return c_[i]; } inline auto size() const noexcept { static_assert(1, "Cannot Return Size of scalar_seq_view with pointer type"); } inline const auto* data() const noexcept { return &c_[0]; } template * = nullptr> inline decltype(auto) val(size_t i) const { return c_[i]; } template * = nullptr> inline decltype(auto) val(size_t i) const { return c_[i].val(); } private: const C& c_; }; /** * This specialization handles wrapping a scalar as if it were a sequence. 
* * @tparam T the scalar type */ template class scalar_seq_view> { public: explicit scalar_seq_view(const C& t) noexcept : t_(t) {} inline decltype(auto) operator[](int /* i */) const noexcept { return t_; } template * = nullptr> inline decltype(auto) val(int /* i */) const noexcept { return t_; } template * = nullptr> inline decltype(auto) val(int /* i */) const noexcept { return t_.val(); } static constexpr auto size() { return 1; } inline const auto* data() const noexcept { return &t_; } inline auto* data() noexcept { return &t_; } private: std::decay_t t_; }; } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/generalized_inverse.hpp0000644000176200001440000000353614645137106025263 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GENERALIZED_INVERSE_HPP #define STAN_MATH_PRIM_FUN_GENERALIZED_INVERSE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the Moore-Penrose generalized inverse of the specified matrix. * * The method is based on the Cholesky computation of the transform as specified * in * *

  • Courrieu, Pierre. 2008. Fast Computation of Moore-Penrose Inverse Matrices. * arXiv 0804.4809
* * @tparam EigMat type of the matrix (must be derived from `Eigen::MatrixBase`) * * @param G specified matrix * @return Generalized inverse of the matrix (an empty matrix if the specified * matrix has size zero). */ template * = nullptr, require_not_vt_var* = nullptr> inline Eigen::Matrix, EigMat::ColsAtCompileTime, EigMat::RowsAtCompileTime> generalized_inverse(const EigMat& G) { const auto& G_ref = to_ref(G); if (G_ref.size() == 0) return {}; if (G_ref.rows() == G_ref.cols()) { Eigen::CompleteOrthogonalDecomposition< Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime>> complete_ortho_decomp_G = G_ref.completeOrthogonalDecomposition(); if (!(complete_ortho_decomp_G.rank() < G_ref.rows())) return inverse(G_ref); else return complete_ortho_decomp_G.pseudoInverse(); } if (G_ref.rows() < G_ref.cols()) { return (G_ref * G_ref.transpose()).ldlt().solve(G_ref).transpose(); } else { return (G_ref.transpose() * G_ref).ldlt().solve(G_ref.transpose()); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/corr_matrix_constrain.hpp0000644000176200001440000001356714645137106025655 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CORR_MATRIX_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_CORR_MATRIX_CONSTRAIN_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the correlation matrix of the specified dimensionality * derived from the specified vector of unconstrained values. The * input vector must be of length \f${k \choose 2} = * \frac{k(k-1)}{2}\f$. The values in the input vector represent * unconstrained (partial) correlations among the dimensions. * *

The transform based on partial correlations is as specified * in * *

  • Lewandowski, Daniel, Dorota Kurowicka, and Harry * Joe. 2009. Generating random correlation matrices based on * vines and extended onion method. Journal of Multivariate * Analysis 100:1989–-2001.
* *

The free vector entries are first constrained to be * valid correlation values using corr_constrain(T). * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x Vector of unconstrained partial correlations. * @param k Dimensionality of returned correlation matrix. * @throw std::invalid_argument if x is not a valid correlation * matrix. */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> corr_matrix_constrain(const T& x, Eigen::Index k) { Eigen::Index k_choose_2 = (k * (k - 1)) / 2; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "k_choose_2", k_choose_2); return read_corr_matrix(corr_constrain(x), k); } /** * Return the correlation matrix of the specified dimensionality * derived from the specified vector of unconstrained values. The * input vector must be of length \f${k \choose 2} = * \frac{k(k-1)}{2}\f$. The values in the input vector represent * unconstrained (partial) correlations among the dimensions. * *

The transform is as specified for * corr_matrix_constrain(Matrix, size_t); the * paper it cites also defines the Jacobians for correlation inputs, * which are composed with the correlation constrained Jacobians * defined in corr_constrain(T, double) for * this function. * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x Vector of unconstrained partial correlations. * @param k Dimensionality of returned correlation matrix. * @param lp Log probability reference to increment. */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> corr_matrix_constrain(const T& x, Eigen::Index k, return_type_t& lp) { Eigen::Index k_choose_2 = (k * (k - 1)) / 2; check_size_match("cov_matrix_constrain", "x.size()", x.size(), "k_choose_2", k_choose_2); return read_corr_matrix(corr_constrain(x, lp), k, lp); } /** * Return the correlation matrix of the specified dimensionality derived from * the specified vector of unconstrained values. The input vector must be of * length \f${k \choose 2} = \frac{k(k-1)}{2}\f$. The values in the input * vector represent unconstrained (partial) correlations among the dimensions. * If the `Jacobian` parameter is `true`, the log density accumulator is * incremented with the log absolute Jacobian determinant of the transform. All * of the transforms are specified with their Jacobians in the *Stan Reference * Manual* chapter Constraint Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time dynamic rows * and 1 column * @param x Vector of unconstrained partial correlations * @param k Dimensionality of returned correlation matrix * @param[in,out] lp log density accumulator */ template * = nullptr> inline auto corr_matrix_constrain(const T& x, Eigen::Index k, return_type_t& lp) { if (Jacobian) { return corr_matrix_constrain(x, k, lp); } else { return corr_matrix_constrain(x, k); } } /** * Return the correlation matrix of the specified dimensionality derived from * the specified vector of unconstrained values. The input vector must be of * length \f${k \choose 2} = \frac{k(k-1)}{2}\f$. The values in the input * vector represent unconstrained (partial) correlations among the dimensions. * If the `Jacobian` parameter is `true`, the log density accumulator is * incremented with the log absolute Jacobian determinant of the transform. All * of the transforms are specified with their Jacobians in the *Stan Reference * Manual* chapter Constraint Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time dynamic rows and 1 column * @param y Vector of unconstrained partial correlations * @param K Dimensionality of returned correlation matrix * @param[in,out] lp log density accumulator */ template * = nullptr> inline auto corr_matrix_constrain(const T& y, int K, return_type_t& lp) { return apply_vector_unary::apply(y, [&lp, K](auto&& v) { return corr_matrix_constrain(v, K, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_falling_factorial.hpp0000644000176200001440000000515114645137106025533 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_FALLING_FACTORIAL_HPP #define STAN_MATH_PRIM_FUN_LOG_FALLING_FACTORIAL_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * * Return the natural log of the falling factorial of the * specified arguments. 
* \f[ \mbox{log\_falling\_factorial}(x, n) = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \ln (x)_n & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_falling\_factorial}(x, n)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \Psi(x) & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_falling\_factorial}(x, n)}{\partial n} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ -\Psi(n) & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] * @tparam T1 type of first argument * @tparam T2 type of second argument * @param[in] x First argument * @param[in] n Second argument * @return log of falling factorial of arguments * @throw std::domain_error if the first argument is not * positive */ template * = nullptr> inline return_type_t log_falling_factorial(const T1 x, const T2 n) { if (is_any_nan(x, n)) { return NOT_A_NUMBER; } static const char* function = "log_falling_factorial"; check_positive(function, "first argument", x); return lgamma(x + 1) - lgamma(x - n + 1); } /** * Enables the vectorized application of the log_falling_factorial function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return log_falling_factorial function applied to the two inputs. 
*/ template * = nullptr> inline auto log_falling_factorial(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return log_falling_factorial(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/grad_reg_inc_gamma.hpp0000644000176200001440000000754214645137106025005 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GRAD_REG_INC_GAMMA_HPP #define STAN_MATH_PRIM_FUN_GRAD_REG_INC_GAMMA_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Gradient of the regularized incomplete gamma functions igamma(a, z) * * For small z, the gradient is computed via the series expansion; * for large z, the series is numerically inaccurate due to cancellation * and the asymptotic expansion is used. * * @tparam T1 type of the shape parameter * @tparam T2 type of the location parameter * @param a shape parameter, a > 0 * @param z location z >= 0 * @param g stan::math::tgamma(a) (precomputed value) * @param dig boost::math::digamma(a) (precomputed value) * @param precision required precision; applies to series expansion only * @param max_steps number of steps to take. * @throw throws std::domain_error if not converged after max_steps * or increment overflows to inf. 
* * For the asymptotic expansion, the gradient is given by: \f[ \begin{array}{rcl} \Gamma(a, z) & = & z^{a-1}e^{-z} \sum_{k=0}^N \frac{(a-1)_k}{z^k} \qquad , z \gg a\\ Q(a, z) & = & \frac{z^{a-1}e^{-z}}{\Gamma(a)} \sum_{k=0}^N \frac{(a-1)_k}{z^k}\\ (a)_k & = & (a)_{k-1}(a-k)\\ \frac{d}{da} (a)_k & = & (a)_{k-1} + (a-k)\frac{d}{da} (a)_{k-1}\\ \frac{d}{da}Q(a, z) & = & (log(z) - \psi(a)) Q(a, z)\\ && + \frac{z^{a-1}e^{-z}}{\Gamma(a)} \sum_{k=0}^N \left(\frac{d}{da} (a-1)_k\right) \frac{1}{z^k} \end{array} \f] */ template return_type_t grad_reg_inc_gamma(T1 a, T2 z, T1 g, T1 dig, double precision = 1e-6, int max_steps = 1e5) { using std::exp; using std::fabs; using std::log; using TP = return_type_t; if (is_any_nan(a, z, g, dig)) { return std::numeric_limits::quiet_NaN(); } T2 l = log(z); if (z >= a && z >= 8) { // asymptotic expansion http://dlmf.nist.gov/8.11#E2 TP S = 0; T1 a_minus_one_minus_k = a - 1; T1 fac = a_minus_one_minus_k; // falling_factorial(a-1, k) T1 dfac = 1; // d/da[falling_factorial(a-1, k)] T2 zpow = z; // z ** k TP delta = dfac / zpow; for (int k = 1; k < 10; ++k) { a_minus_one_minus_k -= 1; S += delta; zpow *= z; dfac = a_minus_one_minus_k * dfac + fac; fac *= a_minus_one_minus_k; delta = dfac / zpow; if (is_inf(delta)) { throw_domain_error("grad_reg_inc_gamma", "is not converging", "", ""); } } return gamma_q(a, z) * (l - dig) + exp(-z + (a - 1) * l) * S / g; } else { // gradient of series expansion http://dlmf.nist.gov/8.7#E3 TP S = 0; TP log_s = 0.0; double s_sign = 1.0; T2 log_z = log(z); TP log_delta = log_s - multiply_log(2, a); for (int k = 1; k <= max_steps; ++k) { S += s_sign >= 0.0 ? 
exp(log_delta) : -exp(log_delta); log_s += log_z - log(k); s_sign = -s_sign; log_delta = log_s - multiply_log(2, k + a); if (is_inf(log_delta)) { throw_domain_error("grad_reg_inc_gamma", "is not converging", "", ""); } if (log_delta <= log(precision)) { return gamma_p(a, z) * (dig - l) + exp(a * l) * S / g; } } throw_domain_error( "grad_reg_inc_gamma", "k (internal counter)", max_steps, "exceeded ", " iterations, gamma function gradient did not converge."); return INFTY; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/append_col.hpp0000644000176200001440000000654514645137106023346 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_APPEND_COL_HPP #define STAN_MATH_PRIM_FUN_APPEND_COL_HPP #include #include #include #include namespace stan { namespace math { /** * Return the result of appending the second argument matrix after the * first argument matrix, that is, putting them side by side, with * the first matrix followed by the second matrix. * * Given input types result in following outputs: * (matrix, matrix) -> matrix, * (matrix, vector) -> matrix, * (vector, matrix) -> matrix, * (vector, vector) -> matrix, * (row vector, row vector) -> row_vector. * * @tparam T1 type of the first matrix * @tparam T2 type of the second matrix * * @param A First matrix. * @param B Second matrix. * @return Result of appending the first matrix followed by the * second matrix side by side. */ template > inline auto append_col(const T1& A, const T2& B) { using Eigen::Dynamic; using Eigen::Matrix; using T_return = return_type_t; constexpr int row_type = (T1::RowsAtCompileTime == 1 && T2::RowsAtCompileTime == 1) ? 
1 : Eigen::Dynamic; int Arows = A.rows(); int Brows = B.rows(); int Acols = A.cols(); int Bcols = B.cols(); check_size_match("append_col", "rows of A", Arows, "rows of B", Brows); Matrix result(Arows, Acols + Bcols); result.leftCols(Acols) = A.template cast(); result.rightCols(Bcols) = B.template cast(); return result; } /** * Return the result of stacking an scalar on top of the * a row vector, with the result being a row vector. * * This function applies to (scalar, row vector) and returns a * row vector. * * @tparam Scal type of the scalar * @tparam RowVec type of the row vector * * @param A scalar. * @param B row vector. * @return Result of stacking the scalar on top of the row vector. */ template * = nullptr, require_t>* = nullptr> inline Eigen::Matrix, 1, Eigen::Dynamic> append_col( const Scal& A, const RowVec& B) { using Eigen::Dynamic; using Eigen::Matrix; using T_return = return_type_t; Matrix result(B.size() + 1); result << A, B.template cast(); return result; } /** * Return the result of stacking a row vector on top of the * an scalar, with the result being a row vector. * * This function applies to (row vector, scalar) and returns a * row vector. * * @tparam RowVec type of the row vector * @tparam Scal type of the scalar * * @param A row vector. * @param B scalar. * @return Result of stacking the row vector on top of the scalar. 
*/ template >* = nullptr, require_stan_scalar_t* = nullptr> inline Eigen::Matrix, 1, Eigen::Dynamic> append_col( const RowVec& A, const Scal& B) { using Eigen::Dynamic; using Eigen::Matrix; using T_return = return_type_t; Matrix result(A.size() + 1); result << A.template cast(), B; return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log1m_exp.hpp0000644000176200001440000000420614645137106023125 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG1M_EXP_HPP #define STAN_MATH_PRIM_FUN_LOG1M_EXP_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Calculates the natural logarithm of one minus the exponential * of the specified value without overflow, * *

log1m_exp(x) = log(1-exp(x)) * * This function is only defined for x < 0 * \f[ \mbox{log1m\_exp}(x) = \begin{cases} \ln(1-\exp(x)) & \mbox{if } x < 0 \\ \textrm{NaN} & \mbox{if } x \geq 0\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{asinh}(x)}{\partial x} = \begin{cases} -\frac{\exp(x)}{1-\exp(x)} & \mbox{if } x < 0 \\ \textrm{NaN} & \mbox{if } x \geq 0\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * @param[in] a Argument. * @return natural logarithm of one minus the exponential of the * argument. * */ inline double log1m_exp(double a) { using std::exp; using std::log; if (a > 0) { return NOT_A_NUMBER; } else if (a > -0.693147) { return log(-expm1(a)); // 0.693147 ~= log(2) } else { return log1m(exp(a)); } } /** * Structure to wrap log1m_exp() so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Natural log of (1 - exp(x)). */ struct log1m_exp_fun { template static inline auto fun(const T& x) { return log1m_exp(x); } }; /** * Vectorized version of log1m_exp(). * * @tparam T type of container * @param x container * @return Natural log of (1 - exp()) applied to each value in x. */ template < typename T, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto log1m_exp(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/arg.hpp0000644000176200001440000000141314645137106022000 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ARG_HPP #define STAN_MATH_PRIM_FUN_ARG_HPP #include #include namespace stan { namespace math { /** * Return the phase angle of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return phase angle of the argument */ template inline V arg(const std::complex& z) { return std::arg(z); } namespace internal { /** * Return the phase angle of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return phase angle of the argument */ template inline V complex_arg(const std::complex& z) { using std::atan2; return atan2(z.imag(), z.real()); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/trigamma.hpp0000644000176200001440000001062014645137106023030 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRIGAMMA_HPP #define STAN_MATH_PRIM_FUN_TRIGAMMA_HPP #include #include #include #include #include #include #include #include #include // Reference: // BE Schneider, // Algorithm AS 121: // Trigamma Function, // Applied Statistics, // Volume 27, Number 1, pages 97-99, 1978. namespace stan { namespace math { /** * Return the trigamma function applied to the argument. The * trigamma function returns the second derivative of the * log Gamma function evaluated at the specified argument. * This base templated version is used in prim, fwd, and rev * implementations. * * @tparam T scalar argument type * @param x argument * @return second derivative of log Gamma function at argument * */ template inline T trigamma_impl(const T& x) { using std::floor; using std::sin; double small = 0.0001; double large = 5.0; T value; T y; T z; // bernoulli numbers double b2 = inv(6.0); double b4 = -inv(30.0); double b6 = inv(42.0); double b8 = -inv(30.0); // negative integers and zero return positive infinity // see http://mathworld.wolfram.com/PolygammaFunction.html if (x <= 0.0 && floor(x) == x) { value = positive_infinity(); return value; } // negative non-integers: use the reflection formula // see http://mathworld.wolfram.com/PolygammaFunction.html if (x <= 0 && floor(x) != x) { value = -trigamma_impl(-x + 1.0) + square(pi() / sin(-pi() * x)); return value; } // small value approximation if x <= small. 
if (x <= small) { return inv_square(x); } // use recurrence relation until x >= large // see http://mathworld.wolfram.com/PolygammaFunction.html z = x; value = 0.0; while (z < large) { value += inv_square(z); z += 1.0; } // asymptotic expansion as a Laurent series if x >= large // see http://en.wikipedia.org/wiki/Trigamma_function y = inv_square(z); value += 0.5 * y + (1.0 + y * (b2 + y * (b4 + y * (b6 + y * b8)))) / z; return value; } /** * Return the second derivative of the log Gamma function * evaluated at the specified argument. * \f[ \mbox{trigamma}(x) = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \Psi_1(x) & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{trigamma}(x)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \frac{\partial\, \Psi_1(x)}{\partial x} & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \Psi_1(x)=\sum_{n=0}^\infty \frac{1}{(x+n)^2} \f] \f[ \frac{\partial \, \Psi_1(x)}{\partial x} = -2\sum_{n=0}^\infty \frac{1}{(x+n)^3} \f] * * @param[in] u argument * @return second derivative of log Gamma function at argument */ inline double trigamma(double u) { return trigamma_impl(u); } /** * Return the second derivative of the log Gamma function * evaluated at the specified argument. * * @param[in] u argument * @return second derivative of log Gamma function at argument */ inline double trigamma(int u) { return trigamma(static_cast(u)); } /** * Structure to wrap `trigamma()` so it can be vectorized. */ struct trigamma_fun { /** * Return the trigamma() function applied to * the argument. * * @tparam T type of argument * @param x argument * @return trigamma applied to argument. 
*/ template static inline auto fun(const T& x) { return trigamma(x); } }; /** * Return the elementwise application of `trigamma()` to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. * * @tparam T type of container * @param x container * @return elementwise trigamma of container elements */ template * = nullptr> inline auto trigamma(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/poisson_binomial_log_probs.hpp0000644000176200001440000000466114645137106026651 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_POISSON_BINOMIAL_LOG_PROBS_HPP #define STAN_MATH_PRIM_FUN_POISSON_BINOMIAL_LOG_PROBS_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the last row of the log probability matrix of the Poisson-Binomial * distribution given the number of successes and a vector of success * probabilities. 
* * @tparam T_theta template expression * @param y numbers of successes * @param theta N-dimensional vector of success probabilities for each trial * @return the last row of the computed log probability matrix */ template , require_eigen_vector_t* = nullptr> plain_type_t poisson_binomial_log_probs(int y, const T_theta& theta) { int size_theta = theta.size(); plain_type_t log_theta = log(theta); plain_type_t log1m_theta = log1m(theta); Eigen::Matrix alpha(size_theta + 1, y + 1); // alpha[i, j] = log prob of j successes in first i trials alpha(0, 0) = 0.0; for (int i = 0; i < size_theta; ++i) { // no success in i trials alpha(i + 1, 0) = alpha(i, 0) + log1m_theta[i]; // 0 < j < i successes in i trials for (int j = 0; j < std::min(y, i); ++j) { alpha(i + 1, j + 1) = log_sum_exp(alpha(i, j) + log_theta[i], alpha(i, j + 1) + log1m_theta[i]); } // i successes in i trials if (i < y) { alpha(i + 1, i + 1) = alpha(i, i) + log_theta(i); } } return alpha.row(size_theta); } template * = nullptr> auto poisson_binomial_log_probs(const T_y& y, const T_theta& theta) { using T_scalar = scalar_type_t; size_t max_sizes = std::max(stan::math::size(y), size_mvt(theta)); std::vector> result(max_sizes); scalar_seq_view y_vec(y); vector_seq_view theta_vec(theta); for (size_t i = 0; i < max_sizes; ++i) { result[i] = poisson_binomial_log_probs(y_vec[i], theta_vec[i]); } return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/offset_multiplier_constrain.hpp0000644000176200001440000002744314645137106027056 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_OFFSET_MULTIPLIER_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_OFFSET_MULTIPLIER_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the linearly transformed value for the specified unconstrained input * and specified offset and multiplier. * *

The transform applied is * *

\f$f(x) = mu + sigma * x\f$ * *

where mu is the offset and sigma is the multiplier. * *

If the offset is zero and the multiplier is one this * reduces to identity_constrain(x). * * @tparam T type of scalar * @tparam M type of offset * @tparam S type of multiplier * @param[in] x Unconstrained scalar input * @param[in] mu offset of constrained output * @param[in] sigma multiplier of constrained output * @return linear transformed value corresponding to inputs * @throw std::domain_error if sigma <= 0 * @throw std::domain_error if mu is not finite */ template * = nullptr> inline auto offset_multiplier_constrain(const T& x, const M& mu, const S& sigma) { const auto& mu_ref = to_ref(mu); const auto& sigma_ref = to_ref(sigma); if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "x", x, "mu", mu); } if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "x", x, "sigma", sigma); } else if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "mu", mu, "sigma", sigma); } check_finite("offset_multiplier_constrain", "offset", value_of_rec(mu_ref)); check_positive_finite("offset_multiplier_constrain", "multiplier", value_of_rec(sigma_ref)); return fma(sigma_ref, x, mu_ref); } /** * Return the linearly transformed value for the specified unconstrained input * and specified offset and multiplier, incrementing the specified * reference with the log absolute Jacobian determinant of the * transform. * *

The transform applied is * *

\f$f(x) = mu + sigma * x\f$ * *

where mu is the offset and sigma is the multiplier. * * If the offset is zero and multiplier is one, this function * reduces to identity_constraint(x, lp). * * @tparam T type of scalar * @tparam M type of offset * @tparam S type of multiplier * @param[in] x Unconstrained scalar input * @param[in] mu offset of constrained output * @param[in] sigma multiplier of constrained output * @param[in,out] lp Reference to log probability to increment. * @return linear transformed value corresponding to inputs * @throw std::domain_error if sigma <= 0 * @throw std::domain_error if mu is not finite */ template * = nullptr> inline auto offset_multiplier_constrain(const T& x, const M& mu, const S& sigma, return_type_t& lp) { const auto& mu_ref = to_ref(mu); const auto& sigma_ref = to_ref(sigma); if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "x", x, "mu", mu); } if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "x", x, "sigma", sigma); } else if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "mu", mu, "sigma", sigma); } check_finite("offset_multiplier_constrain", "offset", value_of_rec(mu_ref)); check_positive_finite("offset_multiplier_constrain", "multiplier", value_of_rec(sigma_ref)); if (math::size(sigma_ref) == 1) { lp += sum(multiply_log(math::size(x), sigma_ref)); } else { lp += sum(log(sigma_ref)); } return fma(sigma_ref, x, mu_ref); } /** * Overload for array of x and non-array mu and sigma */ template * = nullptr> inline auto offset_multiplier_constrain(const std::vector& x, const M& mu, const S& sigma) { std::vector< plain_type_t> ret; ret.reserve(x.size()); const auto& mu_ref = to_ref(mu); const auto& sigma_ref = to_ref(sigma); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu_ref, sigma_ref)); } return ret; } /** * Overload for array of x and non-array mu and sigma with lp */ template * = 
nullptr> inline auto offset_multiplier_constrain(const std::vector& x, const M& mu, const S& sigma, return_type_t& lp) { std::vector< plain_type_t> ret; ret.reserve(x.size()); const auto& mu_ref = to_ref(mu); const auto& sigma_ref = to_ref(sigma); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu_ref, sigma_ref, lp)); } return ret; } /** * Overload for array of x and sigma and non-array mu */ template * = nullptr> inline auto offset_multiplier_constrain(const std::vector& x, const M& mu, const std::vector& sigma) { check_matching_dims("offset_multiplier_constrain", "x", x, "sigma", sigma); std::vector< plain_type_t> ret; ret.reserve(x.size()); const auto& mu_ref = to_ref(mu); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu_ref, sigma[i])); } return ret; } /** * Overload for array of x and sigma and non-array mu with lp */ template * = nullptr> inline auto offset_multiplier_constrain(const std::vector& x, const M& mu, const std::vector& sigma, return_type_t& lp) { check_matching_dims("offset_multiplier_constrain", "x", x, "sigma", sigma); std::vector> ret; ret.reserve(x.size()); const auto& mu_ref = to_ref(mu); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu_ref, sigma[i], lp)); } return ret; } /** * Overload for array of x and mu and non-array sigma */ template * = nullptr> inline auto offset_multiplier_constrain(const std::vector& x, const std::vector& mu, const S& sigma) { check_matching_dims("offset_multiplier_constrain", "x", x, "mu", mu); std::vector< plain_type_t> ret; ret.reserve(x.size()); const auto& sigma_ref = to_ref(sigma); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu[i], sigma_ref)); } return ret; } /** * Overload for array of x and mu and non-array sigma with lp */ template * = nullptr> inline auto offset_multiplier_constrain(const std::vector& x, const std::vector& mu, 
const S& sigma, return_type_t& lp) { check_matching_dims("offset_multiplier_constrain", "x", x, "mu", mu); std::vector> ret; ret.reserve(x.size()); const auto& sigma_ref = to_ref(sigma); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu[i], sigma_ref, lp)); } return ret; } /** * Overload for array of x, mu, and sigma */ template inline auto offset_multiplier_constrain(const std::vector& x, const std::vector& mu, const std::vector& sigma) { check_matching_dims("offset_multiplier_constrain", "x", x, "mu", mu); check_matching_dims("offset_multiplier_constrain", "x", x, "sigma", sigma); std::vector> ret; ret.reserve(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu[i], sigma[i])); } return ret; } /** * Overload for array of x, mu, and sigma with lp */ template inline auto offset_multiplier_constrain(const std::vector& x, const std::vector& mu, const std::vector& sigma, return_type_t& lp) { check_matching_dims("offset_multiplier_constrain", "x", x, "mu", mu); check_matching_dims("offset_multiplier_constrain", "x", x, "sigma", sigma); std::vector> ret; ret.reserve(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_constrain(x[i], mu[i], sigma[i], lp)); } return ret; } /** * Return the linearly transformed value for the specified unconstrained input * and specified offset and multiplier. If the `Jacobian` parameter is `true`, * the log density accumulator is incremented with the log absolute Jacobian * determinant of the transform. All of the transforms are specified with their * Jacobians in the *Stan Reference Manual* chapter Constraint Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @tparam M A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @tparam S A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @param[in] x Unconstrained scalar input * @param[in] mu offset of constrained output * @param[in] sigma multiplier of constrained output * @param[in, out] lp log density accumulator * @return linear transformed value corresponding to inputs * @throw std::domain_error if sigma <= 0 * @throw std::domain_error if mu is not finite */ template inline auto offset_multiplier_constrain(const T& x, const M& mu, const S& sigma, return_type_t& lp) { if (Jacobian) { return offset_multiplier_constrain(x, mu, sigma, lp); } else { return offset_multiplier_constrain(x, mu, sigma); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/matrix_exp.hpp0000644000176200001440000000242014645137106023406 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MATRIX_EXP_HPP #define STAN_MATH_PRIM_FUN_MATRIX_EXP_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the matrix exponential of the input * matrix. * * @tparam T type of the matrix * @param[in] A_in Matrix to exponentiate. * @return Matrix exponential, dynamically-sized. * @throw std::invalid_argument if the input matrix * is not square. 
*/ template > inline plain_type_t matrix_exp(const T& A_in) { using std::exp; const auto& A = A_in.eval(); check_square("matrix_exp", "input matrix", A); if (T::RowsAtCompileTime == 1 && T::ColsAtCompileTime == 1) { plain_type_t res; res << exp(A(0)); return res; } if (A_in.size() == 0) { return {}; } return (A.cols() == 2 && square(value_of(A(0, 0)) - value_of(A(1, 1))) + 4 * value_of(A(0, 1)) * value_of(A(1, 0)) > 0) ? matrix_exp_2x2(A) : matrix_exp_pade(A); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/round.hpp0000644000176200001440000000311614645137106022360 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ROUND_HPP #define STAN_MATH_PRIM_FUN_ROUND_HPP #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `round()` so it can be vectorized. * * @tparam T type of argument * @param x argument variable * @return Rounded value of x. */ struct round_fun { template static inline auto fun(const T& x) { using std::round; return round(x); } }; /** * Vectorized version of `round()`. * * @tparam Container type of container * @param x container * @return Rounded value of each value in x. */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto round(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `round()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Rounded value of each value in x. 
*/ template * = nullptr> inline auto round(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().round(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fft.hpp0000644000176200001440000000773014645137106022016 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FFT_HPP #define STAN_MATH_PRIM_FUN_FFT_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the discrete Fourier transform of the specified complex * vector. * * Given an input complex vector `x[0:N-1]` of size `N`, the discrete * Fourier transform computes entries of the resulting complex * vector `y[0:N-1]` by * * ``` * y[n] = SUM_{i < N} x[i] * exp(-n * i * 2 * pi * sqrt(-1) / N) * ``` * * If the input is of size zero, the result is a size zero vector. * * @tparam V type of complex vector argument * @param[in] x vector to transform * @return discrete Fourier transform of `x` */ template * = nullptr, require_not_var_t>>* = nullptr> inline Eigen::Matrix, -1, 1> fft(const V& x) { // copy because fft() requires Eigen::Matrix type Eigen::Matrix, -1, 1> xv = x; if (xv.size() <= 1) return xv; Eigen::FFT> fft; return fft.fwd(xv); } /** * Return the inverse discrete Fourier transform of the specified * complex vector. * * Given an input complex vector `y[0:N-1]` of size `N`, the inverse * discrete Fourier transform computes entries of the resulting * complex vector `x[0:N-1]` by * * ``` * x[n] = SUM_{i < N} y[i] * exp(n * i * 2 * pi * sqrt(-1) / N) * ``` * * If the input is of size zero, the result is a size zero vector. * The only difference between the discrete DFT and its inverse is * the sign of the exponent. 
* * @tparam V type of complex vector argument * @param[in] y vector to inverse transform * @return inverse discrete Fourier transform of `y` */ template * = nullptr, require_not_var_t>>* = nullptr> inline Eigen::Matrix, -1, 1> inv_fft(const V& y) { // copy because fft() requires Eigen::Matrix type Eigen::Matrix, -1, 1> yv = y; if (y.size() <= 1) return yv; Eigen::FFT> fft; return fft.inv(yv); } /** * Return the two-dimensional discrete Fourier transform of the * specified complex matrix. The 2D discrete Fourier transform first * runs the discrete Fourier transform on the each row, then on each * column of the result. * * @tparam M type of complex matrix argument * @param[in] x matrix to transform * @return discrete 2D Fourier transform of `x` */ template * = nullptr, require_not_var_t>>* = nullptr> inline Eigen::Matrix, -1, -1> fft2(const M& x) { Eigen::Matrix, -1, -1> y(x.rows(), x.cols()); for (int i = 0; i < y.rows(); ++i) y.row(i) = fft(x.row(i)); for (int j = 0; j < y.cols(); ++j) y.col(j) = fft(y.col(j)); return y; } /** * Return the two-dimensional inverse discrete Fourier transform of * the specified complex matrix. The 2D inverse discrete Fourier * transform first runs the 1D inverse Fourier transform on the * columns, and then on the resulting rows. The composition of the * FFT and inverse FFT (or vice-versa) is the identity. 
* * @tparam M type of complex matrix argument * @param[in] y matrix to inverse trnasform * @return inverse discrete 2D Fourier transform of `y` */ template * = nullptr, require_not_var_t>>* = nullptr> inline Eigen::Matrix, -1, -1> inv_fft2(const M& y) { Eigen::Matrix, -1, -1> x(y.rows(), y.cols()); for (int j = 0; j < x.cols(); ++j) x.col(j) = inv_fft(y.col(j)); for (int i = 0; i < x.rows(); ++i) x.row(i) = inv_fft(x.row(i)); return x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/abs.hpp0000644000176200001440000000543414645137106022003 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ABS_HPP #define STAN_MATH_PRIM_FUN_ABS_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the absolute value of the specified arithmetic argument. * The return type is the same as the argument type. * * @tparam T type of argument (must be arithmetic) * @param x argument * @return absolute value of argument */ template * = nullptr> inline T abs(T x) { return std::abs(x); } /** * Return the absolute value (also known as the norm, modulus, or * magnitude) of the specified complex argument. * * @tparam T type of argument (must be complex) * @param x argument * @return absolute value of argument (a real number) */ template * = nullptr> inline auto abs(T x) { return hypot(x.real(), x.imag()); } /** * Return elementwise absolute value of the specified real-valued * container. * * @tparam T type of argument * @param x argument * @return absolute value of argument */ struct abs_fun { template static inline auto fun(const T& x) { return abs(x); } }; /** * Returns the elementwise `abs()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x argument * @return Absolute value of each variable in the container. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr, require_not_stan_scalar_t* = nullptr> inline auto abs(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `abs()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x argument * @return Absolute value of each variable in the container. */ template * = nullptr> inline auto abs(const Container& x) { return apply_vector_unary::apply( x, [&](const auto& v) { return v.array().abs(); }); } namespace internal { /** * Return the absolute value of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return absolute value of the argument */ template inline V complex_abs(const std::complex& z) { return hypot(z.real(), z.imag()); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/eigendecompose.hpp0000644000176200001440000000530314645137106024217 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGENDECOMPOSE_HPP #define STAN_MATH_PRIM_FUN_EIGENDECOMPOSE_HPP #include #include namespace stan { namespace math { /** * Return the eigendecomposition of a (real-valued) matrix * * @tparam EigMat type of real matrix argument * @param[in] m matrix to find the eigendecomposition of. Must be square and * have a non-zero size. 
* @return A tuple V,D where V is a matrix where the columns are the * complex-valued eigenvectors of `m` and D is a complex-valued column vector * with entries the eigenvectors of `m` */ template * = nullptr, require_not_vt_complex* = nullptr> inline std::tuple>, -1, -1>, Eigen::Matrix>, -1, 1>> eigendecompose(const EigMat& m) { if (unlikely(m.size() == 0)) { return std::make_tuple( Eigen::Matrix>, -1, -1>(0, 0), Eigen::Matrix>, -1, 1>(0, 1)); } check_square("eigendecompose", "m", m); using PlainMat = plain_type_t; const PlainMat& m_eval = m; Eigen::EigenSolver solver(m_eval); return std::make_tuple(std::move(solver.eigenvectors()), std::move(solver.eigenvalues())); } /** * Return the eigendecomposition of a (complex-valued) matrix * * @tparam EigCplxMat type of complex matrix argument * @param[in] m matrix to find the eigendecomposition of. Must be square and * have a non-zero size. * @return A tuple V,D where V is a matrix where the columns are the * complex-valued eigenvectors of `m` and D is a complex-valued column vector * with entries the eigenvectors of `m` */ template * = nullptr> inline std::tuple< Eigen::Matrix>, -1, -1>, Eigen::Matrix>, -1, 1>> eigendecompose(const EigCplxMat& m) { if (unlikely(m.size() == 0)) { return std::make_tuple( Eigen::Matrix>, -1, -1>(0, 0), Eigen::Matrix>, -1, 1>(0, 1)); } check_square("eigendecompose", "m", m); using PlainMat = Eigen::Matrix, -1, -1>; const PlainMat& m_eval = m; Eigen::ComplexEigenSolver solver(m_eval); return std::make_tuple(std::move(solver.eigenvectors()), std::move(solver.eigenvalues())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lb_constrain.hpp0000644000176200001440000002203214645137106023704 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LB_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_LB_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the 
lower-bounded value for the specified unconstrained input * and specified lower bound. * *

The transform applied is * *

\f$f(x) = \exp(x) + L\f$ * *

where \f$L\f$ is the constant lower bound. * * @tparam T Scalar. * @tparam L Scalar. * @param[in] x Unconstrained input * @param[in] lb Lower bound * @return Constrained matrix */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb) { if (unlikely(value_of_rec(lb) == NEGATIVE_INFTY)) { return identity_constrain(x, lb); } else { return add(exp(x), lb); } } /** * Return the lower-bounded value for the specified unconstrained * input and specified lower bound, incrementing the specified * reference with the log absolute Jacobian determinant of the * transform. * * @tparam T Scalar. * @tparam L Scalar. * @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb, return_type_t& lp) { if (value_of_rec(lb) == NEGATIVE_INFTY) { return identity_constrain(x, lb); } else { lp += x; return add(exp(x), lb); } } /** * Specialization of `lb_constrain` to apply a scalar lower bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase`. * @tparam L Scalar. * @param[in] x unconstrained input * @param[in] lb lower bound on output * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_not_st_var* = nullptr> inline auto lb_constrain(T&& x, L&& lb) { return eval(x.unaryExpr([lb](auto&& x) { return lb_constrain(x, lb); })); } /** * Specialization of `lb_constrain` to apply a scalar lower bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase`. * @tparam L Scalar. 
* @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_not_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb, return_type_t& lp) { return eval( x.unaryExpr([lb, &lp](auto&& xx) { return lb_constrain(xx, lb, lp); })); } /** * Specialization of `lb_constrain` to apply a matrix of lower bounds * elementwise to each input element. * * @tparam T A type inheriting from `EigenBase`. * @tparam L A type inheriting from `EigenBase`. * @param[in] x unconstrained input * @param[in] lb lower bound on output * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto lb_constrain(T&& x, L&& lb) { check_matching_dims("lb_constrain", "x", x, "lb", lb); return eval(x.binaryExpr( lb, [](auto&& x, auto&& lb) { return lb_constrain(x, lb); })); } /** * Specialization of `lb_constrain` to apply a matrix of lower bounds * elementwise to each input element. * * @tparam T A type inheriting from `EigenBase`. * @tparam L A type inheriting from `EigenBase`. * @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto lb_constrain(const T& x, const L& lb, return_type_t& lp) { check_matching_dims("lb_constrain", "x", x, "lb", lb); return eval(x.binaryExpr( lb, [&lp](auto&& xx, auto&& lbb) { return lb_constrain(xx, lbb, lp); })); } /** * Specialization of `lb_constrain` to apply a container of lower bounds * elementwise to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam L A type inheriting from `EigenBase` or a scalar. 
* @param[in] x unconstrained input * @param[in] lb lower bound on output * @return lower-bound constrained value corresponding to inputs */ template * = nullptr> inline auto lb_constrain(const std::vector& x, const L& lb) { std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lb_constrain(x[i], lb); } return ret; } /** * Specialization of `lb_constrain` to apply a container of lower bounds * elementwise to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam L A type inheriting from `EigenBase` or a standard vector. * @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template * = nullptr> inline auto lb_constrain(const std::vector& x, const L& lb, return_type_t& lp) { std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lb_constrain(x[i], lb, lp); } return ret; } /** * Specialization of `lb_constrain` to apply a container of lower bounds * elementwise to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam L A type inheriting from `EigenBase` or a standard vector. * @param[in] x unconstrained input * @param[in] lb lower bound on output * @return lower-bound constrained value corresponding to inputs */ template inline auto lb_constrain(const std::vector& x, const std::vector& lb) { check_matching_dims("lb_constrain", "x", x, "lb", lb); std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lb_constrain(x[i], lb[i]); } return ret; } /** * Specialization of `lb_constrain` to apply a container of lower bounds * elementwise to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam L A type inheriting from `EigenBase` or a standard vector. 
* @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template inline auto lb_constrain(const std::vector& x, const std::vector& lb, return_type_t& lp) { check_matching_dims("lb_constrain", "x", x, "lb", lb); std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = lb_constrain(x[i], lb[i], lp); } return ret; } /** * Specialization of `lb_constrain` to apply a container of lower bounds * elementwise to each input element. If the `Jacobian` parameter is `true`, the * log density accumulator is incremented with the log absolute Jacobian * determinant of the transform. All of the transforms are specified with their * Jacobians in the *Stan Reference Manual* chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @tparam L A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @param[in] x unconstrained input * @param[in] lb lower bound on output * @param[in, out] lp log density accumulator * @return lower-bound constrained value corresponding to inputs */ template inline auto lb_constrain(const T& x, const L& lb, return_type_t& lp) { if (Jacobian) { return lb_constrain(x, lb, lp); } else { return lb_constrain(x, lb); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log1p.hpp0000644000176200001440000000442414645137106022256 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG1P_HPP #define STAN_MATH_PRIM_FUN_LOG1P_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return 
the natural logarithm of one plus the specified value. * * \f[ * \mbox{log1p}(x) = \log(1 + x) * \f] * * This version is more stable for arguments near zero than * the direct definition. If x == -1, log1p(x) * is defined to be negative infinity. * * @param[in] x Argument. * @return Natural log of one plus the argument. * @throw std::domain_error If argument is less than -1. */ inline double log1p(double x) { if (is_nan(x)) { return x; } else { check_greater_or_equal("log1p", "x", x, -1.0); return std::log1p(x); } } /** * Return the natural logarithm of one plus the specified * argument. This version is required to disambiguate * log1p(int). * * @param[in] x Argument. * @return Natural logarithm of one plus the argument. * @throw std::domain_error If argument is less than -1. */ inline double log1p(int x) { check_greater_or_equal("log1p", "x", x, -1); return std::log1p(x); } /** * Structure to wrap log1p() so it can be vectorized. */ struct log1p_fun { /** * Return the natural logarithm of one plus the specified value. * * @tparam T type of argument * @param x argument * @return natural log of one plus the argument */ template static inline auto fun(const T& x) { return log1p(x); } }; /** * Return the elementwise application of log1p() to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. * * @tparam T type of container * @param x container * @return Elementwise log1p of members of container. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto log1p(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fmin.hpp0000644000176200001440000000224014645137106022157 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FMIN_HPP #define STAN_MATH_PRIM_FUN_FMIN_HPP #include #include #include namespace stan { namespace math { /** * Return the lesser of the two specified arguments. If one is * not-a-number, return the other. * * @param x First argument. * @param y Second argument. * @return Minimum of x or y and if one is NaN return the other */ template * = nullptr> inline double fmin(T1 x, T2 y) { using std::fmin; return fmin(x, y); } /** * Enables the vectorized application of the fmin function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return fmin function applied to the two inputs. */ template * = nullptr> inline auto fmin(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return fmin(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log1p_exp.hpp0000644000176200001440000000374014645137106023132 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG1P_EXP_HPP #define STAN_MATH_PRIM_FUN_LOG1P_EXP_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Calculates the log of 1 plus the exponential of the specified * value without overflow. * * This function is related to other special functions by: * * log1p_exp(x) * * = log1p(exp(a)) * * = log(1 + exp(x)) * * = log_sum_exp(0, x). 
* \f[ \mbox{log1p\_exp}(x) = \begin{cases} \ln(1+\exp(x)) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log1p\_exp}(x)}{\partial x} = \begin{cases} \frac{\exp(x)}{1+\exp(x)} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * */ inline double log1p_exp(double a) { using std::exp; // like log_sum_exp below with b=0.0; prevents underflow if (a > 0.0) { return a + log1p(exp(-a)); } return log1p(exp(a)); } /** * Structure to wrap log1p_exp() so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Natural log of (1 + exp(x)). */ struct log1p_exp_fun { template static inline auto fun(const T& x) { return log1p_exp(x); } }; /** * Vectorized version of log1p_exp(). * * @tparam T type of container * @param x container * @return Natural log of (1 + exp()) applied to each value in x. */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto log1p_exp(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log10.hpp0000644000176200001440000000405014645137106022151 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG10_HPP #define STAN_MATH_PRIM_FUN_LOG10_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap log10() so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Log base-10 of x. */ struct log10_fun { template static inline auto fun(const T& x) { using std::log10; return log10(x); } }; /** * Vectorized version of log10(). * * @tparam Container type of container * @param x container * @return Log base-10 applied to each value in x. 
*/ template < typename Container, require_not_var_matrix_t* = nullptr, require_not_container_st* = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto log10(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of log10() that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Log base-10 of each variable in the container. */ template * = nullptr> inline auto log10(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().log10(); }); } namespace internal { /** * Return the base 10 logarithm of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return base 10 logarithm of the argument */ template inline std::complex complex_log10(const std::complex& z) { static const double inv_log_10 = 1 / std::log(10); return log(z) * inv_log_10; } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/scaled_add.hpp0000644000176200001440000000066414645137106023301 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SCALED_ADD_HPP #define STAN_MATH_PRIM_FUN_SCALED_ADD_HPP #include #include #include namespace stan { namespace math { inline void scaled_add(std::vector& x, const std::vector& y, double lambda) { for (size_t i = 0; i < x.size(); ++i) { x[i] += lambda * y[i]; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/is_uninitialized.hpp0000644000176200001440000000117614645137106024600 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IS_UNINITIALIZED_HPP #define STAN_MATH_PRIM_FUN_IS_UNINITIALIZED_HPP #include namespace stan { namespace math { /** * Returns true if the specified variable is * uninitialized. Arithmetic types are always initialized * by definition (the value is not specified). * * @tparam T Type of object to test. * @param x Object to test. 
* @return true if the specified object is uninitialized. * @return false if input is NaN. */ template inline bool is_uninitialized(T x) { return false; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sort_indices.hpp0000644000176200001440000000377314645137106023727 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SORT_INDICES_HPP #define STAN_MATH_PRIM_FUN_SORT_INDICES_HPP #include #include #include #include #include namespace stan { namespace math { /** * A comparator that works for any container type that has the * brackets operator. * * @tparam ascending true if sorting in ascending order * @tparam C container type */ namespace internal { template class index_comparator { const C& xs_; public: /** * Construct an index comparator holding a reference * to the specified container. * * @param xs Container */ explicit index_comparator(const C& xs) : xs_(xs) {} /** * Return true if the value at the first index is sorted in * front of the value at the second index; this will depend * on the template parameter ascending. * * @param i Index of first value for comparison * @param j Index of second value for comparison */ bool operator()(int i, int j) const { if (ascending) { return xs_[i - 1] < xs_[j - 1]; } else { return xs_[i - 1] > xs_[j - 1]; } } }; } // namespace internal /** * Return an integer array of indices of the specified container * sorting the values in ascending or descending order based on * the value of the first template parameter. 
* * @tparam ascending true if sort is in ascending order * @tparam C type of container * @param xs Container to sort * @return sorted version of container */ template std::vector sort_indices(const C& xs) { using idx_t = index_type_t; idx_t xs_size = xs.size(); std::vector idxs; idxs.resize(xs_size); for (idx_t i = 0; i < xs_size; ++i) { idxs[i] = i + 1; } internal::index_comparator> comparator(to_ref(xs)); std::sort(idxs.begin(), idxs.end(), comparator); return idxs; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/gp_matern32_cov.hpp0000644000176200001440000002631614645137106024230 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GP_MATERN32_COV_HPP #define STAN_MATH_PRIM_FUN_GP_MATERN32_COV_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a Matern 3/2 covariance matrix * * \f[ k(x, x') = \sigma^2(1 + * \frac{\sqrt{3}d(x, x')}{l})exp(-\frac{\sqrt{3}d(x, x')}{l}) * \f] * * where \f$ d(x, x') \f$ is the Euclidean distance. 
* * @tparam T_x type for each scalar * @tparam T_s type of parameter of sigma * @tparam T_l type of parameter length scale * * @param x std::vector of scalars that can be used in squared distance * @param length_scale length scale * @param sigma marginal standard deviation or magnitude * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern32_cov(const std::vector &x, const T_s &sigma, const T_l &length_scale) { using std::exp; using std::pow; size_t x_size = stan::math::size(x); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } const char *function = "gp_matern32_cov"; size_t x_obs_size = stan::math::size(x[0]); for (size_t i = 0; i < x_size; ++i) { check_size_match(function, "x row", x_obs_size, "x's other row", stan::math::size(x[i])); } for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "x", x[n]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); T_s sigma_sq = square(sigma); T_l root_3_inv_l = std::sqrt(3.0) / length_scale; size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov.coeffRef(j, j) = sigma_sq; size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { return_type_t dist = distance(x[i], x[j]); cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * (1.0 + root_3_inv_l * dist) * exp(-root_3_inv_l * dist); } } } } return cov; } /** * Returns a Matern 3/2 covariance matrix * * \f[ k(x, x') = \sigma^2(1 + \sqrt{3} * \sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}}) * exp(-\sqrt{3}\sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}}) \f] * * where \f$d(x, x')\f$ is the Euclidean distance. 
* * @tparam T_x type for each scalar * @tparam T_s type of element of parameter sigma * @tparam T_l type of each length scale parameter * * @param x std::vector of Eigen vectors of scalars. * @param length_scale std::vector of length scales * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x is nan or inf */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern32_cov(const std::vector> &x, const T_s &sigma, const std::vector &length_scale) { using std::exp; size_t x_size = stan::math::size(x); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } const char *function = "gp_matern32_cov"; size_t l_size = length_scale.size(); for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "x", x[n]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); for (size_t n = 0; n < x_size; ++n) { check_not_nan(function, "length scale", length_scale[n]); } check_size_match(function, "x dimension", stan::math::size(x[0]), "number of length scales", l_size); T_s sigma_sq = square(sigma); double root_3 = std::sqrt(3.0); std::vector, -1, 1>> x_new = divide_columns(x, length_scale); size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j); i < i_end; ++i) { return_type_t dist = distance(x_new[i], x_new[j]); cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * (1.0 + root_3 * dist) * exp(-root_3 * dist); } } } } return cov; } /** * Returns a Matern 3/2 cross covariance matrix * * \f[ k(x, x') = \sigma^2(1 + * \frac{\sqrt{3}d(x, x')}{l})exp(-\sqrt{3}\frac{d(x, x')}{l}) * \f] * * where \f$d(x, x')\f$ is the Euclidean distance. 
* * This function is for the cross covariance matrix needed * to compute the posterior predictive density. * * @tparam T_x1 first type of scalars contained in vector x1 * @tparam T_x2 second type of scalars contained in vector x2 * @tparam T_s type of parameter sigma, marginal standard deviation * @tparam T_l type of parameter length scale * * @param x1 std::vector of scalars that can be used in squared_distance * @param x2 std::vector of scalars that can be used in squared_distance * @param length_scale length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x1, x2 are nan or inf * */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern32_cov(const std::vector &x1, const std::vector &x2, const T_s &sigma, const T_l &length_scale) { using std::exp; size_t x1_size = stan::math::size(x1); size_t x2_size = stan::math::size(x2); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } const char *function = "gp_matern32_cov"; size_t x1_obs_size = stan::math::size(x1[0]); for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x1's other row", stan::math::size(x1[i])); } for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x1's row", x1_obs_size, "x2's other row", stan::math::size(x2[i])); } for (size_t n = 0; n < x1_size; ++n) { check_not_nan(function, "x1", x1[n]); } for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); T_s sigma_sq = square(sigma); T_l root_3_inv_l_sq = std::sqrt(3.0) / length_scale; size_t block_size = 10; for (size_t ib = 0; ib < x1.size(); ib += block_size) { for (size_t jb = 0; jb < x2.size(); jb += block_size) { size_t j_end = std::min(x2_size, jb + block_size); for (size_t j = jb; j < 
j_end; ++j) { size_t i_end = std::min(x1_size, ib + block_size); for (size_t i = ib; i < i_end; ++i) { return_type_t dist = distance(x1[i], x2[j]); cov(i, j) = sigma_sq * (1.0 + root_3_inv_l_sq * dist) * exp(-root_3_inv_l_sq * dist); } } } } return cov; } /** * Returns a Matern 3/2 cross covariance matrix * * \f[ k(x, x') = \sigma^2(1 + \sqrt{3} * \sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}}) * exp(-\sqrt{3}\sqrt{\sum_{k=1}^{K}\frac{d(x, x')^2}{l_k^2}}) \f] * * where \f$d(x, x')\f$ is the Euclidean distance * * This function is for the cross covariance matrix needed * to compute the posterior predictive density. * * @tparam T_x1 first type of std::vector of scalars * @tparam T_x2 second type of std::vector of scalars * @tparam T_s type of parameter sigma, marginal standard deviation * @tparam T_l type of parameter length scale * * @param x1 std::vector of Eigen vectors of scalars * @param x2 std::vector of Eigen vectors of scalars * @param length_scale parameter length scale * @param sigma standard deviation that can be used in stan::math::square * @throw std::domain error if sigma <= 0, l <= 0, or x1, x2 are nan or inf * */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_matern32_cov(const std::vector> &x1, const std::vector> &x2, const T_s &sigma, const std::vector &length_scale) { using std::exp; size_t x1_size = stan::math::size(x1); size_t x2_size = stan::math::size(x2); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } const char *function = "gp_matern_32_cov"; for (size_t n = 0; n < x1_size; ++n) { check_not_nan(function, "x1", x1[n]); } for (size_t n = 0; n < x2_size; ++n) { check_not_nan(function, "x2", x2[n]); } check_positive_finite(function, "magnitude", sigma); check_positive_finite(function, "length scale", length_scale); size_t l_size = length_scale.size(); for (size_t n = 0; n < l_size; ++n) { check_not_nan(function, "length scale", length_scale[n]); } 
for (size_t i = 0; i < x1_size; ++i) { check_size_match(function, "x1's row", stan::math::size(x1[i]), "number of length scales", l_size); } for (size_t i = 0; i < x2_size; ++i) { check_size_match(function, "x2's row", stan::math::size(x2[i]), "number of length scales", l_size); } T_s sigma_sq = square(sigma); double root_3 = std::sqrt(3.0); std::vector, -1, 1>> x1_new = divide_columns(x1, length_scale); std::vector, -1, 1>> x2_new = divide_columns(x2, length_scale); size_t block_size = 10; for (size_t ib = 0; ib < x1.size(); ib += block_size) { for (size_t jb = 0; jb < x2.size(); jb += block_size) { size_t j_end = std::min(x2_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1_size, ib + block_size); for (size_t i = ib; i < i_end; ++i) { return_type_t dist = distance(x1_new[i], x2_new[j]); cov(i, j) = sigma_sq * (1.0 + root_3 * dist) * exp(-root_3 * dist); } } } } return cov; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/max_size_mvt.hpp0000644000176200001440000000173214645137106023740 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MAX_SIZE_MVT_HPP #define STAN_MATH_PRIM_FUN_MAX_SIZE_MVT_HPP #include #include namespace stan { namespace math { /** * Calculate the size of the largest multivariate input. A multivariate * container is either an Eigen matrix, whose mvt size is 1, or an std::vector * of Eigen matrices, whose mvt size is the size of the std::vector. It is an * error to supply any other type of input. * @tparam T1 type of the first input * @tparam Ts types of the other inputs * @param x1 first input * @param xs other inputs * @return the size of the largest input * @throw `invalid_argument` if provided with an input that is not an Eigen * matrix or std::vector of Eigen matrices */ template inline size_t max_size_mvt(const T1& x1, const Ts&... 
xs) { return std::max({stan::math::size_mvt(x1), stan::math::size_mvt(xs)...}); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/hypergeometric_pFq.hpp0000644000176200001440000000424314645137106025067 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_PFQ_HPP #define STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_PFQ_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the generalized hypergeometric function applied to the * input arguments: * \f$_pF_q(a_1,...,a_p;b_1,...,b_q;z)\f$ * * This function is not intended to be exposed to end users, only * used for p & q values that are stable with the grad_pFq * implementation. * * See 'grad_pFq.hpp' for the derivatives wrt each parameter * * @param[in] a Vector of 'a' arguments to function * @param[in] b Vector of 'b' arguments to function * @param[in] z Scalar z argument * @return Generalized hypergeometric function */ template * = nullptr, require_arithmetic_t* = nullptr> return_type_t hypergeometric_pFq(const Ta& a, const Tb& b, const Tz& z) { plain_type_t a_ref = a; plain_type_t b_ref = b; check_finite("hypergeometric_pFq", "a", a_ref); check_finite("hypergeometric_pFq", "b", b_ref); check_finite("hypergeometric_pFq", "z", z); check_not_nan("hypergeometric_pFq", "a", a_ref); check_not_nan("hypergeometric_pFq", "b", b_ref); check_not_nan("hypergeometric_pFq", "z", z); bool condition_1 = (a_ref.size() > (b_ref.size() + 1)) && (z != 0); bool condition_2 = (a_ref.size() == (b_ref.size() + 1)) && (std::fabs(z) > 1); if (condition_1 || condition_2) { std::stringstream msg; msg << "hypergeometric function pFq does not meet convergence " << "conditions with given arguments. 
" << "a: " << a_ref << ", b: " << b_ref << ", " << ", z: " << z; throw std::domain_error(msg.str()); } return boost::math::hypergeometric_pFq( std::vector(a_ref.data(), a_ref.data() + a_ref.size()), std::vector(b_ref.data(), b_ref.data() + b_ref.size()), z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ceil.hpp0000644000176200001440000000330514645137106022145 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CEIL_HPP #define STAN_MATH_PRIM_FUN_CEIL_HPP #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `ceil()` so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Least integer >= x. */ struct ceil_fun { template static inline auto fun(const T& x) { using std::ceil; return ceil(x); } }; /** * Returns the elementwise `ceil()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x container * @return Least integer >= each value in x. */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto ceil(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `ceil()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Least integer >= each value in x. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto ceil(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().ceil(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inverse.hpp0000644000176200001440000000176114645137106022710 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INVERSE_HPP #define STAN_MATH_PRIM_FUN_INVERSE_HPP #include #include #include namespace stan { namespace math { /** * Returns the inverse of the specified matrix. * * @tparam T type of elements in the matrix * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic * * @param m specified matrix * @return Inverse of the matrix (an empty matrix if the specified matrix has * size zero). * @throw std::invalid_argument if the matrix is not square. */ template * = nullptr> inline Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime> inverse(const EigMat& m) { check_square("inverse", "m", m); if (m.size() == 0) { return {}; } return m.inverse(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/linspaced_int_array.hpp0000644000176200001440000000276014645137106025247 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LINSPACED_INT_ARRAY_HPP #define STAN_MATH_PRIM_FUN_LINSPACED_INT_ARRAY_HPP #include #include #include namespace stan { namespace math { /** * Return an array of linearly spaced integers. * * This produces an array from `low` to `high` (inclusive). If `high - low` is * greater or equal to `K - 1`, then the integers are evenly spaced. If it is * not possible to get from `low` to `high` with a multiple of an integer, * `high` is lowered until this is possible. * * If `K - 1` is greater than `high - low`, then integers are repeated. For * instance, `low, low, low + 1, low + 1, ...`. 
`high` is lowered until `K - 1` * is a multiple of `high - low` * * For `K = 1`, the array will contain the `high` value. For `K = 0` it returns * an empty array. * * @param K size of the array * @param low smallest value * @param high largest value * @return An array of size K with elements linearly spaced between * low and high. * @throw std::domain_error if K is negative or high is less than low. */ inline std::vector linspaced_int_array(int K, int low, int high) { static const char* function = "linspaced_int_array"; check_nonnegative(function, "size", K); check_greater_or_equal(function, "high", high, low); if (K == 0) { return {}; } Eigen::VectorXi v = Eigen::VectorXi::LinSpaced(K, low, high); return {v.data(), v.data() + K}; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/corr_matrix_free.hpp0000644000176200001440000000470114645137106024564 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CORR_MATRIX_FREE_HPP #define STAN_MATH_PRIM_FUN_CORR_MATRIX_FREE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the vector of unconstrained partial correlations that * define the specified correlation matrix when transformed. * *

The constraining transform is defined as for * corr_matrix_constrain(Matrix, size_t). The * inverse transform in this function is simpler in that it only * needs to compute the \f$k \choose 2\f$ partial correlations * and then free those. * * @tparam T type of the matrix (must be derived from \c Eigen::MatrixBase) * @param y The correlation matrix to free. * @return Vector of unconstrained values that produce the * specified correlation matrix when transformed. * @throw std::domain_error if the correlation matrix has no * elements or is not a square matrix. * @throw std::runtime_error if the correlation matrix cannot be * factorized by factor_cov_matrix() or if the sds returned by * factor_cov_matrix() on log scale are unconstrained. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> corr_matrix_free(const T& y) { using Eigen::Array; using Eigen::Dynamic; check_square("corr_matrix_free", "y", y); check_nonzero_size("corr_matrix_free", "y", y); Eigen::Index k = y.rows(); Eigen::Index k_choose_2 = (k * (k - 1)) / 2; Eigen::Matrix, Dynamic, 1> x(k_choose_2); Array, Dynamic, 1> sds(k); bool successful = factor_cov_matrix(y, x.array(), sds); if (!successful) { throw_domain_error("corr_matrix_free", "factor_cov_matrix failed on y", y, ""); } check_bounded("corr_matrix_free", "log(sd)", sds, -CONSTRAINT_TOLERANCE, CONSTRAINT_TOLERANCE); return x; } /** * Overload of `corr_matrix_free()` to untransform each matrix * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase`. * @param x The standard vector to untransform. 
*/ template * = nullptr> auto corr_matrix_free(const T& x) { return apply_vector_unary::apply( x, [](auto&& v) { return corr_matrix_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rep_row_vector.hpp0000644000176200001440000000142414645137106024270 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_REP_ROW_VECTOR_HPP #define STAN_MATH_PRIM_FUN_REP_ROW_VECTOR_HPP #include #include namespace stan { namespace math { template * = nullptr, require_stan_scalar_t* = nullptr> inline auto rep_row_vector(const T& x, int n) { check_nonnegative("rep_vector", "n", n); return T_ret::Constant(n, x); } template * = nullptr> inline auto rep_row_vector(const T& x, int n) { return rep_row_vector, 1, Eigen::Dynamic>>(x, n); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/as_value_column_vector_or_scalar.hpp0000644000176200001440000000145614645137106030021 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AS_VALUE_COLUMN_VECTOR_OR_SCALAR #define STAN_MATH_PRIM_FUN_AS_VALUE_COLUMN_VECTOR_OR_SCALAR #include #include #include #include #include namespace stan { namespace math { /** * Extract values from input argument and transform to a column vector. For * scalar this returns a scalar. For arithmetic types this is an identity * function. * * @tparam T Type of scalar element. * @param a Specified scalar. * @return the scalar. */ template inline auto as_value_column_vector_or_scalar(T&& a) { return value_of(as_column_vector_or_scalar(std::forward(a))); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_sum_exp.hpp0000644000176200001440000000653414645137106023561 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_SUM_EXP_HPP #define STAN_MATH_PRIM_FUN_LOG_SUM_EXP_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Calculates the log sum of exponentials without overflow. 
* * \f$\log (\exp(a) + \exp(b)) = m + \log(\exp(a-m) + \exp(b-m))\f$, * * where \f$m = max(a, b)\f$. * \f[ \mbox{log\_sum\_exp}(x, y) = \begin{cases} \ln(\exp(x)+\exp(y)) & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_sum\_exp}(x, y)}{\partial x} = \begin{cases} \frac{\exp(x)}{\exp(x)+\exp(y)} & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_sum\_exp}(x, y)}{\partial y} = \begin{cases} \frac{\exp(y)}{\exp(x)+\exp(y)} & \mbox{if } -\infty\leq x, y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @tparam T1 type of the first variable * @tparam T2 type of the second variable * @param a the first variable * @param b the second variable */ template * = nullptr, require_all_stan_scalar_t* = nullptr> inline return_type_t log_sum_exp(const T2& a, const T1& b) { if (a == NEGATIVE_INFTY) { return b; } if (a == INFTY && b == INFTY) { return INFTY; } if (a > b) { return a + log1p_exp(b - a); } return b + log1p_exp(a - b); } /** * Return the log of the sum of the exponentiated values of the specified * matrix of values. The matrix may be a full matrix, a vector, * a row vector, or a container of these. * * The function is defined as follows to prevent overflow in exponential * calculations. * * \f$\log \sum_{n=1}^N \exp(x_n) = \max(x) + \log \sum_{n=1}^N \exp(x_n - * \max(x))\f$. * * @tparam T type of input vector or matrix * @param[in] x matrix of specified values * @return The log of the sum of the exponentiated vector values. 
*/ template * = nullptr> inline auto log_sum_exp(const T& x) { return apply_vector_unary::reduce(x, [&](const auto& v) { if (v.size() == 0) { return NEGATIVE_INFTY; } const auto& v_ref = to_ref(v); const double max = v_ref.maxCoeff(); if (!std::isfinite(max)) { return max; } return max + std::log((v_ref.array() - max).exp().sum()); }); } /** * Enables the vectorized application of the log_sum_exp function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return log_sum_exp function applied to the two inputs. */ template * = nullptr> inline auto log_sum_exp(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [](const auto& c, const auto& d) { return log_sum_exp(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/typedefs.hpp0000644000176200001440000000137014645137106023054 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TYPEDEFS_HPP #define STAN_MATH_PRIM_FUN_TYPEDEFS_HPP #include #include namespace stan { namespace math { /** * Type for sizes and indexes in an Eigen matrix with double elements. */ using size_type = index_type_t>; /** * Type for matrix of double values. */ using matrix_d = Eigen::Matrix; /** * Type for (column) vector of double values. */ using vector_d = Eigen::Matrix; /** * Type for (row) vector of double values. 
*/ using row_vector_d = Eigen::Matrix; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/eval.hpp0000644000176200001440000000174414645137106022165 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EVAL_HPP #define STAN_MATH_PRIM_FUN_EVAL_HPP #include #include namespace stan { namespace math { /** * Inputs which have a plain_type equal to the own time are forwarded * unmodified (for Eigen expressions these types are different) * * @tparam T Input type * @param[in] arg Input argument * @return Forwarded input argument **/ template , plain_type_t>* = nullptr> inline T eval(T&& arg) { return std::forward(arg); } /** * Inputs which have a plain_type different from their own type are * Eval'd (this catches Eigen expressions) * * @tparam T Input type * @param[in] arg Input argument * @return Eval'd argument **/ template , plain_type_t>* = nullptr> inline decltype(auto) eval(const T& arg) { return arg.eval(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cumulative_sum.hpp0000644000176200001440000000354614645137106024302 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CUMULATIVE_SUM_HPP #define STAN_MATH_PRIM_FUN_CUMULATIVE_SUM_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the cumulative sum of the specified vector. * * The cumulative sum of a vector of values \code{x} is the * * \code x[0], x[1] + x[2], ..., x[1] + , ..., + x[x.size()-1] @endcode * * @tparam T type of elements in the vector * @param x Vector of values. * @return Cumulative sum of values. */ template inline std::vector cumulative_sum(const std::vector& x) { std::vector result(x.size()); if (x.size() == 0) { return result; } std::partial_sum(x.begin(), x.end(), result.begin(), std::plus()); return result; } /** * Return the cumulative sum of the specified vector. 
* * The cumulative sum is of the same type as the input and * has values defined by * * \code x(0), x(1) + x(2), ..., x(1) + , ..., + x(x.size()-1) @endcode * * @tparam EigVec type of the vector (must be derived from \c Eigen::MatrixBase * and have one compile time dimension equal to 1) * * @param m Vector of values. * @return Cumulative sum of values. */ template * = nullptr, require_not_st_var* = nullptr> inline auto cumulative_sum(const EigVec& m) { using T_scalar = value_type_t; Eigen::Matrix result(m.rows(), m.cols()); if (m.size() == 0) { return result; } const auto& m_ref = to_ref(m); result.coeffRef(0) = m_ref.coeff(0); for (Eigen::Index i = 1; i < m_ref.size(); ++i) { result.coeffRef(i) = m_ref.coeff(i) + result.coeffRef(i - 1); } return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/make_nu.hpp0000644000176200001440000000231314645137106022646 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MAKE_NU_HPP #define STAN_MATH_PRIM_FUN_MAKE_NU_HPP #include #include namespace stan { namespace math { /** * Return the degrees of freedom for the t distribution that * corresponds to the shape parameter in the LKJ distribution. * * @tparam T scalar type * @param eta LKJ distribution parameter in (0, inf) * @param K number of variables in correlation matrix */ template Eigen::Array make_nu(const T& eta, size_t K) { using size_type = index_type_t>; // Best (1978) implies nu = 2 * alpha for the dof in a t // distribution that generates a beta variate on (-1, 1) Eigen::Array nu(K * (K - 1) / 2); T alpha = eta + 0.5 * (K - 2.0); // from Lewandowski et. al. 
T alpha2 = 2.0 * alpha; for (size_type j = 0; j < (K - 1); ++j) { nu(j) = alpha2; } size_t counter = K - 1; for (size_type i = 1; i < (K - 1); ++i) { alpha -= 0.5; alpha2 = 2.0 * alpha; for (size_type j = i + 1; j < K; ++j, ++counter) { nu(counter) = alpha2; } } return nu; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lb_free.hpp0000644000176200001440000001010014645137106022616 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LB_FREE_HPP #define STAN_MATH_PRIM_FUN_LB_FREE_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the unconstrained value that produces the specified * lower-bound constrained value. * * @tparam T type of bounded object * @tparam L type of lower bound * @param[in] y input object * @param[in] lb lower bound * @return unconstrained value that produces the input when * constrained * @throw std::domain_error if y is lower than the lower bound */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto lb_free(T&& y, L&& lb) { if (value_of_rec(lb) == NEGATIVE_INFTY) { return identity_free(y, lb); } else { auto&& y_ref = to_ref(std::forward(y)); auto&& lb_ref = to_ref(std::forward(lb)); check_greater_or_equal("lb_free", "Lower bounded variable", y_ref, lb_ref); return eval(log(subtract(std::forward(y_ref), std::forward(lb_ref)))); } } /** * Return the free matrix that corresponds to the specified * lower-bounded matrix with respect to the specified lower bound. * * The transform is the reverse of the `lb_constrain` transform. * * @tparam T type of matrix * @tparam L type of lower bound * @param y constrained matrix with specified lower bound * @param lb lower bound * @return unconstrained matrix with respect to lower bound * @throw std::invalid_argument if any element of constrained variable * is greater than the lower bound. 
*/ template * = nullptr> inline auto lb_free(T&& y, L&& lb) { auto&& y_ref = to_ref(std::forward(y)); auto&& lb_ref = to_ref(std::forward(lb)); promote_scalar_t, T> ret(y.rows(), y.cols()); for (Eigen::Index j = 0; j < y.cols(); ++j) { for (Eigen::Index i = 0; i < y.rows(); ++i) { ret.coeffRef(i, j) = lb_free(y_ref.coeff(i, j), lb_ref.coeff(i, j)); } } return ret; } /** * Return the free variable that corresponds to the specified * lower-bounded variable with respect to the specified lower bound. * * The transform is the reverse of the `lb_constrain` transform. * * @tparam T type of constrained variable * @tparam L type of lower bound * @param y constrained variable with specified lower bound * @param lb lower bound * @return unconstrained variable with respect to lower bound * @throw std::invalid_argument if any element of constrained variable * is greater than the lower bound. */ template * = nullptr> inline auto lb_free(const std::vector y, const L& lb) { auto&& lb_ref = to_ref(lb); std::vector ret(y.size()); std::transform(y.begin(), y.end(), ret.begin(), [&lb_ref](auto&& yy) { return lb_free(yy, lb_ref); }); return ret; } /** * Return the free variable that corresponds to the specified * lower-bounded variable with respect to the specified lower bound. * * The transform is the reverse of the `lb_constrain` transform. * * @tparam T type of constrained variable * @tparam L type of lower bound * @param y constrained variable with specified lower bound * @param lb lower bound * @return unconstrained variable with respect to lower bound * @throw std::invalid_argument if any element of constrained variable * is greater than the lower bound. 
*/ template inline auto lb_free(const std::vector y, const std::vector& lb) { std::vector ret(y.size()); std::transform(y.begin(), y.end(), lb.begin(), ret.begin(), [](auto&& yy, auto&& lbb) { return lb_free(yy, lbb); }); return ret; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/i_times.hpp0000644000176200001440000000254614645137106022670 0ustar liggesusers#ifndef STAN_MATH_PRIM_SCAL_FUN_I_TIMES_HPP #define STAN_MATH_PRIM_SCAL_FUN_I_TIMES_HPP #include namespace stan { namespace math { /** * Return the specified complex number multiplied by `i`. * * This compound function is more efficient than mulitplying by a * constant `i` because it involves only a single arithmetic negation. * * @tparam value type of complex argument * @param[in] z complex argument * @return argument multipled by `i` */ template inline std::complex i_times(const std::complex& z) { return {-z.imag(), z.real()}; } /** * Return the specified complex number multiplied by `-i`. * * This compound function is more efficient than mulitplying by the * constant `-i` because it involves only a single arithmetic * negation. * * @tparam value type of complex argument * @param[in] z complex argument * @return argument multipled by `-i` */ template inline std::complex neg_i_times(const std::complex& z) { return {z.imag(), -z.real()}; } /** * Return the complex negation of the specified complex argument. 
* * @tparam V value type of complex argument * @param[in] z argument * @return negation of argument */ template inline std::complex complex_negate(const std::complex& z) { return {-z.real(), -z.imag()}; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/complex_schur_decompose.hpp0000644000176200001440000000666714645137106026160 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COMPLEX_SCHUR_DECOMPOSE_HPP #define STAN_MATH_PRIM_FUN_COMPLEX_SCHUR_DECOMPOSE_HPP #include #include #include namespace stan { namespace math { /** * Return the unitary matrix of the complex Schur decomposition of the * specified square matrix. * * The complex Schur decomposition of a square matrix `A` produces a * complex unitary matrix `U` and a complex upper-triangular Schur * form matrix `T` such that `A = U * T * inv(U)`. Further, the * unitary matrix's inverse is equal to its conjugate transpose, * `inv(U) = U*`, where `U*(i, j) = conj(U(j, i))` * * @tparam M type of matrix * @param m real matrix to decompose * @return complex unitary matrix of the complex Schur decomposition of the * specified matrix * @see complex_schur_decompose_t */ template * = nullptr> inline Eigen::Matrix>, -1, -1> complex_schur_decompose_u(const M& m) { if (unlikely(m.size() == 0)) { return m; } check_square("complex_schur_decompose_u", "m", m); using MatType = Eigen::Matrix, -1, -1>; // copy because ComplexSchur requires Eigen::Matrix type Eigen::ComplexSchur cs{MatType(m)}; return cs.matrixU(); } /** * Return the Schur form matrix of the complex Schur decomposition of the * specified square matrix. 
* * @tparam M type of matrix * @param m real matrix to decompose * @return Schur form matrix of the complex Schur decomposition of the * specified matrix * @see complex_schur_decompose_u for a definition of the complex * Schur decomposition */ template * = nullptr> inline Eigen::Matrix>, -1, -1> complex_schur_decompose_t(const M& m) { if (unlikely(m.size() == 0)) { return m; } check_square("complex_schur_decompose_t", "m", m); using MatType = Eigen::Matrix, -1, -1>; // copy because ComplexSchur requires Eigen::Matrix type Eigen::ComplexSchur cs{MatType(m), false}; return cs.matrixT(); } /** * Return the complex Schur decomposition of the * specified square matrix. * * The complex Schur decomposition of a square matrix `A` produces a * complex unitary matrix `U` and a complex upper-triangular Schur * form matrix `T` such that `A = U * T * inv(U)`. Further, the * unitary matrix's inverse is equal to its conjugate transpose, * `inv(U) = U*`, where `U*(i, j) = conj(U(j, i))` * * @tparam M type of matrix * @param m real matrix to decompose * @return a tuple (U,T) where U is the complex unitary matrix of the complex * Schur decomposition of `m` and T is the Schur form matrix of * the complex Schur decomposition of `m` */ template * = nullptr> inline std::tuple>, -1, -1>, Eigen::Matrix>, -1, -1>> complex_schur_decompose(const M& m) { if (unlikely(m.size() == 0)) { return std::make_tuple(m, m); } check_square("complex_schur_decompose", "m", m); using MatType = Eigen::Matrix, -1, -1>; // copy because ComplexSchur requires Eigen::Matrix type Eigen::ComplexSchur cs{MatType(m)}; return std::make_tuple(std::move(cs.matrixU()), std::move(cs.matrixT())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/variance.hpp0000644000176200001440000000324614645137106023025 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_VARIANCE_HPP #define STAN_MATH_PRIM_FUN_VARIANCE_HPP #include #include #include #include namespace stan { namespace math { /** * 
Returns the sample variance (divide by length - 1) of the * coefficients in the specified matrix * * @tparam EigMat type inheriting from `EigenBase` that does not have an `var` * scalar type. * * @param m matrix * @return sample variance of coefficients * @throw std::invalid_argument if the matrix has size zero */ template * = nullptr, require_not_vt_var* = nullptr> inline value_type_t variance(const EigMat& m) { using value_t = value_type_t; const auto& mat = to_ref(m); check_nonzero_size("variance", "m", mat); if (mat.size() == 1) { return value_t{0.0}; } return (mat.array() - mat.mean()).square().sum() / value_t(mat.size() - 1.0); } /** * Returns the sample variance (divide by length - 1) of the * coefficients in the specified standard vector. * * @tparam StdVec A standard library vector that does not contain a var. * @param v specified vector * @return sample variance of vector * @throw std::invalid_argument if the vector has size zero */ template * = nullptr, require_not_vt_var* = nullptr> inline value_type_t variance(const StdVec& v) { using eigen_t = Eigen::Matrix, -1, 1>; return variance(Eigen::Map(v.data(), v.size())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/eigen_comparisons.hpp0000644000176200001440000000260414645137106024736 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGEN_COMPARISONS_HPP #define STAN_MATH_PRIM_FUN_EIGEN_COMPARISONS_HPP #include #include #include namespace stan { namespace math { /** * Add support for comparisons involving Eigen types with different scalars, * where one of the scalars is an autodiff type. This includes comparisons of * an Eigen type and a scalar. 
* @param OPERATOR name of the operator function to implement * @param OP operator to use for comparison of values **/ #define ADD_MIXED_AUTODIFF_SCALAR_COMPARISON(OPERATOR, OP) \ template * = nullptr, \ require_any_st_autodiff* = nullptr, \ require_not_st_same* = nullptr> \ auto OPERATOR(const T_a& a, const T_b& b) { \ return value_of(a) OP value_of(b); \ } ADD_MIXED_AUTODIFF_SCALAR_COMPARISON(operator<, <); ADD_MIXED_AUTODIFF_SCALAR_COMPARISON(operator<=, <=); ADD_MIXED_AUTODIFF_SCALAR_COMPARISON(operator>, >); ADD_MIXED_AUTODIFF_SCALAR_COMPARISON(operator>=, >=); ADD_MIXED_AUTODIFF_SCALAR_COMPARISON(operator==, ==); ADD_MIXED_AUTODIFF_SCALAR_COMPARISON(operator!=, !=); #undef ADD_MIXED_AUTODIFF_SCALAR_COMPARISON } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logb.hpp0000644000176200001440000000144614645137106022160 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGB_HPP #define STAN_MATH_PRIM_FUN_LOGB_HPP #include #include namespace stan { namespace math { /** * Returns the value of the unbiased radix-independent exponent from * the floating-point argument. * * Implementation note: This implementation works for all autodiff * types because it is a step function, so can return a `double` which * will produce the correct zero derivative if promoted to an autodiff * variable. * * @tparam T floating-point type * @param[in] x floating-point argument * @return unbiased radix-independent exponent of the argument */ template > double logb(const T& x) { return std::logb(value_of_rec(x)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/qr_Q.hpp0000644000176200001440000000206514645137106022135 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QR_Q_HPP #define STAN_MATH_PRIM_FUN_QR_Q_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the orthogonal factor of the fat QR decomposition * * @tparam EigMat type of the matrix * @param m Matrix. 
* @return Orthogonal matrix with maximal columns */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> qr_Q( const EigMat& m) { using matrix_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return matrix_t(0, 0); } Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); matrix_t Q = qr.householderQ(); const int min_size = std::min(m.rows(), m.cols()); for (int i = 0; i < min_size; i++) { if (qr.matrixQR().coeff(i, i) < 0) { Q.col(i) *= -1.0; } } return Q; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/pseudo_eigenvectors.hpp0000644000176200001440000000107314645137106025305 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PSEUDO_EIGENVECTORS_HPP #define STAN_MATH_PRIM_FUN_PSEUDO_EIGENVECTORS_HPP #include #include namespace stan { namespace math { template Eigen::Matrix pseudo_eigenvectors( const Eigen::Matrix& m) { check_nonzero_size("pseudo_eigenvectors", "m", m); check_square("pseudo_eigenvectors", "m", m); Eigen::EigenSolver> solver(m); return solver.pseudoEigenvectors(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/trace_gen_quad_form.hpp0000644000176200001440000000575514645137106025230 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRACE_GEN_QUAD_FORM_HPP #define STAN_MATH_PRIM_FUN_TRACE_GEN_QUAD_FORM_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the trace of D times the quadratic form of B and A. 
* That is, `trace_gen_quad_form(D, A, B) = trace(D * B' * A * B).` * * @tparam TD type of the first matrix or expression * @tparam TA type of the second matrix or expression * @tparam TB type of the third matrix or expression * * @param D multiplier * @param A outside term in quadratic form * @param B inner term in quadratic form * @return trace(D * B' * A * B) * @throw std::domain_error if A or D is not square * @throw std::domain_error if A cannot be multiplied by B or B cannot * be multiplied by D. */ template , typename = require_all_not_vt_var, typename = require_any_not_vt_arithmetic> inline auto trace_gen_quad_form(const TD& D, const TA& A, const TB& B) { check_square("trace_gen_quad_form", "A", A); check_square("trace_gen_quad_form", "D", D); check_multiplicable("trace_gen_quad_form", "A", A, "B", B); check_multiplicable("trace_gen_quad_form", "B", B, "D", D); const auto& B_ref = to_ref(B); return multiply(B_ref, D.transpose()).cwiseProduct(multiply(A, B_ref)).sum(); } /** * Return the trace of D times the quadratic form of B and A. * That is, `trace_gen_quad_form(D, A, B) = trace(D * B' * A * B).` * This is the overload for arithmetic types to allow Eigen's expression * templates to be used for efficiency. * * @tparam EigMatD type of the first matrix or expression * @tparam EigMatA type of the second matrix or expression * @tparam EigMatB type of the third matrix or expression * * @param D multiplier * @param A outside term in quadratic form * @param B inner term in quadratic form * @return trace(D * B' * A * B) * @throw std::domain_error if A or D is not square * @throw std::domain_error if A cannot be multiplied by B or B cannot * be multiplied by D. 
*/ template * = nullptr> inline double trace_gen_quad_form(const EigMatD& D, const EigMatA& A, const EigMatB& B) { check_square("trace_gen_quad_form", "A", A); check_square("trace_gen_quad_form", "D", D); check_multiplicable("trace_gen_quad_form", "A", A, "B", B); check_multiplicable("trace_gen_quad_form", "B", B, "D", D); const auto& B_ref = to_ref(B); return (B_ref * D.transpose()).cwiseProduct(A * B_ref).sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/bessel_first_kind.hpp0000644000176200001440000000350514645137106024724 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_BESSEL_FIRST_KIND_HPP #define STAN_MATH_PRIM_FUN_BESSEL_FIRST_KIND_HPP #include #include #include #include namespace stan { namespace math { /** * \f[ \mbox{bessel\_first\_kind}(v, x) = \begin{cases} J_v(x) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{error} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{bessel\_first\_kind}(v, x)}{\partial x} = \begin{cases} \frac{\partial\, J_v(x)}{\partial x} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{error} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ J_v(x)=\left(\frac{1}{2}x\right)^v \sum_{k=0}^\infty \frac{\left(-\frac{1}{4}x^2\right)^k}{k!\, \Gamma(v+k+1)} \f] \f[ \frac{\partial \, J_v(x)}{\partial x} = \frac{v}{x}J_v(x)-J_{v+1}(x) \f] * */ template * = nullptr> inline T2 bessel_first_kind(int v, const T2 z) { check_not_nan("bessel_first_kind", "z", z); return boost::math::cyl_bessel_j(v, z); } /** * Enables the vectorized application of the bessel first kind function, when * the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Bessel first kind function applied to the two inputs. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto bessel_first_kind(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return bessel_first_kind(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/as_value_column_array_or_scalar.hpp0000644000176200001440000000157614645137106027640 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AS_VALUE_COLUMN_ARRAY_OR_SCALAR #define STAN_MATH_PRIM_FUN_AS_VALUE_COLUMN_ARRAY_OR_SCALAR #include #include #include #include #include #include namespace stan { namespace math { /** * Extract the value from an object and for eigen vectors and `std::vectors` * convert to an eigen column array and for scalars return a scalar. * @tparam T A stan scalar, eigen vector, or `std::vector`. * @param a Specified scalar. * @return the scalar. */ template inline auto as_value_column_array_or_scalar(T&& a) { return value_of( as_array_or_scalar(as_column_vector_or_scalar(std::forward(a)))); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cov_matrix_constrain_lkj.hpp0000644000176200001440000001265714645137106026336 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COV_MATRIX_CONSTRAIN_LKJ_HPP #define STAN_MATH_PRIM_FUN_COV_MATRIX_CONSTRAIN_LKJ_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the covariance matrix of the specified dimensionality * derived from constraining the specified vector of unconstrained * values. The input vector must be of length \f$k \choose 2 + * k\f$. The first \f$k \choose 2\f$ values in the input * represent unconstrained (partial) correlations and the last * \f$k\f$ are unconstrained standard deviations of the dimensions. * *

The transform scales the correlation matrix transform defined * in corr_matrix_constrain(Matrix, size_t) * with the constrained deviations. * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x Input vector of unconstrained partial correlations and * standard deviations. * @param k Dimensionality of returned covariance matrix. * @return Covariance matrix derived from the unconstrained partial * correlations and deviations. */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_matrix_constrain_lkj(const T& x, size_t k) { size_t k_choose_2 = (k * (k - 1)) / 2; const auto& x_ref = to_ref(x); return read_cov_matrix(corr_constrain(x_ref.head(k_choose_2)), positive_constrain(x_ref.tail(k))); } /** * Return the covariance matrix of the specified dimensionality * derived from constraining the specified vector of unconstrained * values and increment the specified log probability reference * with the log absolute Jacobian determinant. * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x Input vector of unconstrained partial correlations and * standard deviations. * @param k Dimensionality of returned covariance matrix. * @param lp Log probability reference to increment. * @return Covariance matrix derived from the unconstrained partial * correlations and deviations. */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_matrix_constrain_lkj(const T& x, size_t k, return_type_t& lp) { size_t k_choose_2 = (k * (k - 1)) / 2; const auto& x_ref = x; return read_cov_matrix(corr_constrain(x_ref.head(k_choose_2)), positive_constrain(x_ref.tail(k)), lp); } /** * Return the covariance matrix of the specified dimensionality derived from * constraining the specified vector of unconstrained values. 
If the `Jacobian` * parameter is `true`, the log density accumulator is incremented with the log * absolute Jacobian determinant of the transform. All of the transforms are * specified with their Jacobians in the *Stan Reference Manual* chapter * Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time rows or * columns equal to 1 * @param x Input vector of unconstrained partial correlations and * standard deviations * @param k Dimensionality of returned covariance matrix * @param[in, out] lp log density accumulator * @return Covariance matrix derived from the unconstrained partial * correlations and deviations. */ template * = nullptr> inline auto cov_matrix_constrain_lkj(const T& x, size_t k, return_type_t& lp) { if (Jacobian) { return cov_matrix_constrain_lkj(x, k, lp); } else { return cov_matrix_constrain_lkj(x, k); } } /** * Return the covariance matrix of the specified dimensionality derived from * constraining the specified vector of unconstrained values. If the `Jacobian` * parameter is `true`, the log density accumulator is incremented with the log * absolute Jacobian determinant of the transform. All of the transforms are * specified with their Jacobians in the *Stan Reference Manual* chapter * Constraint Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time rows or columns equal to 1 * @param x Input vector of unconstrained partial correlations and * standard deviations * @param k Dimensionality of returned covariance matrix * @param[in, out] lp log density accumulator * @return Covariance matrix derived from the unconstrained partial * correlations and deviations. */ template * = nullptr> inline auto cov_matrix_constrain_lkj(const T& x, size_t k, return_type_t& lp) { return apply_vector_unary::apply(x, [&lp, k](auto&& v) { return cov_matrix_constrain_lkj(v, k, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sd.hpp0000644000176200001440000000167514645137106021647 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SD_HPP #define STAN_MATH_PRIM_FUN_SD_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the unbiased sample standard deviation of the * coefficients in the specified std vector, column vector, row vector, or * matrix. * * @tparam T type of the container * * @param m Specified container. * @return Sample variance. 
*/ template * = nullptr, require_not_st_var* = nullptr> inline auto sd(const T& m) { using std::sqrt; return apply_vector_unary::reduce(m, [](const auto& x) { check_nonzero_size("sd", "x", x); if (x.size() == 1) { return scalar_type_t(0.0); } return sqrt(variance(x)); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/unitspaced_array.hpp0000644000176200001440000000176014645137106024571 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_UNITSPACED_ARRAY_HPP #define STAN_MATH_PRIM_FUN_UNITSPACED_ARRAY_HPP #include #include #include #include namespace stan { namespace math { /** * Return an array of integers in an ordered sequence. * * This produces an array from low to high (included). * * @param low smallest integer * @param high largest integer * @return An array of size (high - low + 1) with elements linearly spaced * between low and high. * @throw std::domain_error if high is less than low. */ inline std::vector unitspaced_array(int low, int high) { static const char* function = "unitspaced_array"; check_greater_or_equal(function, "high", high, low); int K = std::abs(high - low + 1); std::vector result(K); Eigen::Map(result.data(), K) = Eigen::VectorXi::LinSpaced(K, low, high); return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/copysign.hpp0000644000176200001440000000542214645137106023066 0ustar liggesusers#ifndef STAN_MATH_PRIM_SCAL_FUN_COPYSIGN_HPP #define STAN_MATH_PRIM_SCAL_FUN_COPYSIGN_HPP #include #include #include #include namespace stan { namespace math { /** * Return the negation of the first argument if the first and second * argument have different signs, otherwise return a copy of the first * argument. For the sake of this function, zero is considered * positive. This function uses negation rather than literally copying * signs to preserve derivatives. * * Overload of `std::copysign` from `cmath` for argument-dependent * lookup. 
* * @tparam T type of first argument * @tparam U type of second argument * @param[in] x first complex argument * @param[in] y second complex argument * @return copy of the first argument, negated if necessary to match * the sign of the second argument */ template inline T copysign(const T& x, const U& y) { return (std::signbit(value_of_rec(x)) != std::signbit(value_of_rec(y))) ? -x : x; } /** * Return the negation of the first argument if the first and second * arguments have different signs and the first argument is not zero, * otherwise return a copy of the first argument. * * @tparam T type of first argument * @tparam U type of second argument * @param[in] x first complex argument * @param[in] y second complex argument * @return copy of the first argument, negated if necessary to match * the sign of the second argument * @see copysign */ template inline T copysign_non_zero(const T& x, const U& y) { return x != 0 ? stan::math::copysign(x, y) : x; } /** * Return the complex number composed of the real and complex parts * with signs copied from the real and complex parts of the first * arguments to the real and complex parts of the second. * * This is an overload of the standard libary `copysign` for complex * numbers that will be used with argument-dependent lookup. Rather * than using the standard library `copysign`, it uses * `copysign_non_zero`, which does not change sign if the reference * value is zero (`-0.0` or `0.0`). 
* * @tparam T value type of first argument * @tparam U value type of second argument * @param[in] x first complex argument * @param[in] y second complex argument * @return copy of second argument, with components negated if * necessary to match sign of first argument */ template inline std::complex copysign(const std::complex& x, const std::complex& y) { return {copysign_non_zero(x.real(), y.real()), copysign_non_zero(x.imag(), y.imag())}; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/beta.hpp0000644000176200001440000000454114645137106022147 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_BETA_HPP #define STAN_MATH_PRIM_FUN_BETA_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the beta function applied to the specified * arguments. * * The beta function is defined for \f$a > 0\f$ and \f$b > 0\f$ by * * \f$\mbox{B}(a, b) = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a+b)}\f$. * \f[ \mbox{beta}(\alpha, \beta) = \begin{cases} \int_0^1 u^{\alpha - 1} (1 - u)^{\beta - 1} \, du & \mbox{if } \alpha, \beta>0 \\[6pt] \textrm{NaN} & \mbox{if } \alpha = \textrm{NaN or } \beta = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{beta}(\alpha, \beta)}{\partial \alpha} = \begin{cases} \left(\psi(\alpha)-\psi(\alpha+\beta)\right)*\mbox{beta}(\alpha, \beta) & \mbox{if } \alpha, \beta>0 \\[6pt] \textrm{NaN} & \mbox{if } \alpha = \textrm{NaN or } \beta = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{beta}(\alpha, \beta)}{\partial \beta} = \begin{cases} \left(\psi(\beta)-\psi(\alpha+\beta)\right)*\mbox{beta}(\alpha, \beta) & \mbox{if } \alpha, \beta>0 \\[6pt] \textrm{NaN} & \mbox{if } \alpha = \textrm{NaN or } \beta = \textrm{NaN} \end{cases} \f] * * @tparam T1 type of first value * @tparam T2 type of second value * @param a First value * @param b Second value * @return Beta function applied to the two values. 
*/ template * = nullptr> inline return_type_t beta(const T1 a, const T2 b) { using std::exp; return exp(lgamma(a) + lgamma(b) - lgamma(a + b)); } /** * Enables the vectorized application of the beta function, when * the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Beta function applied to the two inputs. */ template * = nullptr, require_all_not_var_matrix_t* = nullptr> inline auto beta(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [](const auto& c, const auto& d) { return beta(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/one_hot_array.hpp0000644000176200001440000000147614645137106024071 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONE_HOT_ARRAY_HPP #define STAN_MATH_PRIM_FUN_ONE_HOT_ARRAY_HPP #include #include namespace stan { namespace math { /** * Return an array with 1 in the k-th position and zero elsewhere. * * @param K size of the array * @param k position of the 1 (indexing from 1) * @return An array of size K with all elements initialized to zero * and a 1 in the k-th position. * @throw std::domain_error if K is not positive, or if k is less than 1 or * greater than K. */ inline std::vector one_hot_array(int K, int k) { static const char* function = "one_hot_array"; check_positive(function, "size", K); check_bounded(function, "k", k, 1, K); std::vector v(K, 0); v[k - 1] = 1; return v; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/csr_extract_u.hpp0000644000176200001440000000304714645137106024101 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CSR_EXTRACT_U_HPP #define STAN_MATH_PRIM_FUN_CSR_EXTRACT_U_HPP #include #include #include #include #include namespace stan { namespace math { /** \addtogroup csr_format * @{ */ /** * Extract the NZE index for each entry from a sparse matrix. 
* * @tparam T type of elements in the matrix * @param A Sparse matrix. * @return Array of indexes into non-zero entries of A. */ template const std::vector csr_extract_u( const Eigen::SparseMatrix& A) { std::vector u(A.outerSize() + 1); // last entry is garbage. for (int nze = 0; nze <= A.outerSize(); ++nze) { u[nze] = *(A.outerIndexPtr() + nze) + stan::error_index::value; } return u; } /** * Extract the NZE index for each entry from a sparse matrix. * * @tparam T type of elements in the matrix * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic * @param A Dense matrix. * @return Array of indexes into non-zero entries of A. */ template * = nullptr> const std::vector csr_extract_u(const T& A) { // conversion to sparse seems to touch data twice, so we need to call to_ref Eigen::SparseMatrix, Eigen::RowMajor> B = to_ref(A).sparseView(); std::vector u = csr_extract_u(B); return u; } /** @} */ // end of csr_format group } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/owens_t.hpp0000644000176200001440000000465714645137106022722 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_OWENS_T_HPP #define STAN_MATH_PRIM_FUN_OWENS_T_HPP #include #include #include namespace stan { namespace math { /** * Return the result of applying Owen's T function to the * specified arguments. * * Used to compute the cumulative density function for the skew normal * distribution. 
* \f[ \mbox{owens\_t}(h, a) = \begin{cases} \mbox{owens\_t}(h, a) & \mbox{if } -\infty\leq h, a \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } h = \textrm{NaN or } a = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{owens\_t}(h, a)}{\partial h} = \begin{cases} \frac{\partial\, \mbox{owens\_t}(h, a)}{\partial h} & \mbox{if } -\infty\leq h, a\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } h = \textrm{NaN or } a = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{owens\_t}(h, a)}{\partial a} = \begin{cases} \frac{\partial\, \mbox{owens\_t}(h, a)}{\partial a} & \mbox{if } -\infty\leq h, a\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } h = \textrm{NaN or } a = \textrm{NaN} \end{cases} \f] \f[ \mbox{owens\_t}(h, a) = \frac{1}{2\pi} \int_0^a \frac{\exp(-\frac{1}{2}h^2(1+x^2))}{1+x^2}dx \f] \f[ \frac{\partial \, \mbox{owens\_t}(h, a)}{\partial h} = -\frac{1}{2\sqrt{2\pi}} \operatorname{erf}\left(\frac{ha}{\sqrt{2}}\right) \exp\left(-\frac{h^2}{2}\right) \f] \f[ \frac{\partial \, \mbox{owens\_t}(h, a)}{\partial a} = \frac{\exp\left(-\frac{1}{2}h^2(1+a^2)\right)}{2\pi (1+a^2)} \f] * * @param h First argument * @param a Second argument * @return Owen's T function applied to the arguments. */ inline double owens_t(double h, double a) { return boost::math::owens_t(h, a); } /** * Enables the vectorized application of the owens_t * function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return owens_t function applied to the two inputs. 
*/ template * = nullptr, require_all_not_var_and_matrix_types* = nullptr> inline auto owens_t(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [](const auto& c, const auto& d) { return owens_t(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_left_tri_low.hpp0000644000176200001440000000345614645137106025432 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_TRI_LOW_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_TRI_LOW_HPP #include #include #include namespace stan { namespace math { /** * Return the result of left dividing the second argument by the * first argument. Calling mdivide_left_tri_low(A, * b) with divisor A and dividend * b is more arithmetically stable than calling * inv(A) * b. * * @tparam T1 type of the divisor matrix * @tparam T2 type of the dividend matrix * * @param A divisor, an invertible square matrix * @param b dividend, a matrix or vector with the same number of * rows as the divisor has columns * @return left division of b by A * @throws std::invalid_argument if the divisor is not square or * the dividend does not have the same number of rows as the * divisor has columns. 
*/ template * = nullptr, require_all_not_eigen_vt* = nullptr> inline Eigen::Matrix, T1::RowsAtCompileTime, T2::ColsAtCompileTime> mdivide_left_tri_low(const T1& A, const T2& b) { check_square("mdivide_left_tri_low", "A", A); check_multiplicable("mdivide_left_tri_low", "A", A, "b", b); if (A.rows() == 0) { return {0, b.cols()}; } return mdivide_left_tri(A, b); } template * = nullptr, require_not_eigen_vt* = nullptr> inline plain_type_t mdivide_left_tri_low(const T& A) { check_square("mdivide_left_tri_low", "A", A); if (A.rows() == 0) { return {}; } return mdivide_left_tri(A); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_left_tri.hpp0000644000176200001440000000400414645137106024537 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_TRI_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_TRI_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the solution of the system Ax=b when A is triangular. * * @tparam TriView Specifies whether A is upper (Eigen::Upper) * or lower triangular (Eigen::Lower). * @tparam T1 type of the triangular matrix * @tparam T2 type of the right-hand side matrix or vector * * @param A Triangular matrix. * @param b Right hand side matrix or vector. * @return x = A^-1 b, solution of the linear system. * @throws std::domain_error if A is not square or the rows of b don't * match the size of A. */ template * = nullptr, require_all_not_eigen_vt * = nullptr> inline auto mdivide_left_tri(const T1 &A, const T2 &b) { using T_return = return_type_t; using ret_type = Eigen::Matrix; check_square("mdivide_left_tri", "A", A); check_multiplicable("mdivide_left_tri", "A", A, "b", b); if (A.rows() == 0) { return ret_type(0, b.cols()); } return ret_type(A) .template triangularView() .solve(ret_type(b)) .eval(); } /** * Returns the solution of the system Ax=b when A is triangular and b=I. * * @tparam T type of the matrix * * @param A Triangular matrix. * @return x = A^-1 . 
* @throws std::domain_error if A is not square */ template * = nullptr> inline plain_type_t mdivide_left_tri(const T &A) { check_square("mdivide_left_tri", "A", A); if (A.rows() == 0) { return {}; } int n = A.rows(); plain_type_t b = plain_type_t::Identity(n, n); A.template triangularView().solveInPlace(b); return b; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/columns_dot_product.hpp0000644000176200001440000000220214645137106025312 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COLUMNS_DOT_PRODUCT_HPP #define STAN_MATH_PRIM_FUN_COLUMNS_DOT_PRODUCT_HPP #include #include namespace stan { namespace math { /** * Returns the dot product of columns of the specified matrices. * * @tparam Mat1 type of the first matrix (must be derived from \c * Eigen::MatrixBase) * @tparam Mat2 type of the second matrix (must be derived from \c * Eigen::MatrixBase) * * @param v1 Matrix of first vectors. * @param v2 Matrix of second vectors. * @return Dot product of the vectors. * @throw std::domain_error If the vectors are not the same * size or if they are both not vector dimensioned. 
*/ template * = nullptr, require_all_not_eigen_vt* = nullptr> inline Eigen::Matrix, 1, Mat1::ColsAtCompileTime> columns_dot_product(const Mat1& v1, const Mat2& v2) { check_matching_sizes("columns_dot_product", "v1", v1, "v2", v2); return v1.cwiseProduct(v2).colwise().sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/svd.hpp0000644000176200001440000000263114645137106022026 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SVD_HPP #define STAN_MATH_PRIM_FUN_SVD_HPP #include #include namespace stan { namespace math { /** * Given input matrix m, return the singular value decomposition (U,D,V) * such that `m = U*diag(D)*V^{T}` * * @tparam EigMat type of the matrix * @param m MxN input matrix * @return a tuple (U,D,V) where U is an orthogonal matrix, D a vector of * singular values (in decreasing order), and V an orthogonal matrix */ template * = nullptr, require_not_st_var* = nullptr> std::tuple, -1, -1>, Eigen::Matrix, -1, 1>, Eigen::Matrix, -1, -1>> svd(const EigMat& m) { if (unlikely(m.size() == 0)) { return std::make_tuple(Eigen::Matrix, -1, -1>(0, 0), Eigen::Matrix, -1, 1>(0, 1), Eigen::Matrix, -1, -1>(0, 0)); } Eigen::JacobiSVD, -1, -1>> svd( m, Eigen::ComputeThinU | Eigen::ComputeThinV); return std::make_tuple(std::move(svd.matrixU()), std::move(svd.singularValues()), std::move(svd.matrixV())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/minus.hpp0000644000176200001440000000154014645137106022363 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MINUS_HPP #define STAN_MATH_PRIM_FUN_MINUS_HPP #include #include namespace stan { namespace math { /** * Returns the negation of the specified scalar or matrix. * * @tparam T Type of subtrahend. * @param x Subtrahend. * @return Negation of subtrahend. */ template * = nullptr> inline auto minus(const T& x) { return -x; } /** * Return the negation of the each element of a vector * * @tparam T Type of container. * @param x Container. 
* @return Container where each element is negated. */ template inline auto minus(const std::vector& x) { return apply_vector_unary>::apply( x, [](const auto& v) { return -v; }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_rising_factorial.hpp0000644000176200001440000000522214645137106025411 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_RISING_FACTORIAL_HPP #define STAN_MATH_PRIM_FUN_LOG_RISING_FACTORIAL_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the natural logarithm of the rising factorial from the * first specified argument to the second. * \f[ \mbox{log\_rising\_factorial}(x, n) = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \ln x^{(n)} & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_rising\_factorial}(x, n)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \Psi(x+n) - \Psi(x) & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_rising\_factorial}(x, n)}{\partial n} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \Psi(x+n) & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] * * @tparam T1 type of first argument x * @tparam T2 type of second argument n * @param[in] x first argument * @param[in] n second argument * @return natural logarithm of the rising factorial from x to n * @throw std::domain_error if the first argument is not positive */ template * = nullptr> inline return_type_t log_rising_factorial(const T1& x, const T2& n) { if (is_any_nan(x, n)) { return NOT_A_NUMBER; } static const char* function = "log_rising_factorial"; check_positive(function, "first 
argument", x); return lgamma(x + n) - lgamma(x); } /** * Enables the vectorized application of the log_rising_factorial * function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return log_rising_factorial function applied to the two inputs. */ template * = nullptr> inline auto log_rising_factorial(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return log_rising_factorial(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inverse_softmax.hpp0000644000176200001440000000251214645137106024444 0ustar liggesusers#ifndef STAN_MATH_ARR_SCAL_FUN_INVERSE_SOFTMAX_HPP #define STAN_MATH_ARR_SCAL_FUN_INVERSE_SOFTMAX_HPP #include #include #include #include namespace stan { namespace math { /** * Writes the inverse softmax of the simplex argument into the second * argument. See softmax for the inverse * function and a definition of the relation. * * The inverse softmax function is defined by * * \f$\mbox{inverse\_softmax}(x)[i] = \log x[i]\f$. * * This function defines the inverse of softmax * up to a scaling factor. * * Because of the definition, values of 0.0 in the simplex * are converted to negative infinity, and values of 1.0 * are converted to 0.0. * * There is no check that the input vector is a valid simplex vector. * * @tparam Vector type of the simplex vector * @param simplex Simplex vector input. * @param y Vector into which the inverse softmax is written. * @throw std::invalid_argument if size of the input and * output vectors differ. 
*/ template * = nullptr> void inverse_softmax(const Vector& simplex, Vector& y) { check_matching_sizes("inverse_softmax", "simplex", simplex, "y", y); y = log(simplex); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/qr.hpp0000644000176200001440000000305714645137106021657 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QR_HPP #define STAN_MATH_PRIM_FUN_QR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the fat QR decomposition * * @tparam EigMat type of the matrix * @param m Matrix. * @return A tuple containing (Q,R): * 1. Orthogonal matrix with maximal columns * 2. Upper triangular matrix with maximal rows */ template * = nullptr> std::tuple, Eigen::Dynamic, Eigen::Dynamic>, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>> qr(const EigMat& m) { using matrix_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return std::make_tuple(matrix_t(0, 0), matrix_t(0, 0)); } Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); matrix_t Q = qr.householderQ(); const int min_size = std::min(m.rows(), m.cols()); for (int i = 0; i < min_size; i++) { if (qr.matrixQR().coeff(i, i) < 0) { Q.col(i) *= -1.0; } } matrix_t R = qr.matrixQR(); if (m.rows() > m.cols()) { R.bottomRows(m.rows() - m.cols()).setZero(); } for (int i = 0; i < min_size; i++) { for (int j = 0; j < i; j++) { R.coeffRef(i, j) = 0.0; } if (R(i, i) < 0) { R.row(i) *= -1.0; } } return std::make_tuple(std::move(Q), std::move(R)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/accumulator.hpp0000644000176200001440000000502214645137106023546 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ACCUMULATOR_HPP #define STAN_MATH_PRIM_FUN_ACCUMULATOR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Class to accumulate values and eventually return their sum. If * no values are ever added, the return value is 0. 
* * This class is useful for speeding up autodiff of long sums * because it uses the sum() operation (either from * stan::math or one defined by argument-dependent lookup. * * @tparam T Type of scalar added */ template class accumulator { private: std::vector buf_; public: /** * Add the specified arithmetic type value to the buffer after * static casting it to the class type T. * *

See the std library doc for std::is_arithmetic * for information on what counts as an arithmetic type. * * @tparam S Type of argument * @param x Value to add */ template > inline void add(S x) { buf_.push_back(x); } /** * Add each entry in the specified matrix, vector, or row vector * of values to the buffer. * * @tparam S type of the matrix * @param m Matrix of values to add */ template * = nullptr> inline void add(const S& m) { buf_.push_back(stan::math::sum(m)); } /** * Recursively add each entry in the specified standard vector * to the buffer. This will allow vectors of primitives, * autodiff variables to be added; if the vector entries * are collections, their elements are recursively added. * * @tparam S Type of value to recursively add. * @param xs Vector of entries to add */ template inline void add(const std::vector& xs) { for (size_t i = 0; i < xs.size(); ++i) { this->add(xs[i]); } } #ifdef STAN_OPENCL /** * Sum each entry and then push to the buffer. * @tparam S A Type inheriting from `matrix_cl_base` * @param xs An OpenCL matrix */ template * = nullptr> inline void add(const S& xs) { buf_.push_back(stan::math::sum(xs)); } #endif /** * Return the sum of the accumulated values. * * @return Sum of accumulated values. */ inline T sum() const { return stan::math::sum(buf_); } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inverse_spd.hpp0000644000176200001440000000315514645137106023555 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INVERSE_SPD_HPP #define STAN_MATH_PRIM_FUN_INVERSE_SPD_HPP #include #include #include namespace stan { namespace math { /** * Returns the inverse of the specified symmetric, pos/neg-definite matrix. * * @tparam EigMat type of elements in the matrix * @param m specified matrix * @return Inverse of the matrix (an empty matrix if the specified matrix has * size zero). * @throw std::invalid_argument if the matrix is not symmetric. 
* @throw std::domain_error if the matrix is not positive definite. */ template inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> inverse_spd(const EigMat& m) { using Eigen::Dynamic; using Eigen::LDLT; using Eigen::Matrix; using Scalar = value_type_t; if (m.size() == 0) { return {}; } const Eigen::Ref>& m_ref = m; check_symmetric("inverse_spd", "m", m_ref); plain_type_t mmt = 0.5 * (m_ref + m_ref.transpose()); LDLT> ldlt(mmt); if (ldlt.info() != Eigen::Success) { throw_domain_error("invese_spd", "LDLT factor failed", "", ""); } if (!ldlt.isPositive()) { throw_domain_error("invese_spd", "matrix not positive definite", "", ""); } Matrix diag_ldlt = ldlt.vectorD(); check_positive("inverse_spd", "matrix not positive definite", diag_ldlt); return ldlt.solve( Eigen::Matrix::Identity( m.rows(), m.cols())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cholesky_decompose.hpp0000644000176200001440000000331014645137106025104 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CHOLESKY_DECOMPOSE_HPP #define STAN_MATH_PRIM_FUN_CHOLESKY_DECOMPOSE_HPP #include #include #include #include namespace stan { namespace math { /** * Return the lower-triangular Cholesky factor (i.e., matrix * square root) of the specified square, symmetric matrix. The return * value \f$L\f$ will be a lower-triangular matrix such that the * original matrix \f$A\f$ is given by *

\f$A = L \times L^EigMat\f$. * * @tparam EigMat type of the matrix (must be derived from \c Eigen::MatrixBase) * @param m Symmetric matrix. * @return Square root of matrix. * @note Because OpenCL only works on doubles there are two * cholesky_decompose functions. One that works on doubles * and another that works on all other types (this one). * @throw std::domain_error if m is not a symmetric matrix or * if m is not positive definite (if m has more than 0 elements) */ template * = nullptr, require_not_eigen_vt* = nullptr> inline Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime> cholesky_decompose(const EigMat& m) { const eval_return_type_t& m_eval = m.eval(); check_symmetric("cholesky_decompose", "m", m_eval); check_not_nan("cholesky_decompose", "m", m_eval); Eigen::LLT, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime>> llt = m_eval.llt(); check_pos_definite("cholesky_decompose", "m", llt); return llt.matrixL(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/exp.hpp0000644000176200001440000000621114645137106022024 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EXP_HPP #define STAN_MATH_PRIM_FUN_EXP_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `exp()` so that it can be * vectorized. */ struct exp_fun { /** * Return the exponential of the specified scalar argument. * * @tparam T type of argument * @param[in] x argument * @return Exponential of argument. */ template static inline auto fun(const T& x) { using std::exp; return exp(x); } }; /** * Return the elementwise `exp()` of the specified argument, * which may be a scalar or any Stan container of numeric scalars. * The return type is the same as the argument type. * * @tparam Container type of container * @param[in] x container * @return Elementwise application of exponentiation to the argument. 
*/ template < typename Container, require_not_container_st* = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto exp(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `exp()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Elementwise application of exponentiation to the argument. */ template * = nullptr> inline auto exp(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().exp(); }); } namespace internal { /** * Return the natural (base e) complex exponentiation of the specified * complex argument. * * @tparam V value type (must be Stan autodiff type) * @param z complex number * @return natural exponentiation of specified complex number * @see documentation for `std::complex` for boundary condition and * branch cut details */ template inline std::complex complex_exp(const std::complex& z) { if (is_inf(z.real()) && z.real() > 0) { if (is_nan(z.imag()) || z.imag() == 0) { // (+inf, nan), (+inf, 0) return z; } else if (is_inf(z.imag()) && z.imag() > 0) { // (+inf, +inf) return {z.real(), std::numeric_limits::quiet_NaN()}; } else if (is_inf(z.imag()) && z.imag() < 0) { // (+inf, -inf) return {std::numeric_limits::quiet_NaN(), std::numeric_limits::quiet_NaN()}; } } if (is_inf(z.real()) && z.real() < 0 && (is_nan(z.imag()) || is_inf(z.imag()))) { // (-inf, nan), (-inf, -inf), (-inf, inf) return {0, 0}; } if (is_nan(z.real()) && z.imag() == -0.0) { // (nan, -0) return z; } V exp_re = exp(z.real()); return {exp_re * cos(z.imag()), exp_re * sin(z.imag())}; } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/digamma.hpp0000644000176200001440000000430314645137106022627 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DIGAMMA_HPP #define 
STAN_MATH_PRIM_FUN_DIGAMMA_HPP #include #include #include #include namespace stan { namespace math { /** * Return the derivative of the log gamma function * at the specified value. * \f[ \mbox{digamma}(x) = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \Psi(x) & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{digamma}(x)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \frac{\partial\, \Psi(x)}{\partial x} & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \Psi(x)=\frac{\Gamma'(x)}{\Gamma(x)} \f] \f[ \frac{\partial \, \Psi(x)}{\partial x} = \frac{\Gamma''(x)\Gamma(x)-(\Gamma'(x))^2}{\Gamma^2(x)} \f] * * The design follows the standard C++ library in returning NaN * rather than throwing exceptions. * * @param[in] x argument * @return derivative of log gamma function at argument */ inline double digamma(double x) { return boost::math::digamma(x, boost_policy_t<>()); } /** * Structure to wrap digamma() so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Digamma function applied to x. * @throw std::domain_error if x is a negative integer or 0 */ struct digamma_fun { template static inline auto fun(const T& x) { return digamma(x); } }; /** * Vectorized version of digamma(). * * @tparam T type of container * @param x container * @return Digamma function applied to each value in x. 
* @throw std::domain_error if any value is a negative integer or 0 */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto digamma(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/divide_columns.hpp0000644000176200001440000000274214645137106024241 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DIVIDE_COLUMNS_HPP #define STAN_MATH_PRIM_FUN_DIVIDE_COLUMNS_HPP #include #include #include #include namespace stan { namespace math { /** * Takes Stan data type vector[n] x[D] and divides column * vector in x element-wise by the values in vec * * @tparam T_x Type of dividend * @tparam T_v Scalar type of divisor * @param x std::vector of matrices * @param vec std::vector of divisors * @throw std::invalid argument if D != length of vector */ template inline typename std::vector< Eigen::Matrix, Eigen::Dynamic, 1>> divide_columns(const std::vector> &x, const std::vector &vec) { const size_t N = x.size(); const size_t D = x[0].size(); check_size_match("divide_columns", "x dimension", D, "vector", vec.size()); Eigen::Map> v_vec(&vec[0], vec.size()); std::vector, Eigen::Dynamic, 1>> out(N); for (size_t n = 0; n < N; ++n) { out[n].resize(D); check_size_match("divide_columns", "x dimension", x[n].size(), "vector", v_vec.size()); out[n] = x[n].array() / v_vec.array(); } return out; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/matrix_exp_action_handler.hpp0000644000176200001440000001707014645137106026447 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MATRIX_EXP_ACTION_HANDLER_HPP #define STAN_MATH_PRIM_FUN_MATRIX_EXP_ACTION_HANDLER_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * The implementation of the work by Awad H. Al-Mohy and Nicholas J. 
Higham * "Computing the Action of the Matrix Exponential, * with an Application to Exponential Integrators" * Read More: https://epubs.siam.org/doi/abs/10.1137/100788860 * See also: * https://www.mathworks.com/matlabcentral/fileexchange/29576-matrix-exponential-times-a-vector * * Calculates exp(mat*t)*b, where mat & b are matrices, * and t is double. */ class matrix_exp_action_handler { const int _p_max = 8; const int _m_max = 55; const double _tol = 1.1e-16; // from the paper, double precision: 2^-53 const std::vector _theta_m{ 2.22044605e-16, 2.58095680e-08, 1.38634787e-05, 3.39716884e-04, 2.40087636e-03, 9.06565641e-03, 2.38445553e-02, 4.99122887e-02, 8.95776020e-02, 1.44182976e-01, 2.14235807e-01, 2.99615891e-01, 3.99777534e-01, 5.13914694e-01, 6.41083523e-01, 7.80287426e-01, 9.30532846e-01, 1.09086372e+00, 1.26038106e+00, 1.43825260e+00, 1.62371595e+00, 1.81607782e+00, 2.01471078e+00, 2.21904887e+00, 2.42858252e+00, 2.64285346e+00, 2.86144963e+00, 3.08400054e+00, 3.31017284e+00, 3.53966635e+00, 3.77221050e+00, 4.00756109e+00, 4.24549744e+00, 4.48581986e+00, 4.72834735e+00, 4.97291563e+00, 5.21937537e+00, 5.46759063e+00, 5.71743745e+00, 5.96880263e+00, 6.22158266e+00, 6.47568274e+00, 6.73101590e+00, 6.98750228e+00, 7.24506843e+00, 7.50364669e+00, 7.76317466e+00, 8.02359473e+00, 8.28485363e+00, 8.54690205e+00, 8.80969427e+00, 9.07318789e+00, 9.33734351e+00, 9.60212447e+00, 9.86749668e+00, 1.01334283e+01, 1.03998897e+01, 1.06668532e+01, 1.09342929e+01, 1.12021845e+01, 1.14705053e+01, 1.17392341e+01, 1.20083509e+01, 1.22778370e+01, 1.25476748e+01, 1.28178476e+01, 1.30883399e+01, 1.33591369e+01, 1.36302250e+01, 1.39015909e+01, 1.41732223e+01, 1.44451076e+01, 1.47172357e+01, 1.49895963e+01, 1.52621795e+01, 1.55349758e+01, 1.58079765e+01, 1.60811732e+01, 1.63545578e+01, 1.66281227e+01, 1.69018609e+01, 1.71757655e+01, 1.74498298e+01, 1.77240478e+01, 1.79984136e+01, 1.82729215e+01, 1.85475662e+01, 1.88223426e+01, 1.90972458e+01, 1.93722711e+01, 1.96474142e+01, 
1.99226707e+01, 2.01980367e+01, 2.04735082e+01, 2.07490816e+01, 2.10247533e+01, 2.13005199e+01, 2.15763782e+01, 2.18523250e+01, 2.21283574e+01}; public: /** * Perform the matrix exponential action exp(A*t)*B * @param [in] mat matrix A * @param [in] b matrix B * @param [in] t double t, e.g. time. * @return matrix exp(A*t)*B */ template * = nullptr, require_all_st_same* = nullptr> inline Eigen::MatrixXd action(const EigMat1& mat, const EigMat2& b, const double& t = 1.0) { Eigen::MatrixXd A = mat; double mu = A.trace() / A.rows(); A.diagonal().array() -= mu; int m, s; set_approx_order(A, b, t, m, s); double eta = exp(t * mu / s); const auto& b_eval = b.eval(); Eigen::MatrixXd f = b_eval; Eigen::MatrixXd bi = b_eval; for (int i = 0; i < s; ++i) { // maximum absolute row sum, aka inf-norm of matrix operator double c1 = matrix_operator_inf_norm(bi); for (int j = 0; j < m; ++j) { bi = (t / (s * (j + 1))) * (A * bi); f += bi; double c2 = matrix_operator_inf_norm(bi); if (c1 + c2 < _tol * matrix_operator_inf_norm(f)) break; c1 = c2; } f *= eta; bi = f; } return f; } /** * Eigen expression for matrix operator infinity norm * * @param x matrix */ double matrix_operator_inf_norm(Eigen::MatrixXd const& x) { return x.cwiseAbs().rowwise().sum().maxCoeff(); } /** * Estimate the 1-norm of mat^m * * See A. H. Al-Mohy and N. J. Higham, A New Scaling and Squaring * Algorithm for the Matrix Exponential, SIAM J. Matrix Anal. Appl. 31(3): * 970-989, 2009. * * For positive matrices the results is exact. Otherwise it falls * back to Eigen's norm, which is only * efficient for small & medium-size matrices (n < 100). Large size * matrices require a more efficient 1-norm approximation * algorithm such as normest1. 
See, e.g., * https://hg.savannah.gnu.org/hgweb/octave/file/e35866e6a2e0/scripts/linear-algebra/normest1.m * * @param mat matrix * @param m power */ template * = nullptr, require_all_st_same* = nullptr> double mat_power_1_norm(const EigMat1& mat, int m) { if ((mat.array() > 0.0).all()) { Eigen::VectorXd e = Eigen::VectorXd::Constant(mat.rows(), 1.0); for (int j = 0; j < m; ++j) { e = mat.transpose() * e; } return e.lpNorm(); } else { return mat.pow(m).cwiseAbs().colwise().sum().maxCoeff(); } } /** * Approximation is based on parameter "m" and "s", * proposed in CODE FRAGMENT 3.1 of the reference. The * parameters depend on matrix A, as well as limits * "m_max" & "p_max", defined in table 3.1 and eq. 3.11, * respectively. Ideally one can choose "m_max" and * "p_max" to suite the specific computing precision needs, * here we just use the one suggested in the paper * (paragraph between eq. 3.11 & eq. 3.12). * * @param [in] mat matrix A * @param [in] b matrix B * @param [in] t double t in exp(A*t)*B * @param [out] m int parameter m; m >= 0, m < _m_max * @param [out] s int parameter s; s >= 1 */ template * = nullptr, require_all_st_same* = nullptr> inline void set_approx_order(const EigMat1& mat, const EigMat2& b, const double& t, int& m, int& s) { if (t < _tol) { m = 0; s = 1; return; } // L1 norm double normA = mat.colwise().template lpNorm<1>().maxCoeff(); if (normA < _tol) { m = 0; s = 1; return; } Eigen::VectorXd alpha(_p_max - 1); if (normA * (_m_max * b.cols()) < 4.0 * _theta_m[_m_max] * _p_max * (_p_max + 3)) { alpha = Eigen::VectorXd::Constant(_p_max - 1, normA); } else { Eigen::VectorXd eta(_p_max); for (int p = 0; p < _p_max; ++p) { eta[p] = std::pow(mat_power_1_norm(mat, p + 2), 1.0 / (p + 2)); } for (int p = 0; p < _p_max - 1; ++p) { alpha[p] = std::max(eta[p], eta[p + 1]); } } Eigen::MatrixXd mt = Eigen::MatrixXd::Zero(_p_max - 1, _m_max); for (int p = 1; p < _p_max; ++p) { for (int i = p * (p + 1) - 2; i < _m_max; ++i) { mt(p - 1, i) = alpha[p - 1] / 
_theta_m[i]; } } Eigen::VectorXd u = Eigen::VectorXd::LinSpaced(_m_max, 1, _m_max); Eigen::MatrixXd c = stan::math::ceil(mt) * u.asDiagonal(); for (Eigen::MatrixXd::Index i = 0; i < c.size(); ++i) { if (c(i) <= 1.e-16) { c(i) = std::numeric_limits::infinity(); } } int cost = c.colwise().minCoeff().minCoeff(&m); if (std::isinf(cost)) { s = 1; return; } s = std::max(cost / m, 1); } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/prod.hpp0000644000176200001440000000242414645137106022176 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PROD_HPP #define STAN_MATH_PRIM_FUN_PROD_HPP #include #include #include namespace stan { namespace math { /** * Returns the product of given scalar. This is a no-op. * * @tparam T type of the scalar * @param v specified scalar * @return the scalar */ template * = nullptr> T prod(const T& v) { return v; } /** * Returns the product of the coefficients of the specified * standard vector. * * @tparam T type of elements in the vector * @param v Specified vector. * @return Product of coefficients of vector. */ template inline T prod(const std::vector& v) { if (v.size() == 0) { return 1; } Eigen::Map> m(&v[0], v.size()); return m.prod(); } /** * Returns the product of the coefficients of the specified * column vector. * * @tparam T type of elements in the vector * @param v Specified vector. * @return Product of coefficients of vector. */ template * = nullptr> inline value_type_t prod(const EigMat& v) { if (v.size() == 0) { return 1.0; } return v.prod(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/softmax.hpp0000644000176200001440000000265314645137106022717 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SOFTMAX_HPP #define STAN_MATH_PRIM_FUN_SOFTMAX_HPP #include #include #include #include namespace stan { namespace math { /** * Return the softmax of the specified vector. * *

* \f$ * \mbox{softmax}(y) * = \frac{\exp(y)} * {\sum_{k=1}^K \exp(y_k)}, * \f$ * *

The entries in the Jacobian of the softmax function are given by * \f$ * \begin{array}{l} * \displaystyle * \frac{\partial}{\partial y_m} \mbox{softmax}(y)[k] * \\[8pt] * \displaystyle * \mbox{ } \ \ \ = \left\{ * \begin{array}{ll} * \mbox{softmax}(y)[k] \times (1 - \mbox{softmax}(y)[m]) * & \mbox{ if } m = k, \mbox{ and} * \\[6pt] * -\mbox{softmax}(y)[k] \times \mbox{softmax}(y)[m] * & \mbox{ if } m \neq k. * \end{array} * \right. * \end{array} * \f$ * * @tparam ColVec type of elements in the vector * @param[in] v Vector to transform. * @return Unit simplex result of the softmax transform of the vector. */ template * = nullptr> inline plain_type_t softmax(const ColVec& v) { using std::exp; if (v.size() == 0) { return v; } const auto& v_ref = to_ref(v); const auto theta = (v_ref.array() - v_ref.maxCoeff()).exp().eval(); return theta.array() / theta.sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/stan_print.hpp0000644000176200001440000000401014645137106023404 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_STAN_PRINT_HPP #define STAN_MATH_PRIM_FUN_STAN_PRINT_HPP #include #include #include #include namespace stan { namespace math { // prints used in generator for print() statements in modeling language template * = nullptr, require_not_tuple_t* = nullptr> void stan_print(std::ostream* o, const T& x) { *o << x; } template * = nullptr> void stan_print(std::ostream* o, const EigVec& x) { const auto& x_ref = to_ref(x); *o << '['; for (int i = 0; i < x_ref.size(); ++i) { if (i > 0) { *o << ','; } stan_print(o, x_ref.coeff(i)); } *o << ']'; } template * = nullptr, require_not_eigen_vector_t* = nullptr> void stan_print(std::ostream* o, const EigMat& x) { const auto& x_ref = to_ref(x); *o << '['; for (int i = 0; i < x_ref.rows(); ++i) { if (i > 0) { *o << ','; } *o << '['; for (int j = 0; j < x_ref.cols(); ++j) { if (j > 0) { *o << ','; } stan_print(o, x_ref.coeff(i, j)); } *o << ']'; } *o << ']'; } // forward decl to allow 
the next two overloads to call each other template * = nullptr> void stan_print(std::ostream* o, const T& x); template * = nullptr> void stan_print(std::ostream* o, const T& x) { *o << '['; for (size_t i = 0; i < x.size(); ++i) { if (i > 0) { *o << ','; } stan_print(o, x[i]); } *o << ']'; } template *> void stan_print(std::ostream* o, const T& x) { *o << '('; size_t i = 0; stan::math::for_each( [&i, o](auto&& elt) { if (i > 0) { *o << ','; } stan_print(o, elt); i++; }, x); *o << ')'; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/size.hpp0000644000176200001440000000161714645137106022207 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SIZE_HPP #define STAN_MATH_PRIM_FUN_SIZE_HPP #include #include #include #include namespace stan { namespace math { /** \ingroup type_trait * Returns the length of primitive scalar types * that are always of length 1. */ template * = nullptr> inline size_t size(const T& /*x*/) { return 1U; } /** \ingroup type_trait * Returns the size of the provided Eigen matrix, expression or std::vector. * * @param m input \c Eigen \c Matrix, expression or std::vector * @tparam T type of m */ template * = nullptr> inline size_t size(const T& m) { return m.size(); } template * = nullptr> inline size_t size(const T& m) { return m.size(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_right_ldlt.hpp0000644000176200001440000000410714645137106025067 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_LDLT_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_LDLT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the solution of the system xA=b given an LDLT_factor of A * * @tparam EigMat type of the right hand side * @tparam T type of matrix in the LDLT_factor * * @param A LDLT_factor * @param b Right hand side matrix or vector. * @return x = b A^-1, solution of the linear system. 
* @throws std::domain_error if rows of b don't match the size of A. */ template * = nullptr, require_any_not_st_arithmetic* = nullptr> inline auto mdivide_right_ldlt(const EigMat& b, LDLT_factor& A) { check_multiplicable("mdivide_right_ldlt", "b", b, "A", A.matrix()); check_ldlt_factor("mdivide_right_ldlt", "A", A); return mdivide_left_ldlt(A, b.transpose()).transpose().eval(); } /** * Returns the solution of the system xA=b given an LDLT_factor of A * * Overload for arithmetic types * * @tparam EigMat type of the right hand side * @tparam T type of matrix in the LDLT_factor * * @param A LDLT_factor * @param b Right hand side matrix or vector. * @return x = b A^-1, solution of the linear system. * @throws std::domain_error if rows of b don't match the size of A. */ template * = nullptr, require_all_st_arithmetic* = nullptr> inline Eigen::Matrix mdivide_right_ldlt(const EigMat& b, LDLT_factor& A) { check_multiplicable("mdivide_right_ldlt", "b", b, "A", A.matrix()); check_ldlt_factor("mdivide_right_ldlt", "A", A); if (A.matrix().rows() == 0) { return {b.rows(), 0}; } return A.ldlt().solve(b.transpose()).transpose(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/elt_divide.hpp0000644000176200001440000000403214645137106023337 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ELT_DIVIDE_HPP #define STAN_MATH_PRIM_FUN_ELT_DIVIDE_HPP #include #include #include #include namespace stan { namespace math { /** * Return the elementwise division of the specified matrices. * * @tparam Mat1 type of the first matrix or expression * @tparam Mat2 type of the second matrix or expression * * @param m1 First matrix or expression * @param m2 Second matrix or expression * @return Elementwise division of matrices. 
*/ template * = nullptr, require_all_not_st_var* = nullptr> auto elt_divide(const Mat1& m1, const Mat2& m2) { check_matching_dims("elt_divide", "m1", m1, "m2", m2); return (m1.array() / m2.array()).matrix(); } /** * Return the elementwise division of the specified matrix * by the specified scalar. * * @tparam Mat type of the matrix or expression * @tparam Scal type of the scalar * * @param m matrix or expression * @param s scalar * @return Elementwise division of a scalar by matrix. */ template * = nullptr, require_stan_scalar_t* = nullptr> auto elt_divide(const Mat& m, Scal s) { return divide(m, s); } /** * Return the elementwise division of the specified scalar * by the specified matrix. * * @tparam Scal type of the scalar * @tparam Mat type of the matrix or expression * * @param s scalar * @param m matrix or expression * @return Elementwise division of a scalar by matrix. */ template * = nullptr, require_eigen_t* = nullptr> auto elt_divide(Scal s, const Mat& m) { return (s / m.array()).matrix(); } template * = nullptr> auto elt_divide(Scal1 s1, Scal2 s2) { return s1 / s2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/columns_dot_self.hpp0000644000176200001440000000140614645137106024570 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COLUMNS_DOT_SELF_HPP #define STAN_MATH_PRIM_FUN_COLUMNS_DOT_SELF_HPP #include #include namespace stan { namespace math { /** * Returns the dot product of each column of a matrix with itself. * * @tparam T type of the matrix (must be derived from \c Eigen::MatrixBase) * * @param x Matrix. * @return Row vector containing the dot product of each column of the matrix * with itself. 
*/ template * = nullptr, require_not_eigen_vt* = nullptr> inline Eigen::Matrix, 1, T::ColsAtCompileTime> columns_dot_self( const T& x) { return x.colwise().squaredNorm(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/unit_vector_free.hpp0000644000176200001440000000272514645137106024600 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_UNIT_VECTOR_FREE_HPP #define STAN_MATH_PRIM_FUN_UNIT_VECTOR_FREE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Transformation of a unit length vector to a "free" vector * However, we are just fixing the unidentified radius to 1. * Thus, the transformation is just the identity * * @tparam EigVec A type derived from `EigenBase` with 1 compile time row or * column. * @param x unit vector of dimension K * @return Unit vector of dimension K considered "free" */ template * = nullptr> inline auto unit_vector_free(EigVec&& x) { auto&& x_ref = to_ref(std::forward(x)); check_unit_vector("stan::math::unit_vector_free", "Unit vector variable", x_ref); return x_ref; } /** * Overload of `unit_vector_free()` to untransform each Eigen vector * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase` with compile time rows or columns equal to 1. * @param x The standard vector to untransform. */ template * = nullptr> auto unit_vector_free(const T& x) { return apply_vector_unary::apply( x, [](auto&& v) { return unit_vector_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_right_spd.hpp0000644000176200001440000000264314645137106024721 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_SPD_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_SPD_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the solution of the system xA=b where A is symmetric * positive definite. 
* * @tparam EigMat1 type of the right-hand side matrix or vector * @tparam EigMat2 type of the second matrix * * @param b right-hand side matrix or vector * @param A matrix * @return x = b A^-1, solution of the linear system. * @throws std::domain_error if A is not square or the rows of b don't * match the size of A. */ template * = nullptr> inline Eigen::Matrix, EigMat1::RowsAtCompileTime, EigMat2::ColsAtCompileTime> mdivide_right_spd(const EigMat1& b, const EigMat2& A) { static const char* function = "mdivide_right_spd"; check_multiplicable(function, "b", b, "A", A); const auto& A_ref = to_ref(A); check_symmetric(function, "A", A_ref); check_not_nan(function, "A", A_ref); if (A.size() == 0) { return {b.rows(), 0}; } return mdivide_left_spd(A_ref, b.transpose()).transpose(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/hypergeometric_2F1.hpp0000644000176200001440000001650714645137106024677 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_2F1_HPP #define STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_2F1_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Calculate the Gauss Hypergeometric (2F1) function for special-case * combinations of parameters which can be calculated in closed-form. 
For * more background (and other possible special-cases), see: * https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric2F1/03/ * * The return value is wrapped in a boost::optional<> type so that a void * return is possible if no special-case rules are applicable * * @tparam Ta1 Type of scalar first 'a' argument * @tparam Ta2 Type of scalar second 'a' argument * @tparam Tb Type of scalar 'b' argument * @tparam Tz Type of scalar 'z' argument * @param[in] a1 First of 'a' arguments to function * @param[in] a2 Second of 'a' arguments to function * @param[in] b 'b' argument to function * @param[in] z Scalar z argument * @return Gauss hypergeometric function */ template >, require_all_arithmetic_t* = nullptr> inline RtnT hyper_2F1_special_cases(const Ta1& a1, const Ta2& a2, const Tb& b, const Tz& z) { // https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric2F1/03/01/ // // NOLINT if (z == 0.0) { return 1.0; } // https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric2F1/03/06/01/0001/ // // NOLINT if (a1 == b) { return inv(pow(1.0 - z, a2)); } // https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric2F1/03/06/01/0003/ // // NOLINT if (b == (a2 - 1.0)) { return (pow((1.0 - z), -a1 - 1.0) * (a2 + z * (a1 - a2 + 1.0) - 1.0)) / (a2 - 1); } if (a1 == a2) { // https://www.wolframalpha.com/input?i=Hypergeometric2F1%281%2C+1%2C+2%2C+-z%29 // // NOLINT if (a1 == 1.0 && b == 2.0 && z < 0) { auto pos_z = abs(z); return log1p(pos_z) / pos_z; } if (a1 == 0.5 && b == 1.5 && z < 1.0) { auto sqrt_z = sqrt(abs(z)); auto numerator = (z > 0.0) // https://www.wolframalpha.com/input?i=Hypergeometric2F1%281%2F2%2C+1%2F2%2C+3%2F2%2C+z%29 // // NOLINT ? 
asin(sqrt_z) // https://www.wolframalpha.com/input?i=Hypergeometric2F1%281%2F2%2C+1%2F2%2C+3%2F2%2C+-z%29 // // NOLINT : asinh(sqrt_z); return numerator / sqrt_z; } // https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric2F1/03/04/03/ // // NOLINT if (b == (a1 + 1) && z == 0.5) { return pow(2, a1 - 1) * a1 * (digamma((a1 + 1) / 2.0) - digamma(a1 / 2.0)); } } if (z == 1.0) { // https://www.wolframalpha.com/input?i=Hypergeometric2F1%28a1%2C+a2%2C+a1+%2B+a2+%2B+2%2C+1%29 // // NOLINT if (b == (a1 + a2 + 2)) { auto log_2f1 = lgamma(b) - (lgamma(a1 + 2) + lgamma(a2 + 2)); return exp(log_2f1); // https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric2F1/03/02/0001/ // // NOLINT } else if (b > (a1 + a2)) { auto log_2f1 = (lgamma(b) + lgamma(b - a1 - a2)) - (lgamma(b - a1) + lgamma(b - a2)); return exp(log_2f1); } } // https://www.wolframalpha.com/input?i=Hypergeometric2F1%283%2F2%2C+2%2C+3%2C+-z%29 // // NOLINT if (a1 == 1.5 && a2 == 2.0 && b == 3.0 && z < 0.0) { auto abs_z = abs(z); auto sqrt_1pz = sqrt(1 + abs_z); return -4 * (2 * sqrt_1pz + z - 2) / (sqrt_1pz * square(z)); } return {}; } } // namespace internal /** * Returns the Gauss hypergeometric function applied to the * input arguments: * \f$_2F_1(a_1,a_2;b;z)\f$ * * If the input parameters do not meet convergence criteria, then Euler's * transformation is applied to resolve this: * https://mathworld.wolfram.com/EulerTransform.html * * For some special-case combinations of parameters the series is calculated * in closed form, see the internal::hyper_2F1_special_cases function for more * details. 
* * See 'grad_2F1.hpp' for the derivatives wrt each parameter * * @tparam Ta1 Type of scalar first 'a' argument * @tparam Ta2 Type of scalar second 'a' argument * @tparam Tb Type of scalar 'b' argument * @tparam Tz Type of scalar 'z' argument * @param[in] a1 First of 'a' arguments to function * @param[in] a2 Second of 'a' arguments to function * @param[in] b 'b' argument to function * @param[in] z Scalar z argument * @return Gauss hypergeometric function */ template , typename OptT = boost::optional, require_all_arithmetic_t* = nullptr> inline return_type_t hypergeometric_2F1(const Ta1& a1, const Ta2& a2, const Tb& b, const Tz& z) { check_finite("hypergeometric_2F1", "a1", a1); check_finite("hypergeometric_2F1", "a2", a2); check_finite("hypergeometric_2F1", "b", b); check_finite("hypergeometric_2F1", "z", z); check_not_nan("hypergeometric_2F1", "a1", a1); check_not_nan("hypergeometric_2F1", "a2", a2); check_not_nan("hypergeometric_2F1", "b", b); check_not_nan("hypergeometric_2F1", "z", z); // Check whether value can be calculated by any special-case rules // before estimating infinite sum OptT special_case_a1a2 = internal::hyper_2F1_special_cases(a1, a2, b, z); if (special_case_a1a2.is_initialized()) { return special_case_a1a2.get(); } // Check whether any special case rules apply with 'a' arguments reversed // as 2F1(a1, a2, b, z) = 2F1(a2, a1, b, z) OptT special_case_a2a1 = internal::hyper_2F1_special_cases(a2, a1, b, z); if (special_case_a2a1.is_initialized()) { return special_case_a2a1.get(); } Eigen::Matrix a_args(2); Eigen::Matrix b_args(1); try { check_2F1_converges("hypergeometric_2F1", a1, a2, b, z); a_args << a1, a2; b_args << b; return hypergeometric_pFq(a_args, b_args, z); } catch (const std::exception& e) { // Apply Euler's hypergeometric transformation if function // will not converge with current arguments ScalarT a1_t = b - a1; ScalarT a2_t = a2; ScalarT b_t = b; ScalarT z_t = z / (z - 1); check_2F1_converges("hypergeometric_2F1", a1_t, a2_t, b_t, 
z_t); a_args << a1_t, a2_t; b_args << b_t; return hypergeometric_pFq(a_args, b_args, z_t) / pow(1 - z, a2); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log.hpp0000644000176200001440000000515414645137106022016 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_HPP #define STAN_MATH_PRIM_FUN_LOG_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `log()` so that it can be vectorized. */ struct log_fun { /** * Return natural log of specified argument. * * @tparam T type of argument * @param[in] x argument * @return Natural log of x. */ template static inline auto fun(const T& x) { using std::log; return log(x); } }; /** * Return the elementwise natural log of the specified argument, * which may be a scalar or any Stan container of numeric scalars. * The return type is the same as the argument type. * * @tparam Container type of container * @param[in] x container * @return Elementwise application of natural log to the argument. */ template < typename Container, require_not_container_st* = nullptr, require_not_var_matrix_t* = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto log(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `log()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Natural log of each variable in the container. */ template * = nullptr> inline auto log(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().log(); }); } namespace internal { /** * Return the natural logarithm of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return natural logarithm of the argument */ template inline std::complex complex_log(const std::complex& z) { static const double inf = std::numeric_limits::infinity(); static const double nan = std::numeric_limits::quiet_NaN(); if ((is_nan(z.real()) && is_inf(z.imag())) || (is_inf(z.real()) && is_nan(z.imag()))) { return {inf, nan}; } V r = sqrt(norm(z)); V theta = arg(z); return {log(r), theta}; } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lmgamma.hpp0000644000176200001440000000474214645137106022652 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LMGAMMA_HPP #define STAN_MATH_PRIM_FUN_LMGAMMA_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the natural logarithm of the multivariate gamma function * with the specified dimensions and argument. * *

The multivariate gamma function \f$\Gamma_k(x)\f$ for * dimensionality \f$k\f$ and argument \f$x\f$ is defined by * *

\f$\Gamma_k(x) = \pi^{k(k-1)/4} \, \prod_{j=1}^k \Gamma(x + (1 - j)/2)\f$, * * where \f$\Gamma()\f$ is the gamma function. * \f[ \mbox{lmgamma}(n, x) = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \ln\Gamma_n(x) & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{lmgamma}(n, x)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x\in \{\dots, -3, -2, -1, 0\}\\ \frac{\partial\, \ln\Gamma_n(x)}{\partial x} & \mbox{if } x\not\in \{\dots, -3, -2, -1, 0\}\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \ln\Gamma_n(x) = \pi^{n(n-1)/4} \, \prod_{j=1}^n \Gamma(x + (1 - j)/2) \f] \f[ \frac{\partial \, \ln\Gamma_n(x)}{\partial x} = \sum_{j=1}^n \Psi(x + (1 - j) / 2) \f] * * @tparam T type of scalar * @param k Number of dimensions. * @param x Function argument. * @return Natural log of the multivariate gamma function. */ template * = nullptr> inline return_type_t lmgamma(int k, T x) { return_type_t result = k * (k - 1) * LOG_PI_OVER_FOUR; return result + sum(lgamma(x + (1 - Eigen::ArrayXd::LinSpaced(k, 1, k)) / 2)); } /** * Enables the vectorized application of the natural log of the multivariate * gamma function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Natural log of the multivariate gamma function applied to the two * inputs. 
*/ template * = nullptr> inline auto lmgamma(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return lmgamma(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/multiply_log.hpp0000644000176200001440000000363314645137106023755 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MULTIPLY_LOG_HPP #define STAN_MATH_PRIM_FUN_MULTIPLY_LOG_HPP #include #include #include #include namespace stan { namespace math { /** * Calculate the value of the first argument * times log of the second argument while behaving * properly with 0 inputs. * * \f$ a * \log b \f$. * \f[ \mbox{multiply\_log}(x, y) = \begin{cases} 0 & \mbox{if } x=y=0\\ x\ln y & \mbox{if } x, y\neq 0 \\[6pt] \end{cases} \f] \f[ \frac{\partial\, \mbox{multiply\_log}(x, y)}{\partial x} = \begin{cases} \ln y \\[6pt] \end{cases} \f] \f[ \frac{\partial\, \mbox{multiply\_log}(x, y)}{\partial y} = \begin{cases} \frac{x}{y} \\[6pt] \end{cases} \f] * * @tparam T_a type of the first variable * @tparam T_b type of the second variable * @param a the first variable * @param b the second variable * @return a * log(b) */ template * = nullptr> inline return_type_t multiply_log(const T_a a, const T_b b) { using std::log; if (b == 0.0 && a == 0.0) { return 0.0; } return a * log(b); } /** * Enables the vectorized application of the multiply_log * function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return multiply_log function applied to the two inputs. 
*/ template * = nullptr, require_all_not_var_matrix_t* = nullptr> inline auto multiply_log(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return multiply_log(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/boost_policy.hpp0000644000176200001440000000231714645137106023740 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_BOOST_POLICY_HPP #define STAN_MATH_PRIM_FUN_BOOST_POLICY_HPP #include #include namespace stan { namespace math { /** * Boost policy that overrides the defaults to match the built-in * C++ standard library functions. * * The non-default behavior from Boost's built-ins are * (1) overflow errors return error numbers on error. * (2) pole errors return error numbers on error. * (3) doubles passed to Boost are not promoted to long double. * * The policy is equipped with an optional generic argument B controlling the * precision in some functions. If set to 0, the maximum precision available * in the type being used is demanded from Boost. Otherwise, it correspond to * the approximately B-bit precision, i.e. for trading speed for accuracy. */ template using boost_policy_t = boost::math::policies::policy< boost::math::policies::overflow_error< boost::math::policies::errno_on_error>, boost::math::policies::pole_error, boost::math::policies::promote_double, boost::math::policies::digits2>; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/divide.hpp0000644000176200001440000000260014645137106022472 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DIVIDE_HPP #define STAN_MATH_PRIM_FUN_DIVIDE_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the division of the first scalar by * the second scalar. * @param[in] x Specified scalar. * @param[in] y Specified scalar. * @return Scalar divided by the scalar. 
*/ template * = nullptr> inline return_type_t divide(const Scal1& x, const Scal2& y) { return x / y; } inline int divide(int x, int y) { if (unlikely(y == 0)) { throw_domain_error("divide", "denominator is", y, ""); } return x / y; } /** * Return matrix divided by scalar. * * @tparam Mat type of the matrix or expression * @tparam Scal type of the scalar * @param[in] m specified matrix or expression * @param[in] c specified scalar * @return matrix divided by the scalar */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto divide(const T1& m, const T2& c) { return (as_array_or_scalar(m) / as_array_or_scalar(c)).matrix(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/modified_bessel_first_kind.hpp0000644000176200001440000000431214645137106026561 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MODIFIED_BESSEL_FIRST_KIND_HPP #define STAN_MATH_PRIM_FUN_MODIFIED_BESSEL_FIRST_KIND_HPP #include #include #include #include namespace stan { namespace math { /** * \f[ \mbox{modified\_bessel\_first\_kind}(v, z) = \begin{cases} I_v(z) & \mbox{if } -\infty\leq z \leq \infty \\[6pt] \textrm{error} & \mbox{if } z = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{modified\_bessel\_first\_kind}(v, z)}{\partial z} = \begin{cases} \frac{\partial\, I_v(z)}{\partial z} & \mbox{if } -\infty\leq z\leq \infty \\[6pt] \textrm{error} & \mbox{if } z = \textrm{NaN} \end{cases} \f] \f[ {I_v}(z) = \left(\frac{1}{2}z\right)^v\sum_{k=0}^\infty \frac{\left(\frac{1}{4}z^2\right)^k}{k!\Gamma(v+k+1)} \f] \f[ \frac{\partial \, I_v(z)}{\partial z} = I_{v-1}(z)-\frac{v}{z}I_v(z) \f] * */ template * = nullptr> inline T2 modified_bessel_first_kind(int v, const T2 z) { check_not_nan("modified_bessel_first_kind", "z", z); return boost::math::cyl_bessel_i(v, z); } /** * This function exists because when z is of type integer, * cyl_bessel_i(v, z) returns an integer. This * results in overflow when the function value is large. 
*/ inline double modified_bessel_first_kind(int v, int z) { check_not_nan("modified_bessel_first_kind", "z", z); return boost::math::cyl_bessel_i(v, static_cast(z)); } /** * Enables the vectorized application of the modified_bessel_first_kind * function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return modified_bessel_first_kind function applied to the two inputs. */ template * = nullptr> inline auto modified_bessel_first_kind(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return modified_bessel_first_kind(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/get_imag.hpp0000644000176200001440000000263214645137106023007 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GET_IMAG_HPP #define STAN_MATH_PRIM_FUN_GET_IMAG_HPP #include #include namespace stan { namespace math { /** * Return the imaginary component of the complex argument. * * @tparam T value type of complex argument * @param[in] z complex value whose imaginary component is extracted * @return imaginary component of argument */ template inline T get_imag(const std::complex& z) { return z.imag(); } /** * Return the real component of the complex argument. * * @tparam Eig A type derived from `Eigen::EigenBase` * @param[in] z complex value whose real component is extracted * @return real component of argument */ template * = nullptr> inline auto get_imag(const Eig& z) { return z.imag(); } /** * Return the real component of the complex argument. 
* * @tparam StdVec A `std::vector` type with complex scalar type * @param[in] z complex value whose real component is extracted * @return real component of argument */ template * = nullptr> inline auto get_imag(const StdVec& z) { promote_scalar_t, StdVec> result(z.size()); std::transform(z.begin(), z.end(), result.begin(), [](auto&& x) { return get_imag(x); }); return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/corr_constrain.hpp0000644000176200001440000000467414645137106024270 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CORR_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_CORR_CONSTRAIN_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the result of transforming the specified scalar or container of values * to have a valid correlation value between -1 and 1 (inclusive). * *

The transform used is the hyperbolic tangent function, * *

\f$f(x) = \tanh x = \frac{\exp(2x) - 1}{\exp(2x) + 1}\f$. * * @tparam T type of value or container * @param[in] x value or container * @return tanh transform */ template inline plain_type_t corr_constrain(const T& x) { return tanh(x); } /** * Return the result of transforming the specified scalar or container of values * to have a valid correlation value between -1 and 1 (inclusive). * *

The transform used is as specified for * corr_constrain(T). The log absolute Jacobian * determinant is * *

\f$\log | \frac{d}{dx} \tanh x | = \log (1 - \tanh^2 x)\f$. * * @tparam T_x Type of scalar or container * @param[in] x value or container * @param[in,out] lp log density accumulator */ template inline auto corr_constrain(const T_x& x, T_lp& lp) { plain_type_t tanh_x = tanh(x); lp += sum(log1m(square(tanh_x))); return tanh_x; } /** * Return the result of transforming the specified scalar or container of values * to have a valid correlation value between -1 and 1 (inclusive). If the * `Jacobian` parameter is `true`, the log density accumulator is incremented * with the log absolute Jacobian determinant of the transform. All of the * transforms are specified with their Jacobians in the *Stan Reference Manual* * chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T_x Type of scalar or container * @tparam T_lp type of target log density accumulator * @param[in] x value or container * @param[in,out] lp log density accumulator */ template inline auto corr_constrain(const T_x& x, T_lp& lp) { if (Jacobian) { return corr_constrain(x, lp); } else { return corr_constrain(x); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/positive_ordered_constrain.hpp0000644000176200001440000001026214645137106026657 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_POSITIVE_ORDERED_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_POSITIVE_ORDERED_CONSTRAIN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return an increasing positive ordered vector derived from the specified * free vector. The returned constrained vector will have the * same dimensionality as the specified free vector. * * @tparam T type of elements in the vector * @param x Free vector of scalars. * @return Positive, increasing ordered vector. 
*/ template * = nullptr, require_not_st_var* = nullptr> inline auto positive_ordered_constrain(const EigVec& x) { using std::exp; Eigen::Index k = x.size(); plain_type_t y(k); if (k == 0) { return y; } const auto& x_ref = to_ref(x); y.coeffRef(0) = exp(x_ref.coeff(0)); for (Eigen::Index i = 1; i < k; ++i) { y.coeffRef(i) = y.coeff(i - 1) + exp(x_ref.coeff(i)); } return y; } /** * Return a positive valued, increasing positive ordered vector derived * from the specified free vector and increment the specified log * probability reference with the log absolute Jacobian determinant * of the transform. The returned constrained vector * will have the same dimensionality as the specified free vector. * * @tparam T type of elements in the vector * @param x Free vector of scalars. * @param lp Log probability reference. * @return Positive, increasing ordered vector. */ template * = nullptr> inline auto positive_ordered_constrain(const Vec& x, return_type_t& lp) { const auto& x_ref = to_ref(x); lp += sum(x_ref); return positive_ordered_constrain(x_ref); } /** * Return a positive valued, increasing positive ordered vector derived from the * specified free vector. The returned constrained vector will have the same * dimensionality as the specified free vector. If the `Jacobian` parameter is * `true`, the log density accumulator is incremented with the log absolute * Jacobian determinant of the transform. All of the transforms are specified * with their Jacobians in the *Stan Reference Manual* chapter Constraint * Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam Vec A type inheriting from `Eigen::EigenBase`, a `var_value` with * inner type inheriting from `Eigen::EigenBase` * @param x Free vector of scalars * @param[in, out] lp log density accumulato * @return Positive, increasing ordered vector */ template * = nullptr> inline auto positive_ordered_constrain(const Vec& x, return_type_t& lp) { if (Jacobian) { return positive_ordered_constrain(x, lp); } else { return positive_ordered_constrain(x); } } /** * Return a positive valued, increasing positive ordered vector derived from the * specified free vector. The returned constrained vector will have the same * dimensionality as the specified free vector. If the `Jacobian` parameter is * `true`, the log density accumulator is incremented with the log absolute * Jacobian determinant of the transform. All of the transforms are specified * with their Jacobians in the *Stan Reference Manual* chapter Constraint * Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam Vec A standard vector with inner type inheriting from * `Eigen::EigenBase`, a `var_value` with inner type inheriting from * `Eigen::EigenBase` * @param x Free vector of scalars * @param[in, out] lp log density accumulato * @return Positive, increasing ordered vector */ template * = nullptr> inline auto positive_ordered_constrain(const T& x, return_type_t& lp) { return apply_vector_unary::apply(x, [&lp](auto&& v) { return positive_ordered_constrain(v, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inc_beta_dda.hpp0000644000176200001440000000563014645137106023610 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INC_BETA_DDA_HPP #define STAN_MATH_PRIM_FUN_INC_BETA_DDA_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the partial derivative of the regularized * incomplete beta function, I_{z}(a, b) with respect to a. * The power series used to compute the derivative tends to * converge slowly when a and b are large, especially if z * approaches 1. The implementation will throw an exception * if the series have not converged within 100,000 iterations. * The current implementation has been tested for values * of a and b up to 12500 and z = 0.999. 
* * @tparam T scalar types of arguments * @param a first argument * @param b second argument * @param z upper bound of the integral * @param digamma_a value of digamma(a) * @param digamma_ab value of digamma(b) * @return partial derivative of the incomplete beta with respect to a * * @pre a >= 0 * @pre b >= 0 * @pre 0 <= z <= 1 */ template T inc_beta_dda(T a, T b, T z, T digamma_a, T digamma_ab) { using std::fabs; using std::log; using std::pow; if (b > a) { if ((0.1 < z && z <= 0.75 && b > 500) || (0.01 < z && z <= 0.1 && b > 2500) || (0.001 < z && z <= 0.01 && b > 1e5)) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); } } if (z > 0.75 && a < 500) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); } if (z > 0.9 && a < 2500) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); } if (z > 0.99 && a < 1e5) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); } if (z > 0.999) { return -inc_beta_ddb(b, a, 1 - z, digamma_a, digamma_ab); } double threshold = 1e-10; const T a_plus_b = a + b; const T a_plus_1 = a + 1; digamma_a += inv(a); // Need digamma(a + 1), not digamma(a); // Common prefactor to regularize numerator and denominator T prefactor = pow(a_plus_1 / a_plus_b, 3); T sum_numer = (digamma_ab - digamma_a) * prefactor; T sum_denom = prefactor; T summand = prefactor * z * a_plus_b / a_plus_1; T k = 1; digamma_ab += inv(a_plus_b); digamma_a += inv(a_plus_1); while (fabs(summand) > threshold) { sum_numer += (digamma_ab - digamma_a) * summand; sum_denom += summand; summand *= (1 + (a_plus_b) / k) * (1 + k) / (1 + a_plus_1 / k); digamma_ab += inv(a_plus_b + k); digamma_a += inv(a_plus_1 + k); ++k; summand *= z / k; if (k > 1e5) { throw_domain_error("inc_beta_dda", "did not converge within 10000 iterations", "", ""); } } return inc_beta(a, b, z) * (log(z) + sum_numer / sum_denom); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/quad_form.hpp0000644000176200001440000000405714645137106023213 
0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QUAD_FORM_HPP #define STAN_MATH_PRIM_FUN_QUAD_FORM_HPP #include #include #include namespace stan { namespace math { /** * Return the quadratic form \f$ B^T A B \f$. * * Symmetry of the resulting matrix is not guaranteed due to numerical * precision. * * @tparam EigMat1 type of the first (square) matrix * @tparam EigMat2 type of the second matrix * * @param A square matrix * @param B second matrix * @return The quadratic form, which is a symmetric matrix. * @throws std::invalid_argument if A is not square, or if A cannot be * multiplied by B */ template * = nullptr, require_not_eigen_col_vector_t* = nullptr, require_vt_same* = nullptr, require_all_vt_arithmetic* = nullptr> inline auto quad_form(const EigMat1& A, const EigMat2& B) { check_square("quad_form", "A", A); check_multiplicable("quad_form", "A", A, "B", B); return make_holder( [](const auto& b, const auto& a) { return b.transpose() * a * b; }, to_ref(B), to_ref(A)); } /** * Return the quadratic form \f$ B^T A B \f$. * * @tparam EigMat type of the matrix * @tparam ColVec type of the vector * * @param A square matrix * @param B vector * @return The quadratic form (a scalar). 
* @throws std::invalid_argument if A is not square, or if A cannot be * multiplied by B */ template * = nullptr, require_eigen_col_vector_t* = nullptr, require_vt_same* = nullptr, require_all_vt_arithmetic* = nullptr> inline value_type_t quad_form(const EigMat& A, const ColVec& B) { check_square("quad_form", "A", A); check_multiplicable("quad_form", "A", A, "B", B); const auto& B_ref = to_ref(B); return B_ref.dot(A * B_ref); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/simplex_constrain.hpp0000644000176200001440000001136014645137106024772 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SIMPLEX_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_SIMPLEX_CONSTRAIN_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the simplex corresponding to the specified free vector. * A simplex is a vector containing values greater than or equal * to 0 that sum to 1. A vector with (K-1) unconstrained values * will produce a simplex of size K. * * The transform is based on a centered stick-breaking process. * * @tparam Vec type of the vector * @param y Free vector input of dimensionality K - 1. * @return Simplex of dimensionality K. */ template * = nullptr, require_not_st_var* = nullptr> inline auto simplex_constrain(const Vec& y) { // cut & paste simplex_constrain(Eigen::Matrix, T) w/o Jacobian using std::log; using T = value_type_t; int Km1 = y.size(); plain_type_t x(Km1 + 1); T stick_len(1.0); for (Eigen::Index k = 0; k < Km1; ++k) { T z_k = inv_logit(y.coeff(k) - log(Km1 - k)); x.coeffRef(k) = stick_len * z_k; stick_len -= x.coeff(k); } x.coeffRef(Km1) = stick_len; return x; } /** * Return the simplex corresponding to the specified free vector * and increment the specified log probability reference with * the log absolute Jacobian determinant of the transform. * * The simplex transform is defined through a centered * stick-breaking process. 
* * @tparam Vec type of the vector * @param y Free vector input of dimensionality K - 1. * @param lp Log probability reference to increment. * @return Simplex of dimensionality K. */ template * = nullptr, require_not_st_var* = nullptr> inline auto simplex_constrain(const Vec& y, value_type_t& lp) { using Eigen::Dynamic; using Eigen::Matrix; using std::log; using T = value_type_t; int Km1 = y.size(); // K = Km1 + 1 plain_type_t x(Km1 + 1); T stick_len(1.0); for (Eigen::Index k = 0; k < Km1; ++k) { double eq_share = -log(Km1 - k); // = logit(1.0/(Km1 + 1 - k)); T adj_y_k = y.coeff(k) + eq_share; T z_k = inv_logit(adj_y_k); x.coeffRef(k) = stick_len * z_k; lp += log(stick_len); lp -= log1p_exp(-adj_y_k); lp -= log1p_exp(adj_y_k); stick_len -= x.coeff(k); // equivalently *= (1 - z_k); } x.coeffRef(Km1) = stick_len; // no Jacobian contrib for last dim return x; } /** * Return the simplex corresponding to the specified free vector. If the * `Jacobian` parameter is `true`, the log density accumulator is incremented * with the log absolute Jacobian determinant of the transform. All of the * transforms are specified with their Jacobians in the *Stan Reference Manual* * chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam Vec A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time dynamic rows * and 1 column * @param[in] y free vector * @param[in, out] lp log density accumulator * @return simplex of dimensionality one greater than `y` */ template * = nullptr> auto simplex_constrain(const Vec& y, return_type_t& lp) { if (Jacobian) { return simplex_constrain(y, lp); } else { return simplex_constrain(y); } } /** * Return the simplex corresponding to the specified free vector. 
If the * `Jacobian` parameter is `true`, the log density accumulator is incremented * with the log absolute Jacobian determinant of the transform. All of the * transforms are specified with their Jacobians in the *Stan Reference Manual* * chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam Vec A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time dynamic rows and 1 column * @param[in] y free vector * @param[in, out] lp log density accumulator * @return simplex of dimensionality one greater than `y` */ template * = nullptr> inline auto simplex_constrain(const T& y, return_type_t& lp) { return apply_vector_unary::apply( y, [&lp](auto&& v) { return simplex_constrain(v, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/scale_matrix_exp_multiply.hpp0000644000176200001440000000456414645137106026527 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SCALE_MATRIX_EXP_MULTIPLY_HPP #define STAN_MATH_PRIM_FUN_SCALE_MATRIX_EXP_MULTIPLY_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return product of exp(At) and B, where A is a NxN double matrix, * B is a NxCb double matrix, and t is a double. * * Specialized for double values for efficiency. 
* * @tparam EigMat1 type of the first matrix * @tparam EigMat2 type of the second matrix * * @param[in] A Matrix * @param[in] B Matrix * @param[in] t double * @return exponential of At multiplied by B */ template * = nullptr> inline Eigen::Matrix scale_matrix_exp_multiply(const double& t, const EigMat1& A, const EigMat2& B) { check_square("scale_matrix_exp_multiply", "input matrix", A); check_multiplicable("scale_matrix_exp_multiply", "A", A, "B", B); if (A.size() == 0) { return {0, B.cols()}; } return matrix_exp_action_handler().action(A, B, t); } /** * Return product of exp(At) and B, where A is a NxN matrix, * B is a NxCb matrix and t is a scalar. * * Generic implementation when arguments are not all double. * * @tparam Tt type of \c t * @tparam EigMat1 type of the first matrix * @tparam EigMat2 type of the second matrix * @param[in] A Matrix * @param[in] B Matrix * @param[in] t double * @return exponential of At multiplied by B */ template * = nullptr, require_any_autodiff_t, value_type_t>* = nullptr> inline Eigen::Matrix, Eigen::Dynamic, EigMat2::ColsAtCompileTime> scale_matrix_exp_multiply(const Tt& t, const EigMat1& A, const EigMat2& B) { check_square("scale_matrix_exp_multiply", "input matrix", A); check_multiplicable("scale_matrix_exp_multiply", "A", A, "B", B); if (A.size() == 0) { return {0, B.cols()}; } return multiply(matrix_exp(multiply(A, t)), B); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/block.hpp0000644000176200001440000000167414645137106022332 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_BLOCK_HPP #define STAN_MATH_PRIM_FUN_BLOCK_HPP #include #include namespace stan { namespace math { /** * Return a nrows x ncols submatrix starting at (i-1, j-1). * * @tparam T type of elements in the matrix * @param m Matrix. * @param i Starting row. * @param j Starting column. * @param nrows Number of rows in block. * @param ncols Number of columns in block. 
* @throw std::out_of_range if either index is out of range. */ template * = nullptr> inline auto block(const T& m, size_t i, size_t j, size_t nrows, size_t ncols) { check_row_index("block", "i", m, i); check_row_index("block", "i+nrows-1", m, i + nrows - 1); check_column_index("block", "j", m, j); check_column_index("block", "j+ncols-1", m, j + ncols - 1); return m.block(i - 1, j - 1, nrows, ncols); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/modified_bessel_second_kind.hpp0000644000176200001440000000346214645137106026712 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MODIFIED_BESSEL_SECOND_KIND_HPP #define STAN_MATH_PRIM_FUN_MODIFIED_BESSEL_SECOND_KIND_HPP #include #include #include namespace stan { namespace math { /** * \f[ \mbox{modified\_bessel\_second\_kind}(v, z) = \begin{cases} \textrm{error} & \mbox{if } z \leq 0 \\ K_v(z) & \mbox{if } z > 0 \\[6pt] \textrm{NaN} & \mbox{if } z = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{modified\_bessel\_second\_kind}(v, z)}{\partial z} = \begin{cases} \textrm{error} & \mbox{if } z \leq 0 \\ \frac{\partial\, K_v(z)}{\partial z} & \mbox{if } z > 0 \\[6pt] \textrm{NaN} & \mbox{if } z = \textrm{NaN} \end{cases} \f] \f[ {K_v}(z) = \frac{\pi}{2}\cdot\frac{I_{-v}(z) - I_{v}(z)}{\sin(v\pi)} \f] \f[ \frac{\partial \, K_v(z)}{\partial z} = -\frac{v}{z}K_v(z)-K_{v-1}(z) \f] * */ template * = nullptr> inline T2 modified_bessel_second_kind(int v, const T2 z) { return boost::math::cyl_bessel_k(v, z); } /** * Enables the vectorized application of the modified_bessel_second_kind * function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return modified_bessel_second_kind function applied to the two inputs. 
*/ template * = nullptr> inline auto modified_bessel_second_kind(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return modified_bessel_second_kind(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/identity_matrix.hpp0000644000176200001440000000120414645137106024442 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IDENTITY_MATRIX_HPP #define STAN_MATH_PRIM_FUN_IDENTITY_MATRIX_HPP #include #include #include namespace stan { namespace math { /** * Return a square identity matrix * * @param K size of the matrix * @return An identity matrix of size K. * @throw std::domain_error if K is negative. */ template * = nullptr> inline auto identity_matrix(int K) { check_nonnegative("identity_matrix", "size", K); return T::Identity(K, K); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sign.hpp0000644000176200001440000000210114645137106022162 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SIGN_HPP #define STAN_MATH_PRIM_FUN_SIGN_HPP #include #include namespace stan { namespace math { // returns 1 if NaN is passed in. template * = nullptr> inline int sign(const T& z) { return (z == 0) ? 0 : z < 0 ? -1 : 1; } /** * Structure to wrap `sign()` so it can be vectorized. */ struct sign_fun { /** * Return the sign of the specified argument. * * @tparam T type of argument * @param x argument * @return sign of the argument. */ template static inline int fun(const T& x) { return sign(x); } }; /** * Return the elementwise application of `sign()` to * specified argument container. * * @tparam T type of container * @param x container * @return Elementwise sign of members of container. 
*/ template * = nullptr> inline auto sign(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/reverse.hpp0000644000176200001440000000173414645137106022710 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_REVERSE_HPP #define STAN_MATH_PRIM_FUN_REVERSE_HPP #include #include #include #include namespace stan { namespace math { /** * Return a copy of the specified array in reversed order. * * @tparam T type of elements in the array * @param x array to reverse * @return Array in reversed order. */ template inline std::vector reverse(const std::vector& x) { std::vector rev(x.size()); std::reverse_copy(x.begin(), x.end(), rev.begin()); return rev; } /** * Return a copy of the specified vector or row vector * in reversed order. * * @tparam T type of container (vector or row vector) * @param x vector or row vector to reverse * @return Vector or row vector in reversed order. */ template > inline auto reverse(const T& x) { return x.reverse(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/append_array.hpp0000644000176200001440000000443614645137106023704 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_APPEND_ARRAY_HPP #define STAN_MATH_PRIM_FUN_APPEND_ARRAY_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the concatenation of two specified vectors in the order of * the arguments. 
* * The return type is computed with append_return_type * * @tparam T1 type of elements in first vector * @tparam T2 type of elements in second vector * @param x First vector * @param y Second vector * @return A vector of x and y concatenated together (in that order) */ template inline typename append_return_type, std::vector >::type append_array(const std::vector& x, const std::vector& y) { typename append_return_type, std::vector >::type z; std::vector zdims; if (x.empty()) { zdims = dims(y); zdims[0] += x.size(); } else { zdims = dims(x); zdims[0] += y.size(); } resize(z, zdims); for (size_t i = 0; i < x.size(); ++i) { assign(z[i], x[i]); } for (size_t i = 0; i < y.size(); ++i) { assign(z[i + x.size()], y[i]); } return z; } /** * Return the concatenation of two specified vectors in the order of * the arguments. * * @tparam T1 type of elements in both vectors * @param x First vector * @param y Second vector * @return A vector of x and y concatenated together (in that order) */ template inline std::vector append_array(const std::vector& x, const std::vector& y) { std::vector z; if (!x.empty() && !y.empty()) { std::vector xdims = dims(x), ydims = dims(y); check_matching_sizes("append_array", "dimension of x", xdims, "dimension of y", ydims); for (size_t i = 1; i < xdims.size(); ++i) { check_size_match("append_array", "shape of x", xdims[i], "shape of y", ydims[i]); } } z.reserve(x.size() + y.size()); z.insert(z.end(), x.begin(), x.end()); z.insert(z.end(), y.begin(), y.end()); return z; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/LDLT_factor.hpp0000644000176200001440000000341314645137106023326 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LDLT_FACTOR_HPP #define STAN_MATH_PRIM_FUN_LDLT_FACTOR_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * LDLT_factor is a structure that holds a matrix of type T and the * LDLT of its values. 
* * @tparam T type of elements in the matrix */ template class LDLT_factor; /** * An LDLT_factor is a structure that holds a matrix of type T and the * LDLT of its values. * * @tparam T type of elements in the matrix */ template class LDLT_factor::value && !is_var>::value>::value>> { private: plain_type_t matrix_; Eigen::LDLT> ldlt_; public: template , plain_type_t>* = nullptr> explicit LDLT_factor(const S& matrix) : matrix_(matrix), ldlt_(matrix_.ldlt()) {} /** * Return a const reference to the underlying matrix */ const auto& matrix() const { return matrix_; } /** * Return a const reference to the LDLT factor of the matrix */ const auto& ldlt() const { return ldlt_; } }; /** * Make an LDLT_factor with matrix type `plain_type_t` * * @tparam T Type of matrix to take the LDLT of * @param A Matrix to take the LDLT of * @return LDLT_factor of A */ template * = nullptr> inline auto make_ldlt_factor(const T& A) { return LDLT_factor>(A); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/gp_periodic_cov.hpp0000644000176200001440000001424614645137106024372 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GP_PERIODIC_COV_HPP #define STAN_MATH_PRIM_FUN_GP_PERIODIC_COV_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a periodic covariance matrix \f$ \mathbf{K} \f$ using the input \f$ * \mathbf{X} \f$. The elements of \f$ \mathbf{K} \f$ are defined as \f$ * \mathbf{K}_{ij} = k(\mathbf{X}_i,\mathbf{X}_j), \f$ where \f$ \mathbf{X}_i * \f$ is the \f$i\f$-th row of \f$ \mathbf{X} \f$ and \n \f$ * k(\mathbf{x},\mathbf{x}^\prime) = \sigma^2 \exp\left(-\frac{2\sin^2(\pi * |\mathbf{x}-\mathbf{x}^\prime|/p)}{\ell^2}\right), \f$ \n where \f$ \sigma^2 * \f$, \f$ \ell \f$ and \f$ p \f$ are the signal variance, length-scale and * period. * * @tparam T_x type of std::vector elements of x. * T_x can be a scalar, an Eigen::Vector, or an Eigen::RowVector. 
* @tparam T_sigma type of sigma * @tparam T_l type of length-scale * @tparam T_p type of period * * @param x std::vector of input elements. * This function assumes that all elements of x have the same size. * @param sigma standard deviation of the signal * @param l length-scale * @param p period * @return periodic covariance matrix * @throw std::domain_error if sigma <= 0, l <= 0, p <= 0 or * x is nan or infinite */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_periodic_cov(const std::vector &x, const T_sigma &sigma, const T_l &l, const T_p &p) { using std::exp; using std::sin; const char *fun = "gp_periodic_cov"; check_positive(fun, "signal standard deviation", sigma); check_positive(fun, "length-scale", l); check_positive(fun, "period", p); for (size_t n = 0; n < x.size(); ++n) { check_not_nan(fun, "element of x", x[n]); } Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x.size(), x.size()); size_t x_size = x.size(); if (x_size == 0) { return cov; } T_sigma sigma_sq = square(sigma); T_l neg_two_inv_l_sq = -2.0 * inv_square(l); T_p pi_div_p = pi() / p; size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov.coeffRef(j, j) = sigma_sq; size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq * exp(square(sin(pi_div_p * distance(x[i], x[j]))) * neg_two_inv_l_sq); } } } } return cov; } /** * Returns a periodic covariance matrix \f$ \mathbf{K} \f$ using inputs * \f$ \mathbf{X}_1 \f$ and \f$ \mathbf{X}_2 \f$. 
* The elements of \f$ \mathbf{K} \f$ are defined as * \f$ \mathbf{K}_{ij} = k(\mathbf{X}_{1_i},\mathbf{X}_{2_j}), \f$ where * \f$ \mathbf{X}_{1_i} \f$ and \f$ \mathbf{X}_{2_j} \f$ are the \f$i\f$-th * and \f$j\f$-th rows of \f$ \mathbf{X}_1 \f$ and \f$ \mathbf{X}_2 \f$ and * \n \f$ k(\mathbf{x},\mathbf{x}^\prime) = \sigma^2 * \exp\left(-\frac{2\sin^2(\pi * |\mathbf{x}-\mathbf{x}^\prime|/p)}{\ell^2}\right), \f$ \n where \f$ * \sigma^2 \f$, \f$ \ell \f$ and \f$ p \f$ are the signal variance, * length-scale and period. * * @tparam T_x1 type of std::vector elements of x1 * T_x1 can be a scalar, an Eigen::Vector, or an Eigen::RowVector. * @tparam T_x2 type of std::vector elements of x2 * T_x2 can be a scalar, an Eigen::Vector, or an Eigen::RowVector. * @tparam T_sigma type of sigma * @tparam T_l type of length-scale * @tparam T_p type of period * * @param x1 std::vector of first input elements * @param x2 std::vector of second input elements. * This function assumes that all the elements of x1 and x2 have the same * sizes. 
* @param sigma standard deviation of the signal * @param l length-scale * @param p period * @return periodic covariance matrix * @throw std::domain_error if sigma <= 0, l <= 0, p <= 0 , * x1 or x2 is nan or infinite */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_periodic_cov(const std::vector &x1, const std::vector &x2, const T_sigma &sigma, const T_l &l, const T_p &p) { using std::exp; using std::sin; const char *fun = "gp_periodic_cov"; check_positive(fun, "signal standard deviation", sigma); check_positive(fun, "length-scale", l); check_positive(fun, "period", p); for (size_t n = 0; n < x1.size(); ++n) { check_not_nan(fun, "element of x1", x1[n]); } for (size_t n = 0; n < x2.size(); ++n) { check_not_nan(fun, "element of x2", x2[n]); } Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1.size(), x2.size()); if (x1.size() == 0 || x2.size() == 0) { return cov; } T_sigma sigma_sq = square(sigma); T_l neg_two_inv_l_sq = -2.0 * inv_square(l); T_p pi_div_p = pi() / p; size_t block_size = 10; for (size_t ib = 0; ib < x1.size(); ib += block_size) { for (size_t jb = 0; jb < x2.size(); jb += block_size) { size_t j_end = std::min(x2.size(), jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1.size(), ib + block_size); for (size_t i = ib; i < i_end; ++i) { cov.coeffRef(i, j) = sigma_sq * exp(square(sin(pi_div_p * distance(x1[i], x2[j]))) * neg_two_inv_l_sq); } } } } return cov; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/trace.hpp0000644000176200001440000000131514645137106022326 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRACE_HPP #define STAN_MATH_PRIM_FUN_TRACE_HPP #include #include namespace stan { namespace math { /** * Returns the trace of the specified matrix. The trace * is defined as the sum of the elements on the diagonal. * The matrix is not required to be square. Returns 0 if * matrix is empty. 
* * @tparam T type of the elements in the matrix * @param[in] m Specified matrix. * @return Trace of the matrix. */ template * = nullptr, require_not_st_var* = nullptr> inline value_type_t trace(const T& m) { return m.trace(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_int.hpp0000644000176200001440000000452614645137106022533 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_INT_HPP #define STAN_MATH_PRIM_FUN_TO_INT_HPP #include #include namespace stan { namespace math { /** * Returns the input scalar as an integer type. Specialization for integral * types which do not need conversion, reduces to a no-op. * * @tparam T type of integral argument * @param x argument * @return Input argument unchanged */ template * = nullptr> inline T to_int(T x) { return std::forward(x); } /** * Returns the input scalar as an integer type. This function performs no * rounding and simply truncates the decimal to return only the signficand as an * integer. * * Casting NaN and Inf values to integers is considered undefined behavior as * NaN and Inf cannot be represented as an integer and most implementations * simply overflow, as such this function throws for these inputs. * * The function also throws for floating-point values that are too large to be * represented as an integer. * * @tparam T type of argument (must be arithmetic) * @param x argument * @return Integer value of argument * @throw std::domain_error for NaN, Inf, or floating point values not in range * to be represented as int */ template * = nullptr> inline int to_int(T x) { static const char* function = "to_int"; check_bounded(function, "x", x, std::numeric_limits::min(), std::numeric_limits::max()); return static_cast(x); } /** * Return elementwise integer value of the specified real-valued * container. 
* * @tparam T type of argument * @param x argument * @return Integer value of argument */ struct to_int_fun { template static inline auto fun(const T& x) { return to_int(x); } }; /** * Returns the elementwise `to_int()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x argument * @return Integer value of each variable in the container. */ template * = nullptr> inline auto to_int(const Container& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/any.hpp0000644000176200001440000000437114645137106022024 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ANY_HPP #define STAN_MATH_PRIM_FUN_ANY_HPP #include #include #include namespace stan { namespace math { /** * Return true if any values in the input are true. * * Overload for a single boolean input * * @tparam T Any type convertible to `bool` * @param x boolean input * @return The input unchanged */ template >* = nullptr> constexpr inline bool any(T x) { return x; } /** * Return true if any values in the input are true. * * Overload for Eigen types * * @tparam ContainerT A type derived from `Eigen::EigenBase` that has an * `integral` scalar type * @param x Eigen object of boolean inputs * @return Boolean indicating whether any elements are true */ template * = nullptr> inline bool any(const ContainerT& x) { return x.any(); } // Forward-declaration for correct resolution of any(std::vector) template inline bool any(const std::tuple& x); /** * Return true if any values in the input are true. * * Overload for a std::vector/nested inputs. The Eigen::Map/apply_vector_unary * approach cannot be used as std::vector types do not have a .data() * member and are not always stored contiguously. 
* * @tparam InnerT Type within std::vector * @param x Nested container of boolean inputs * @return Boolean indicating whether any elements are true */ template inline bool any(const std::vector& x) { return std::any_of(x.begin(), x.end(), [](const auto& i) { return any(i); }); } /** * Return true if any values in the input are true. * * Overload for a tuple input. * * @tparam Types of items within tuple * @param x Tuple of boolean scalar-type elements * @return Boolean indicating whether any elements are true */ template inline bool any(const std::tuple& x) { bool any_true = false; math::for_each( [&any_true](const auto& i) { any_true = any_true || any(i); return; }, x); return any_true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/array_builder.hpp0000644000176200001440000000221014645137106024047 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ARRAY_BUILDER_HPP #define STAN_MATH_PRIM_FUN_ARRAY_BUILDER_HPP #include #include #include namespace stan { namespace math { /** * Structure for building up arrays in an expression (rather than * in statements) using an argument-chaining add() method and * a getter method array() to return the result. * Array elements are held in std::vector of type T. * * @tparam T type of array elements */ template class array_builder { private: std::vector x_; public: /** * Construct an array_builder. */ array_builder() : x_() {} /** * Add one element of type S to array, promoting to type T. * * @param u element to add * @returns this array_builder object */ template array_builder& add(const S& u) { x_.push_back(promote_elements::promote(u)); return *this; } /** * Getter method to return array itself. * * @returns std:vector of composed array elements. 
*/ std::vector array() { return x_; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/zeros_row_vector.hpp0000644000176200001440000000110614645137106024641 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ZEROS_ROW_VECTOR_HPP #define STAN_MATH_PRIM_FUN_ZEROS_ROW_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a row vector of zeros * * @param K size of the row vector * @return A row vector of size K with all elements initialized to 0. * @throw std::domain_error if K is negative. */ inline auto zeros_row_vector(int K) { check_nonnegative("zeros_row_vector", "size", K); return Eigen::RowVectorXd::Zero(K); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/min.hpp0000644000176200001440000000310114645137106022006 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MIN_HPP #define STAN_MATH_PRIM_FUN_MIN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the minimum coefficient of the two specified * scalar arguments. * * @tparam T1 type of first argument (must be arithmetic) * @tparam T2 type of second argument (must be arithmetic) * @param x first argument * @param y second argument * @return minimum value of the two arguments */ template * = nullptr> auto min(T1 x, T2 y) { return std::min(x, y); } /** * Returns the minimum coefficient in the specified * matrix, vector, row vector or std vector. 
* * @tparam T type of elements in the container * @param m specified matrix, vector, row vector or std vector * @return mainimum coefficient value in the container, or infinity if the * container is size zero and the scalar type in container is floating point * number * @throws std::invalid_argument if the vector is size zero and the * scalar type in the container is integer */ template * = nullptr> inline value_type_t min(const T& m) { if (std::is_integral>::value) { check_nonzero_size("min", "int vector", m); } else if (m.size() == 0) { return INFTY; } return apply_vector_unary::reduce( m, [](const auto& x) { return x.minCoeff(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/grad_2F1.hpp0000644000176200001440000003255114645137106022563 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GRAD_2F1_HPP #define STAN_MATH_PRIM_FUN_GRAD_2F1_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Implementation function to calculate the gradients of the hypergeometric * function, 2F1 with respect to the a1, a2, and b2 parameters. * * Calculate the gradients of the hypergeometric function (2F1) * as the power series stopping when the series converges * to within precision or throwing when the * function takes max_steps steps. 
* * @tparam calc_a1 boolean for whether to calculate gradients w.r.t a1 * @tparam calc_a2 boolean for whether to calculate gradients w.r.t a2 * @tparam calc_b1 boolean for whether to calculate gradients w.r.t b1 * @tparam T1 scalar type of a1 * @tparam T2 scalar type of a2 * @tparam T3 scalar type of b1 * @tparam T_z scalar type of z * @param[in] a1 see generalized hypergeometric function definition * @param[in] a2 see generalized hypergeometric function definition * @param[in] b1 see generalized hypergeometric function definition * @param[in] z see generalized hypergeometric function definition * @param[in] precision magnitude of the increment of the infinite sum * to truncate the sum * @param[in] max_steps number of steps to take * @return Three-element tuple containing gradients w.r.t. a1, a2, and b1, * as indicated by the calc_a1, calc_a2, and calc_b1 booleans */ template , typename TupleT = std::tuple> TupleT grad_2F1_impl_ab(const T1& a1, const T2& a2, const T3& b1, const T_z& z, double precision = 1e-14, int max_steps = 1e6) { TupleT grad_tuple = TupleT(0, 0, 0); if (z == 0) { return grad_tuple; } using ScalarArrayT = Eigen::Array; ScalarArrayT log_g_old = ScalarArrayT::Constant(3, 1, NEGATIVE_INFTY); ScalarT log_t_old = 0.0; ScalarT log_t_new = 0.0; int sign_z = sign(z); auto log_z = log(abs(z)); int log_t_new_sign = 1.0; int log_t_old_sign = 1.0; Eigen::Array log_g_old_sign = Eigen::Array::Ones(3); int sign_zk = sign_z; int k = 0; const int min_steps = 5; ScalarT inner_diff = 1; ScalarArrayT g_current = ScalarArrayT::Zero(3); while ((inner_diff > precision || k < min_steps) && k < max_steps) { ScalarT p = ((a1 + k) * (a2 + k) / ((b1 + k) * (1.0 + k))); if (p == 0) { return grad_tuple; } log_t_new += log(fabs(p)) + log_z; log_t_new_sign = sign(value_of_rec(p)) * log_t_new_sign; if (calc_a1) { ScalarT term_a1 = log_g_old_sign(0) * log_t_old_sign * exp(log_g_old(0) - log_t_old) + inv(a1 + k); log_g_old(0) = log_t_new + log(abs(term_a1)); log_g_old_sign(0) = 
sign(value_of_rec(term_a1)) * log_t_new_sign; g_current(0) = log_g_old_sign(0) * exp(log_g_old(0)) * sign_zk; std::get<0>(grad_tuple) += g_current(0); } if (calc_a2) { ScalarT term_a2 = log_g_old_sign(1) * log_t_old_sign * exp(log_g_old(1) - log_t_old) + inv(a2 + k); log_g_old(1) = log_t_new + log(abs(term_a2)); log_g_old_sign(1) = sign(value_of_rec(term_a2)) * log_t_new_sign; g_current(1) = log_g_old_sign(1) * exp(log_g_old(1)) * sign_zk; std::get<1>(grad_tuple) += g_current(1); } if (calc_b1) { ScalarT term_b1 = log_g_old_sign(2) * log_t_old_sign * exp(log_g_old(2) - log_t_old) + inv(-(b1 + k)); log_g_old(2) = log_t_new + log(abs(term_b1)); log_g_old_sign(2) = sign(value_of_rec(term_b1)) * log_t_new_sign; g_current(2) = log_g_old_sign(2) * exp(log_g_old(2)) * sign_zk; std::get<2>(grad_tuple) += g_current(2); } inner_diff = g_current.array().abs().maxCoeff(); log_t_old = log_t_new; log_t_old_sign = log_t_new_sign; sign_zk *= sign_z; ++k; } if (k > max_steps) { throw_domain_error("grad_2F1", "k (internal counter)", max_steps, "exceeded ", " iterations, hypergeometric function gradient " "did not converge."); } return grad_tuple; } /** * Implementation function to calculate the gradients of the hypergeometric * function, 2F1. * * Calculate the gradients of the hypergeometric function (2F1) * as the power series stopping when the series converges * to within precision or throwing when the * function takes max_steps steps. * * This power-series representation converges for all gradients * under the same conditions as the 2F1 function itself. As with the * hypergeometric_2F1 function, if the parameters do not meet convergence * criteria then the gradients are calculated using Euler's transformation. 
* * @tparam calc_a1 boolean for whether to calculate gradients w.r.t a1 * @tparam calc_a2 boolean for whether to calculate gradients w.r.t a2 * @tparam calc_b1 boolean for whether to calculate gradients w.r.t b1 * @tparam calc_z boolean for whether to calculate gradients w.r.t z * @tparam T1 scalar type of a1 * @tparam T2 scalar type of a2 * @tparam T3 scalar type of b1 * @tparam T_z scalar type of z * @param[in] a1 see generalized hypergeometric function definition * @param[in] a2 see generalized hypergeometric function definition * @param[in] b1 see generalized hypergeometric function definition * @param[in] z see generalized hypergeometric function definition * @param[in] precision magnitude of the increment of the infinite sum * to truncate the sum * @param[in] max_steps number of steps to take * @return Four-element tuple containing gradients w.r.t. to each parameter, * as indicated by the calc_* booleans */ template , typename TupleT = std::tuple> TupleT grad_2F1_impl(const T1& a1, const T2& a2, const T3& b1, const T_z& z, double precision = 1e-14, int max_steps = 1e6) { bool euler_transform = false; try { check_2F1_converges("hypergeometric_2F1", a1, a2, b1, z); } catch (const std::exception& e) { // Apply Euler's hypergeometric transformation if function // will not converge with current arguments check_2F1_converges("hypergeometric_2F1 (euler transform)", b1 - a1, a2, b1, z / (z - 1)); euler_transform = true; } std::tuple grad_tuple_ab; TupleT grad_tuple_rtn = TupleT(0, 0, 0, 0); if (euler_transform) { ScalarT a1_euler = a2; ScalarT a2_euler = b1 - a1; ScalarT z_euler = z / (z - 1); if (calc_z) { auto hyper1 = hypergeometric_2F1(a1_euler, a2_euler, b1, z_euler); auto hyper2 = hypergeometric_2F1(1 + a2, 1 - a1 + b1, 1 + b1, z_euler); std::get<3>(grad_tuple_rtn) = a2 * pow(1 - z, -1 - a2) * hyper1 + (a2 * (b1 - a1) * pow(1 - z, -a2) * (inv(z - 1) - z / square(z - 1)) * hyper2) / b1; } if (calc_a1 || calc_a2 || calc_b1) { // 'a' gradients under Euler 
transform are constructed using the gradients // of both elements, so need to compute both if any are required constexpr bool calc_a1_euler = calc_a1 || calc_a2; // 'b' gradients under Euler transform require gradients from 'a2' constexpr bool calc_a2_euler = calc_a1 || calc_a2 || calc_b1; grad_tuple_ab = grad_2F1_impl_ab( a1_euler, a2_euler, b1, z_euler); auto pre_mult_ab = inv(pow(1.0 - z, a2)); if (calc_a1) { std::get<0>(grad_tuple_rtn) = -pre_mult_ab * std::get<1>(grad_tuple_ab); } if (calc_a2) { auto hyper_da2 = hypergeometric_2F1(a1_euler, a2, b1, z_euler); std::get<1>(grad_tuple_rtn) = -pre_mult_ab * hyper_da2 * log1m(z) + pre_mult_ab * std::get<0>(grad_tuple_ab); } if (calc_b1) { std::get<2>(grad_tuple_rtn) = pre_mult_ab * (std::get<1>(grad_tuple_ab) + std::get<2>(grad_tuple_ab)); } } } else { if (calc_z) { auto hyper_2f1_dz = hypergeometric_2F1(a1 + 1.0, a2 + 1.0, b1 + 1.0, z); std::get<3>(grad_tuple_rtn) = (a1 * a2 * hyper_2f1_dz) / b1; } if (calc_a1 || calc_a2 || calc_b1) { grad_tuple_ab = grad_2F1_impl_ab(a1, a2, b1, z); if (calc_a1) { std::get<0>(grad_tuple_rtn) = std::get<0>(grad_tuple_ab); } if (calc_a2) { std::get<1>(grad_tuple_rtn) = std::get<1>(grad_tuple_ab); } if (calc_b1) { std::get<2>(grad_tuple_rtn) = std::get<2>(grad_tuple_ab); } } } return grad_tuple_rtn; } } // namespace internal /** * Calculate the gradients of the hypergeometric function (2F1) * as the power series stopping when the series converges * to within precision or throwing when the * function takes max_steps steps. 
* * Overload for use where the destination gradients are not required to be the * same type as the input variables (most use-cases except grad_inc_beta) * * @tparam ReturnSameT Whether return gradients need to be the same type as * as inputs * @tparam T1 scalar type of a1 * @tparam T2 scalar type of a2 * @tparam T3 scalar type of b1 * @tparam T_z scalar type of z * @param[in] a1 see generalized hypergeometric function definition * @param[in] a2 see generalized hypergeometric function definition * @param[in] b1 see generalized hypergeometric function definition * @param[in] z see generalized hypergeometric function definition * @param[in] precision magnitude of the increment of the infinite sum * to truncate the sum * @param[in] max_steps number of steps to take */ template >* = nullptr> auto grad_2F1(const T1& a1, const T2& a2, const T3& b1, const T_z& z, double precision = 1e-14, int max_steps = 1e6) { return internal::grad_2F1_impl< !is_constant::value, !is_constant::value, !is_constant::value, !is_constant::value>(value_of(a1), value_of(a2), value_of(b1), value_of(z), precision, max_steps); } /** * Calculate the gradients of the hypergeometric function (2F1) * as the power series stopping when the series converges * to within precision or throwing when the * function takes max_steps steps. 
* * Overload for use where the destination gradients should be the same type * as the input variables (needed for the grad_inc_beta overloads) * * @tparam ReturnSameT Whether return gradients need to be the same type as * as inputs * @tparam T1 scalar type of a1 * @tparam T2 scalar type of a2 * @tparam T3 scalar type of b1 * @tparam T_z scalar type of z * @param[in] a1 see generalized hypergeometric function definition * @param[in] a2 see generalized hypergeometric function definition * @param[in] b1 see generalized hypergeometric function definition * @param[in] z see generalized hypergeometric function definition * @param[in] precision magnitude of the increment of the infinite sum * to truncate the sum * @param[in] max_steps number of steps to take */ template >* = nullptr> auto grad_2F1(const T1& a1, const T2& a2, const T3& b1, const T_z& z, double precision = 1e-14, int max_steps = 1e6) { return internal::grad_2F1_impl(a1, a2, b1, z, precision, max_steps); } /** * Calculate the gradients of the hypergeometric function (2F1) * as the power series stopping when the series converges * to within precision or throwing when the * function takes max_steps steps. 
* * @tparam T1 scalar type of a1 * @tparam T2 scalar type of a2 * @tparam T3 scalar type of b1 * @tparam T_z scalar type of z * @param[in] a1 see generalized hypergeometric function definition * @param[in] a2 see generalized hypergeometric function definition * @param[in] b1 see generalized hypergeometric function definition * @param[in] z see generalized hypergeometric function definition * @param[in] precision magnitude of the increment of the infinite sum * to truncate the sum * @param[in] max_steps number of steps to take */ template auto grad_2F1(const T1& a1, const T2& a2, const T3& b1, const T_z& z, double precision = 1e-14, int max_steps = 1e6) { return grad_2F1(a1, a2, b1, z, precision, max_steps); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/get_lp.hpp0000644000176200001440000000061514645137106022504 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GET_LP_HPP #define STAN_MATH_PRIM_FUN_GET_LP_HPP #include namespace stan { namespace math { template inline return_type_t get_lp( const T_lp& lp, const accumulator& lp_accum) { return lp + lp_accum.sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/tgamma.hpp0000644000176200001440000000272114645137106022500 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TGAMMA_HPP #define STAN_MATH_PRIM_FUN_TGAMMA_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the gamma function applied to the specified argument. * * @param x Argument. * @return The gamma function applied to argument. */ inline double tgamma(double x) { if (x == 0.0 || is_nonpositive_integer(x)) { throw_domain_error("tgamma", "x", x, "x == 0 or negative integer"); } return std::tgamma(x); } /** * Structure to wrap tgamma() so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Gamma function applied to x. 
* @throw std::domain_error if x is 0 or a negative integer */ struct tgamma_fun { template static inline auto fun(const T& x) { return tgamma(x); } }; /** * Vectorized version of tgamma(). * * @tparam T type of container * @param x container * @return Gamma function applied to each value in x. * @throw std::domain_error if any value is 0 or a negative integer */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto tgamma(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/is_any_nan.hpp0000644000176200001440000000176014645137106023352 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IS_ANY_NAN_HPP #define STAN_MATH_PRIM_FUN_IS_ANY_NAN_HPP #include #include namespace stan { namespace math { /** * Returns true if the input is NaN and false otherwise. * * Delegates to stan::math::is_nan so that * appropriate specializations can be loaded for autodiff * types. * * @param x Value to test. * @return true if the value is NaN. */ template inline bool is_any_nan(const T& x) { return is_nan(x); } /** * Returns true if any input is NaN and false otherwise. * * Delegates to stan::math::is_nan. * * @param x first argument * @param xs parameter pack of remaining arguments to forward to function * @return true if any value is NaN */ template inline bool is_any_nan(const T& x, const Ts&... xs) { return is_any_nan(x) || is_any_nan(xs...); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rep_matrix.hpp0000644000176200001440000000404514645137106023405 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_REP_MATRIX_HPP #define STAN_MATH_PRIM_FUN_REP_MATRIX_HPP #include #include #include namespace stan { namespace math { /** * Implementation of rep_matrix returning an Eigen matrix with scalar * type equal to the input scalar type. * @tparam Ret An Eigen type. 
* @tparam T A Scalar type. * @param x A Scalar whose values are propogated to all values in the return * matrix. * @param m Number or rows. * @param n Number of columns. */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto rep_matrix(const T& x, int m, int n) { check_nonnegative("rep_matrix", "rows", m); check_nonnegative("rep_matrix", "cols", n); return Ret::Constant(m, n, x); } /** * Default Implementation of rep_matrix returning an Eigen matrix with scalar * type equal to the input scalar type. * @tparam T A Scalar type. * @param x A Scalar whose values are propogated to all values in the return * matrix. * @param m Number or rows. * @param n Number of columns. */ template * = nullptr> inline auto rep_matrix(const T& x, int m, int n) { return rep_matrix< Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>>(x, m, n); } /** * Implementation of rep_matrix returning an Eigen matrix from an Eigen * vector. * @tparam Vec An Eigen vector. * @param x An Eigen vector. For Row vectors the values are replacated rowwise. * and for column vectors the values are repliacated colwise. * @param n Number of rows or columns. */ template * = nullptr> inline auto rep_matrix(const Vec& x, int n) { if (is_eigen_row_vector::value) { check_nonnegative("rep_matrix", "rows", n); return x.replicate(n, 1); } else { check_nonnegative("rep_matrix", "cols", n); return x.replicate(1, n); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/offset_multiplier_free.hpp0000644000176200001440000001152714645137106025773 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_OFFSET_MULTIPLIER_FREE_HPP #define STAN_MATH_PRIM_FUN_OFFSET_MULTIPLIER_FREE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the unconstrained variable that transforms to the * specified offset and multiplier constrained variable given the specified * offset and multiplier. * *

The transform in locmultiplier_constrain(T, double, double), * is reversed by the reverse affine transformation, * *

\f$f^{-1}(y) = \frac{y - L}{S}\f$ * * where \f$L\f$ and \f$S\f$ are the offset and multiplier. * *

If the offset is zero and multiplier is one, * this function reduces to identity_free(y). * * @tparam T type of constrained variable * @tparam L type of offset * @tparam S type of multiplier * @param y constrained value * @param[in] mu offset of constrained output * @param[in] sigma multiplier of constrained output * @return the unconstrained variable that transforms to the given constrained * variable given the offset and multiplier * @throw std::domain_error if sigma <= 0 * @throw std::domain_error if mu is not finite * @throw std::invalid_argument if non-scalar arguments don't match in size */ template inline auto offset_multiplier_free(const T& y, const M& mu, const S& sigma) { auto&& mu_ref = to_ref(mu); auto&& sigma_ref = to_ref(sigma); if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "y", y, "mu", mu); } if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "y", y, "sigma", sigma); } else if (is_matrix::value && is_matrix::value) { check_matching_dims("offset_multiplier_constrain", "mu", mu, "sigma", sigma); } check_finite("offset_multiplier_constrain", "offset", value_of(mu_ref)); check_positive_finite("offset_multiplier_constrain", "multiplier", value_of(sigma_ref)); return divide(subtract(y, mu_ref), sigma_ref); } /** * Overload for array of x and non-array mu and sigma */ template * = nullptr> inline auto offset_multiplier_free(const std::vector& x, const M& mu, const S& sigma) { std::vector> ret; ret.reserve(x.size()); const auto& mu_ref = to_ref(mu); const auto& sigma_ref = to_ref(sigma); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_free(x[i], mu_ref, sigma_ref)); } return ret; } /** * Overload for array of x and sigma and non-array mu */ template * = nullptr> inline auto offset_multiplier_free(const std::vector& x, const M& mu, const std::vector& sigma) { check_matching_dims("offset_multiplier_free", "x", x, "sigma", sigma); 
std::vector< plain_type_t> ret; ret.reserve(x.size()); const auto& mu_ref = to_ref(mu); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_free(x[i], mu_ref, sigma[i])); } return ret; } /** * Overload for array of x and mu and non-array sigma */ template * = nullptr> inline auto offset_multiplier_free(const std::vector& x, const std::vector& mu, const S& sigma) { check_matching_dims("offset_multiplier_free", "x", x, "mu", mu); std::vector< plain_type_t> ret; ret.reserve(x.size()); const auto& sigma_ref = to_ref(sigma); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_free(x[i], mu[i], sigma_ref)); } return ret; } /** * Overload for array of x, mu, and sigma */ template inline auto offset_multiplier_free(const std::vector& x, const std::vector& mu, const std::vector& sigma) { check_matching_dims("offset_multiplier_free", "x", x, "mu", mu); check_matching_dims("offset_multiplier_free", "x", x, "sigma", sigma); std::vector< plain_type_t> ret; ret.reserve(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret.emplace_back(offset_multiplier_free(x[i], mu[i], sigma[i])); } return ret; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_left.hpp0000644000176200001440000000245414645137106023670 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_HPP #include #include #include namespace stan { namespace math { /** * Returns the solution of the system Ax=b. * * @tparam T1 type of the first matrix * @tparam T2 type of the right-hand side matrix or vector * * @param A Matrix. * @param b Right hand side matrix or vector. * @return x = A^-1 b, solution of the linear system. * @throws std::domain_error if A is not square or the rows of b don't * match the size of A. 
*/ template * = nullptr> inline Eigen::Matrix, T1::RowsAtCompileTime, T2::ColsAtCompileTime> mdivide_left(const T1& A, const T2& b) { check_square("mdivide_left", "A", A); check_multiplicable("mdivide_left", "A", A, "b", b); if (A.size() == 0) { return {0, b.cols()}; } return Eigen::Matrix, T1::RowsAtCompileTime, T1::ColsAtCompileTime>(A) .lu() .solve(Eigen::Matrix, T2::RowsAtCompileTime, T2::ColsAtCompileTime>(b)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lgamma_stirling.hpp0000644000176200001440000000135614645137106024406 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LGAMMA_STIRLING_HPP #define STAN_MATH_PRIM_FUN_LGAMMA_STIRLING_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the Stirling approximation to the lgamma function. * \f[ \mbox{lgamma_stirling}(x) = \frac{1}{2} \log(2\pi) + (x-\frac{1}{2})*\log(x) - x \f] * * @tparam T type of value * @param x value * @return Stirling's approximation to lgamma(x). */ template return_type_t lgamma_stirling(const T x) { return HALF_LOG_TWO_PI + (x - 0.5) * log(x) - x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/trace_gen_inv_quad_form_ldlt.hpp0000644000176200001440000000573714645137106027123 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRACE_GEN_INV_QUAD_FORM_LDLT_HPP #define STAN_MATH_PRIM_FUN_TRACE_GEN_INV_QUAD_FORM_LDLT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Compute the trace of an inverse quadratic form. I.E., this computes * trace(D B^T A^-1 B) * where D is a square matrix and the LDLT_factor of A is provided. 
* * @tparam EigMat1 type of the first matrix * @tparam T2 type of matrix in the LDLT_factor * @tparam EigMat3 type of the third matrix * * @param D multiplier * @param A LDLT_factor * @param B inner term in quadratic form * @return trace(D * B^T * A^-1 * B) * @throw std::domain_error if D is not square * @throw std::domain_error if A cannot be multiplied by B or B cannot * be multiplied by D. */ template * = nullptr, require_all_not_st_var* = nullptr> inline return_type_t trace_gen_inv_quad_form_ldlt( const EigMat1& D, LDLT_factor& A, const EigMat3& B) { check_square("trace_gen_inv_quad_form_ldlt", "D", D); check_multiplicable("trace_gen_inv_quad_form_ldlt", "A", A.matrix(), "B", B); check_multiplicable("trace_gen_inv_quad_form_ldlt", "B", B, "D", D); if (D.size() == 0 || A.matrix().size() == 0) { return 0; } return multiply(B, D.transpose()).cwiseProduct(mdivide_left_ldlt(A, B)).sum(); } /** * Compute the trace of an inverse quadratic form. I.E., this computes * `trace(diag(D) B^T A^-1 B)` * where D is the diagonal of a diagonal matrix (`diag(D)` is the diagonal * matrix itself) and the LDLT_factor of A is provided. * * @tparam EigVec type of the diagonal of first matrix * @tparam T type of matrix in the LDLT_factor * @tparam EigMat type of the B matrix * * @param D diagonal of multiplier * @param A LDLT_factor * @param B inner term in quadratic form * @return trace(diag(D) * B^T * A^-1 * B) * @throw std::domain_error if A cannot be multiplied by B or B cannot * be multiplied by diag(D). 
*/ template * = nullptr, require_all_not_st_var* = nullptr> inline return_type_t trace_gen_inv_quad_form_ldlt( const EigVec& D, LDLT_factor& A, const EigMat& B) { check_multiplicable("trace_gen_inv_quad_form_ldlt", "A", A.matrix(), "B", B); check_multiplicable("trace_gen_inv_quad_form_ldlt", "B", B, "D", D); if (D.size() == 0 || A.matrix().size() == 0) { return 0; } return (B * D.asDiagonal()).cwiseProduct(mdivide_left_ldlt(A, B)).sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv_square.hpp0000644000176200001440000000235014645137106023404 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_SQUARE_HPP #define STAN_MATH_PRIM_FUN_INV_SQUARE_HPP #include #include #include #include namespace stan { namespace math { /** * Returns `1 / square(x)`. * * @tparam Container type of container * @param x container * @return `1 / square(x)` of each value in x. */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto inv_square(const Container& x) { return inv(square(x)); } /** * Version of inv_square() that accepts Eigen Matrix/Array objects or * expressions. * * @tparam T Type of x * @param x Eigen Matrix/Array or expression * @return 1 / the square of each value in x. */ template * = nullptr> inline auto inv_square(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().square().inverse(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/step.hpp0000644000176200001440000000266114645137106022210 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_STEP_HPP #define STAN_MATH_PRIM_FUN_STEP_HPP #include #include namespace stan { namespace math { /** * The step, or Heaviside, function. * * The function is defined by * * step(y) = (y < 0.0) ? 0 : 1. 
* \f[ \mbox{step}(x) = \begin{cases} 0 & \mbox{if } x \le 0 \\ 1 & \mbox{if } x \ge 0 \\[6pt] 0 & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @tparam T type of value * @param y value * @return zero if the value is less than zero, and one otherwise */ template * = nullptr> inline T step(const T& y) { return y < 0.0 ? 0 : 1; } /** * Structure to wrap `step()` so it can be vectorized. */ struct step_fun { /** * Return zero if the value is less than zero and one otherwise * * @tparam T type of argument * @param y argument * @return step(y) */ template static inline auto fun(const T& y) { return step(y); } }; /** * Return the elementwise application of `step()` to * specified argument container. * * @tparam T type of container * @param x container * @return Elementwise step of members of container. */ template * = nullptr> inline auto step(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_ref.hpp0000644000176200001440000000234714645137106022514 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_REF_HPP #define STAN_MATH_PRIM_FUN_TO_REF_HPP #include namespace stan { namespace math { /** * This evaluates expensive Eigen expressions. If given expression involves no * calculations this is a no-op that should be optimized away. * @tparam T argument type * @param a argument * @return optionally evaluated argument */ template inline ref_type_t to_ref(T&& a) { return std::forward(a); } /** * No-op that should be optimized away. * @tparam Cond condition * @tparam T argument type * @param a argument * @return argument */ template * = nullptr> inline T to_ref_if(T&& a) { return std::forward(a); } /** * If the condition is true, evaluates expensive Eigen expressions. If given * expression involves no calculations this is a no-op that should be optimized * away. 
* @tparam Cond condition * @tparam T argument type (Eigen expression) * @param a argument * @return argument converted to `Eigen::Ref` */ template * = nullptr> inline ref_type_t to_ref_if(T&& a) { return std::forward(a); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sum.hpp0000644000176200001440000000303014645137106022030 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SUM_HPP #define STAN_MATH_PRIM_FUN_SUM_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns specified input value. * * @tparam T Type of element. * @param m Specified value. * @return Same value (the sum of one value). */ template * = nullptr> inline T sum(T&& m) { return std::forward(m); } /** * Return the sum of the values in the specified standard vector. * * @tparam T Type of elements summed. * @param m Standard vector to sum. * @return Sum of elements. */ template * = nullptr> inline T sum(const std::vector& m) { return std::accumulate(m.begin(), m.end(), T{0}); } /** * Returns the sum of the coefficients of the specified * Eigen Matrix, Array or expression. * * @tparam T Type of argument * @param m argument * @return Sum of coefficients of argument. */ template * = nullptr> inline value_type_t sum(const T& m) { return m.sum(); } /** * Returns the sum of the coefficients of the specified * Eigen Matrix, Array or expression of complex type. * * @tparam T Type of argument * @param m argument * @return Sum of coefficients of argument. */ template * = nullptr> inline value_type_t sum(const T& m) { return m.sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/linspaced_row_vector.hpp0000644000176200001440000000226014645137106025443 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LINSPACED_ROW_VECTOR_HPP #define STAN_MATH_PRIM_FUN_LINSPACED_ROW_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a row vector of linearly spaced elements. 
* * This produces a row vector from low to high (inclusive) with elements spaced * as (high - low) / (K - 1). For K=1, the vector will contain the high value; * for K=0 it returns an empty vector. * * @param K size of the row vector * @param low smallest value * @param high largest value * @return A row vector of size K with elements linearly spaced between * low and high. * @throw std::domain_error if K is negative, if low is nan or infinite, * if high is nan or infinite, or if high is less than low. */ inline auto linspaced_row_vector(int K, double low, double high) { static const char* function = "linspaced_row_vector"; check_nonnegative(function, "size", K); check_finite(function, "low", low); check_finite(function, "high", high); check_greater_or_equal(function, "high", high, low); return Eigen::RowVectorXd::LinSpaced(K, low, high); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ones_array.hpp0000644000176200001440000000103014645137106023364 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONES_ARRAY_HPP #define STAN_MATH_PRIM_FUN_ONES_ARRAY_HPP #include #include namespace stan { namespace math { /** * Return an array of ones. * * @param K size of the array * @return An array of size K with all elements initialized to 1. * @throw std::domain_error if K is negative. */ inline std::vector ones_array(int K) { check_nonnegative("ones_array", "size", K); return std::vector(K, 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ordered_free.hpp0000644000176200001440000000345614645137106023665 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ORDERED_FREE_HPP #define STAN_MATH_PRIM_FUN_ORDERED_FREE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the vector of unconstrained scalars that transform to * the specified positive ordered vector. * *

This function inverts the constraining operation defined in * ordered_constrain(Matrix), * * @tparam T type of elements in the vector * @param y Vector of positive, ordered scalars. * @return Free vector that transforms into the input vector. * @throw std::domain_error if y is not a vector of positive, * ordered scalars. */ template * = nullptr> plain_type_t ordered_free(const EigVec& y) { const auto& y_ref = to_ref(y); check_ordered("stan::math::ordered_free", "Ordered variable", y_ref); using std::log; Eigen::Index k = y.size(); plain_type_t x(k); if (k == 0) { return x; } x[0] = y_ref[0]; for (Eigen::Index i = 1; i < k; ++i) { x.coeffRef(i) = log(y_ref.coeff(i) - y_ref.coeff(i - 1)); } return x; } /** * Overload of `ordered_free()` to untransform each Eigen vector * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase` with compile time rows or columns equal to 1. * @param x The standard vector to untransform. */ template * = nullptr> auto ordered_free(const T& x) { return apply_vector_unary::apply(x, [](auto&& v) { return ordered_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/choose.hpp0000644000176200001440000000342014645137106022507 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CHOOSE_HPP #define STAN_MATH_PRIM_FUN_CHOOSE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the binomial coefficient for the specified integer * arguments. * * The binomial coefficient, \f${n \choose k}\f$, read "n choose k", is * defined for \f$0 \leq k \leq n\f$ (otherwise return 0) by * * \f${n \choose k} = \frac{n!}{k! (n-k)!}\f$. 
* * @param n total number of objects * @param k number of objects chosen * @return n choose k or 0 iff k > n * @throw std::domain_error if either argument is negative or the * result will not fit in an int type */ inline int choose(int n, int k) { check_nonnegative("choose", "n", n); check_nonnegative("choose", "k", k); if (k > n) { return 0; } const double choices = boost::math::binomial_coefficient(n, k); check_less_or_equal("choose", "n choose k", choices, std::numeric_limits::max()); return static_cast(std::round(choices)); } /** * Enables the vectorized application of the binomial coefficient function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Binomial coefficient function applied to the two inputs. */ template * = nullptr> inline auto choose(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return choose(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv_logit.hpp0000644000176200001440000000450314645137106023224 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_LOGIT_HPP #define STAN_MATH_PRIM_FUN_INV_LOGIT_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the inverse logit function applied to the argument. * * The inverse logit function is defined by * * \f$\mbox{logit}^{-1}(x) = \frac{1}{1 + \exp(-x)}\f$. * * This function can be used to implement the inverse link function * for logistic regression. * * The inverse to this function is logit. 
* \f[ \mbox{inv\_logit}(y) = \begin{cases} \mbox{logit}^{-1}(y) & \mbox{if } -\infty\leq y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{inv\_logit}(y)}{\partial y} = \begin{cases} \frac{\partial\, \mbox{logit}^{-1}(y)}{\partial y} & \mbox{if } -\infty\leq y\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } y = \textrm{NaN} \end{cases} \f] \f[ \mbox{logit}^{-1}(y) = \frac{1}{1 + \exp(-y)} \f] \f[ \frac{\partial \, \mbox{logit}^{-1}(y)}{\partial y} = \frac{\exp(y)}{(\exp(y)+1)^2} \f] * * @param a Argument. * @return Inverse logit of argument. */ inline double inv_logit(double a) { using std::exp; if (a < 0) { double exp_a = exp(a); if (a < LOG_EPSILON) { return exp_a; } return exp_a / (1 + exp_a); } return inv(1 + exp(-a)); } /** * Structure to wrap inv_logit() so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Inverse logit of x. */ struct inv_logit_fun { template static inline auto fun(const T& x) { return inv_logit(x); } }; /** * Vectorized version of inv_logit(). * * @tparam T type of container * @param x container * @return Inverse logit applied to each value in x. */ template < typename T, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto inv_logit(const T& x) { return apply_scalar_unary::apply(x); } // TODO(Tadej): Eigen is introducing their implementation logistic() of this // in 3.4. Use that once we switch to Eigen 3.4 } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rows_dot_product.hpp0000644000176200001440000000211214645137106024624 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ROWS_DOT_PRODUCT_HPP #define STAN_MATH_PRIM_FUN_ROWS_DOT_PRODUCT_HPP #include #include namespace stan { namespace math { /** * Returns the dot product of rows of the specified matrices. 
* * @tparam Mat1 type of the first matrix (must be derived from \c * Eigen::MatrixBase) * @tparam Mat2 type of the second matrix (must be derived from \c * Eigen::MatrixBase) * * @param v1 Matrix of first vectors. * @param v2 Matrix of second vectors. * @return Dot product of the vectors. * @throw std::domain_error If the matrices are not the same * size */ template * = nullptr, require_all_not_eigen_vt* = nullptr> inline Eigen::Matrix, Mat1::RowsAtCompileTime, 1> rows_dot_product(const Mat1& v1, const Mat2& v2) { check_matching_sizes("rows_dot_product", "v1", v1, "v2", v2); return (v1.cwiseProduct(v2)).rowwise().sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv_Phi.hpp0000644000176200001440000001343114645137106022626 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_PHI_HPP #define STAN_MATH_PRIM_FUN_INV_PHI_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * The largest integer that protects against floating point errors * for the inv_Phi function. The value was found by finding the largest * integer that passed the unit tests for accuracy when the input into inv_Phi * is near 1. */ const int BIGINT = 2000000000; /** * The inverse of the unit normal cumulative distribution function. * * @param p argument between 0 and 1 inclusive * @return Real value of the inverse cdf for the standard normal distribution. 
*/ inline double inv_Phi_lambda(double p) { check_bounded("inv_Phi", "Probability variable", p, 0, 1); if (p < 8e-311) { return NEGATIVE_INFTY; } if (p == 1) { return INFTY; } static const double a[8] = {3.3871328727963666080e+00, 1.3314166789178437745e+02, 1.9715909503065514427e+03, 1.3731693765509461125e+04, 4.5921953931549871457e+04, 6.7265770927008700853e+04, 3.3430575583588128105e+04, 2.5090809287301226727e+03}; static const double b[7] = {4.2313330701600911252e+01, 6.8718700749205790830e+02, 5.3941960214247511077e+03, 2.1213794301586595867e+04, 3.9307895800092710610e+04, 2.8729085735721942674e+04, 5.2264952788528545610e+03}; static const double c[8] = {1.42343711074968357734e+00, 4.63033784615654529590e+00, 5.76949722146069140550e+00, 3.64784832476320460504e+00, 1.27045825245236838258e+00, 2.41780725177450611770e-01, 2.27238449892691845833e-02, 7.74545014278341407640e-04}; static const double d[7] = {2.05319162663775882187e+00, 1.67638483018380384940e+00, 6.89767334985100004550e-01, 1.48103976427480074590e-01, 1.51986665636164571966e-02, 5.47593808499534494600e-04, 1.05075007164441684324e-09}; static const double e[8] = {6.65790464350110377720e+00, 5.46378491116411436990e+00, 1.78482653991729133580e+00, 2.96560571828504891230e-01, 2.65321895265761230930e-02, 1.24266094738807843860e-03, 2.71155556874348757815e-05, 2.01033439929228813265e-07}; static const double f[7] = {5.99832206555887937690e-01, 1.36929880922735805310e-01, 1.48753612908506148525e-02, 7.86869131145613259100e-04, 1.84631831751005468180e-05, 1.42151175831644588870e-07, 2.04426310338993978564e-15}; double q = p - 0.5; double r; double val; if (std::fabs(q) <= .425) { r = .180625 - square(q); return q * (((((((a[7] * r + a[6]) * r + a[5]) * r + a[4]) * r + a[3]) * r + a[2]) * r + a[1]) * r + a[0]) / (((((((b[6] * r + b[5]) * r + b[4]) * r + b[3]) * r + b[2]) * r + b[1]) * r + b[0]) * r + 1.0); } else { r = q < 0 ? 
p : 1 - p; if (r <= 0) return 0; r = std::sqrt(-std::log(r)); if (r <= 5.0) { r += -1.6; val = (((((((c[7] * r + c[6]) * r + c[5]) * r + c[4]) * r + c[3]) * r + c[2]) * r + c[1]) * r + c[0]) / (((((((d[6] * r + d[5]) * r + d[4]) * r + d[3]) * r + d[2]) * r + d[1]) * r + d[0]) * r + 1.0); } else { r -= 5.0; val = (((((((e[7] * r + e[6]) * r + e[5]) * r + e[4]) * r + e[3]) * r + e[2]) * r + e[1]) * r + e[0]) / (((((((f[6] * r + f[5]) * r + f[4]) * r + f[3]) * r + f[2]) * r + f[1]) * r + f[0]) * r + 1.0); } if (q < 0.0) return -val; } return val; } } // namespace internal /** * Return the value of the inverse standard normal cumulative distribution * function at the specified argument. * * The precision is at or better than 1.5e-15 for values between 0.0000001 he * largest integer that protects against floating point errors for the inv_Phi * function. The value was found by finding the largest integer that passed the * unit tests for accuracy when the input into inv_Phi is near 1. * * @param p argument between 0 and 1 inclusive * @return real value of the inverse cdf for the standard normal distribution */ inline double inv_Phi(double p) { return p >= 0.9999 ? -internal::inv_Phi_lambda( (internal::BIGINT - internal::BIGINT * p) / internal::BIGINT) : internal::inv_Phi_lambda(p); } /** * Structure to wrap inv_Phi() so it can be vectorized. * * @tparam T type of variable * @param x variable in range [0, 1] * @return Inverse unit normal CDF of x. * @throw std::domain_error if x is not between 0 and 1. */ struct inv_Phi_fun { template static inline auto fun(const T& x) { return inv_Phi(x); } }; /** * Vectorized version of inv_Phi(). * * @tparam T type of container * @param x variables in range [0, 1] * @return Inverse unit normal CDF of each value in x. * @throw std::domain_error if any value is not between 0 and 1. 
*/ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto inv_Phi(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/grad_F32.hpp0000644000176200001440000001103714645137106022561 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GRAD_F32_HPP #define STAN_MATH_PRIM_FUN_GRAD_F32_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Gradients of the hypergeometric function, 3F2. * * Calculate the gradients of the hypergeometric function (3F2) * as the power series stopping when the series converges * to within precision or throwing when the * function takes max_steps steps. * * This power-series representation converges for all gradients * under the same conditions as the 3F2 function itself. * * @tparam T type of arguments and result * @param[out] g g pointer to array of six values of type T, result. * @param[in] a1 a1 see generalized hypergeometric function definition. * @param[in] a2 a2 see generalized hypergeometric function definition. * @param[in] a3 a3 see generalized hypergeometric function definition. * @param[in] b1 b1 see generalized hypergeometric function definition. * @param[in] b2 b2 see generalized hypergeometric function definition. * @param[in] z z see generalized hypergeometric function definition. 
* @param[in] precision precision of the infinite sum * @param[in] max_steps number of steps to take */ template void grad_F32(T* g, const T& a1, const T& a2, const T& a3, const T& b1, const T& b2, const T& z, const T& precision = 1e-6, int max_steps = 1e5) { check_3F2_converges("grad_F32", a1, a2, a3, b1, b2, z); using std::exp; using std::fabs; using std::log; for (int i = 0; i < 6; ++i) { g[i] = 0.0; } T log_g_old[6]; for (auto& x : log_g_old) { x = NEGATIVE_INFTY; } T log_t_old = 0.0; T log_t_new = 0.0; T log_z = log(z); double log_t_new_sign = 1.0; double log_t_old_sign = 1.0; double log_g_old_sign[6]; for (int i = 0; i < 6; ++i) { log_g_old_sign[i] = 1.0; } for (int k = 0; k <= max_steps; ++k) { T p = (a1 + k) * (a2 + k) * (a3 + k) / ((b1 + k) * (b2 + k) * (1 + k)); if (p == 0) { return; } log_t_new += log(fabs(p)) + log_z; log_t_new_sign = p >= 0.0 ? log_t_new_sign : -log_t_new_sign; // g_old[0] = t_new * (g_old[0] / t_old + 1.0 / (a1 + k)); T term = log_g_old_sign[0] * log_t_old_sign * exp(log_g_old[0] - log_t_old) + inv(a1 + k); log_g_old[0] = log_t_new + log(fabs(term)); log_g_old_sign[0] = term >= 0.0 ? log_t_new_sign : -log_t_new_sign; // g_old[1] = t_new * (g_old[1] / t_old + 1.0 / (a2 + k)); term = log_g_old_sign[1] * log_t_old_sign * exp(log_g_old[1] - log_t_old) + inv(a2 + k); log_g_old[1] = log_t_new + log(fabs(term)); log_g_old_sign[1] = term >= 0.0 ? log_t_new_sign : -log_t_new_sign; // g_old[2] = t_new * (g_old[2] / t_old + 1.0 / (a3 + k)); term = log_g_old_sign[2] * log_t_old_sign * exp(log_g_old[2] - log_t_old) + inv(a3 + k); log_g_old[2] = log_t_new + log(fabs(term)); log_g_old_sign[2] = term >= 0.0 ? log_t_new_sign : -log_t_new_sign; // g_old[3] = t_new * (g_old[3] / t_old - 1.0 / (b1 + k)); term = log_g_old_sign[3] * log_t_old_sign * exp(log_g_old[3] - log_t_old) - inv(b1 + k); log_g_old[3] = log_t_new + log(fabs(term)); log_g_old_sign[3] = term >= 0.0 ? 
log_t_new_sign : -log_t_new_sign; // g_old[4] = t_new * (g_old[4] / t_old - 1.0 / (b2 + k)); term = log_g_old_sign[4] * log_t_old_sign * exp(log_g_old[4] - log_t_old) - inv(b2 + k); log_g_old[4] = log_t_new + log(fabs(term)); log_g_old_sign[4] = term >= 0.0 ? log_t_new_sign : -log_t_new_sign; // g_old[5] = t_new * (g_old[5] / t_old + 1.0 / z); term = log_g_old_sign[5] * log_t_old_sign * exp(log_g_old[5] - log_t_old) + inv(z); log_g_old[5] = log_t_new + log(fabs(term)); log_g_old_sign[5] = term >= 0.0 ? log_t_new_sign : -log_t_new_sign; for (int i = 0; i < 6; ++i) { g[i] += log_g_old_sign[i] * exp(log_g_old[i]); } if (log_t_new <= log(precision)) { return; // implicit abs } log_t_old = log_t_new; log_t_old_sign = log_t_new_sign; } throw_domain_error("grad_F32", "k (internal counter)", max_steps, "exceeded ", " iterations, hypergeometric function gradient " "did not converge."); return; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/conj.hpp0000644000176200001440000000322214645137106022160 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CONJ_HPP #define STAN_MATH_PRIM_FUN_CONJ_HPP #include #include namespace stan { namespace math { /** * Return the complex conjugate the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return complex conjugate of the argument */ template inline std::complex conj(const std::complex& z) { return std::conj(z); } /** * Return the complex conjugate the Eigen object. * * @tparam Eig A type derived from `Eigen::EigenBase` * @param[in] z argument * @return complex conjugate of the argument */ template * = nullptr> inline auto conj(const Eig& z) { return z.conjugate(); } /** * Return the complex conjugate the vector with complex scalar components. 
* * @tparam StdVec A `std::vector` type with complex scalar type * @param[in] z argument * @return complex conjugate of the argument */ template * = nullptr> inline auto conj(const StdVec& z) { promote_scalar_t, StdVec> result(z.size()); std::transform(z.begin(), z.end(), result.begin(), [](auto&& x) { return stan::math::conj(x); }); return result; } namespace internal { /** * Return the complex conjugate the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return complex conjugate of the argument */ template inline std::complex complex_conj(const std::complex& z) { return {z.real(), -z.imag()}; } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/determinant.hpp0000644000176200001440000000134714645137106023547 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DETERMINANT_HPP #define STAN_MATH_PRIM_FUN_DETERMINANT_HPP #include #include #include namespace stan { namespace math { /** * Returns the determinant of the specified square matrix. * * @tparam T type of the matrix (must be derived from \c Eigen::MatrixBase) * * @param m Specified matrix. * @return Determinant of the matrix. * @throw std::domain_error if matrix is not square. */ template * = nullptr> inline value_type_t determinant(const T& m) { check_square("determinant", "m", m); return m.determinant(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ordered_constrain.hpp0000644000176200001440000001040514645137106024734 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ORDERED_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_ORDERED_CONSTRAIN_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return an increasing ordered vector derived from the specified * free vector. The returned constrained vector will have the * same dimensionality as the specified free vector. * * @tparam T type of the vector * @param x Free vector of scalars. 
* @return Positive, increasing ordered vector. * @tparam T Type of scalar. */ template * = nullptr, require_not_st_var* = nullptr> inline plain_type_t ordered_constrain(const EigVec& x) { using std::exp; Eigen::Index k = x.size(); plain_type_t y(k); const auto& x_ref = to_ref(x); if (unlikely(k == 0)) { return y; } y[0] = x_ref[0]; for (Eigen::Index i = 1; i < k; ++i) { y.coeffRef(i) = y.coeff(i - 1) + exp(x_ref.coeff(i)); } return y; } /** * Return a positive valued, increasing ordered vector derived * from the specified free vector and increment the specified log * probability reference with the log absolute Jacobian determinant * of the transform. The returned constrained vector * will have the same dimensionality as the specified free vector. * * @tparam T type of the vector * @param x Free vector of scalars. * @param lp Log probability reference. * @return Positive, increasing ordered vector. */ template * = nullptr> inline auto ordered_constrain(const EigVec& x, value_type_t& lp) { const auto& x_ref = to_ref(x); if (likely(x.size() > 1)) { lp += sum(x_ref.tail(x.size() - 1)); } return ordered_constrain(x_ref); } /** * Return a positive valued, increasing ordered vector derived from the * specified free vector. The returned constrained vector will have the same * dimensionality as the specified free vector. If the `Jacobian` parameter is * `true`, the log density accumulator is incremented with the log absolute * Jacobian determinant of the transform. All of the transforms are specified * with their Jacobians in the *Stan Reference Manual* chapter Constraint * Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time dynamic rows * and 1 column * @param x Free vector of scalars * @param[in, out] lp log density accumulator * @return Positive, increasing ordered vector. */ template * = nullptr> inline auto ordered_constrain(const T& x, return_type_t& lp) { if (Jacobian) { return ordered_constrain(x, lp); } else { return ordered_constrain(x); } } /** * Return a positive valued, increasing ordered vector derived from the * specified free vector. The returned constrained vector will have the same * dimensionality as the specified free vector. If the `Jacobian` parameter is * `true`, the log density accumulator is incremented with the log absolute * Jacobian determinant of the transform. All of the transforms are specified * with their Jacobians in the *Stan Reference Manual* chapter Constraint * Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time dynamic rows and 1 column * @param x Free vector of scalars * @param[in, out] lp log density accumulator * @return Positive, increasing ordered vector. 
*/ template * = nullptr> inline auto ordered_constrain(const T& x, return_type_t& lp) { return apply_vector_unary::apply( x, [&lp](auto&& v) { return ordered_constrain(v, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/matrix_power.hpp0000644000176200001440000000324514645137106023754 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MATRIX_POWER_HPP #define STAN_MATH_PRIM_FUN_MATRIX_POWER_HPP #include #include namespace stan { namespace math { /** * Returns the nth power of the specific matrix. M^n = M * M * ... * M. * * @tparam T type of the matrix * * @param[in] M a square matrix * @param[in] n exponent * @return nth power of M * @throw std::domain_error if the matrix contains NaNs or infinities. * @throw std::invalid_argument if the exponent is negative or the matrix is not * square. */ template * = nullptr, require_not_vt_var* = nullptr> inline Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime> matrix_power(const EigMat& M, const int n) { using T = value_type_t; constexpr int R = EigMat::RowsAtCompileTime; constexpr int C = EigMat::ColsAtCompileTime; check_square("matrix_power", "M", M); check_nonnegative("matrix_power", "n", n); Eigen::Matrix MM = M; check_finite("matrix_power", "M", MM); if (n == 0) return Eigen::Matrix::Identity(M.rows(), M.cols()); Eigen::Matrix result = MM; for (int nn = n - 1; nn > 0; nn /= 2) { if (nn % 2 == 1) { result = result * MM; --nn; } MM = MM * MM; } return result; } template * = nullptr> inline Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime> operator^(const EigMat& M, const int n) { return matrix_power(M, n); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv_inc_beta.hpp0000644000176200001440000000352514645137106023655 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_INC_BETA_HPP #define STAN_MATH_PRIM_FUN_INV_INC_BETA_HPP #include #include #include #include namespace stan { namespace math { /** 
* The inverse of the normalized incomplete beta function of a, b, with * probability p. * * Used to compute the inverse cumulative density function for the beta * distribution. * * @param a Shape parameter a >= 0; a and b can't both be 0 * @param b Shape parameter b >= 0 * @param p Random variate. 0 <= p <= 1 * @throws if constraints are violated or if any argument is NaN * @return The inverse of the normalized incomplete beta function. */ inline double inv_inc_beta(double a, double b, double p) { check_not_nan("inv_inc_beta", "a", a); check_not_nan("inv_inc_beta", "b", b); check_not_nan("inv_inc_beta", "p", p); check_positive("inv_inc_beta", "a", a); check_positive("inv_inc_beta", "b", b); check_bounded("inv_inc_beta", "p", p, 0, 1); return boost::math::ibeta_inv(a, b, p, boost_policy_t<>()); } /** * Enables the vectorized application of the inv_inc_beta function, when * any arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @tparam T3 type of third input * @param a First input * @param b Second input * @param c Third input * @return Inverse of the incomplete Beta function applied to the three inputs. */ template * = nullptr> inline auto inv_inc_beta(const T1& a, const T2& b, const T3& c) { return apply_scalar_ternary( [](const auto& d, const auto& e, const auto& f) { return inv_inc_beta(d, e, f); }, a, b, c); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_inv_logit.hpp0000644000176200001440000000454714645137106024075 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_INV_LOGIT_HPP #define STAN_MATH_PRIM_FUN_LOG_INV_LOGIT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the natural logarithm of the inverse logit of the * specified argument. 
* \f[ \mbox{log\_inv\_logit}(x) = \begin{cases} \ln\left(\frac{1}{1+\exp(-x)}\right)& \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log\_inv\_logit}(x)}{\partial x} = \begin{cases} \frac{1}{1+\exp(x)} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param u argument * @return log of the inverse logit of argument */ inline double log_inv_logit(double u) { using std::exp; if (u < 0.0) { return u - log1p(exp(u)); // prevent underflow } return -log1p(exp(-u)); } /** * Returns the natural logarithm of the inverse logit of the * specified argument. * * @param u argument * @return log of the inverse logit of argument */ inline double log_inv_logit(int u) { return log_inv_logit(static_cast(u)); } /** * Structure to wrap log_inv_logit() so it can be vectorized. */ struct log_inv_logit_fun { /** * Return the natural logarithm of the inverse logit * of the specified argument. * * @tparam T argument scalar type * @param x argument * @return natural log of inverse logit of argument */ template static inline auto fun(const T& x) { return log_inv_logit(x); } }; /** * Return the elementwise application of * log_inv_logit() to specified argument container. * The return type promotes the underlying scalar argument type to * double if it is an integer, and otherwise is the argument type. 
* * @tparam T type of container * @param x container * @return elementwise log_inv_logit of members of container */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto log_inv_logit(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/matrix_exp_multiply.hpp0000644000176200001440000000215314645137106025350 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MATRIX_EXP_MULTIPLY_HPP #define STAN_MATH_PRIM_FUN_MATRIX_EXP_MULTIPLY_HPP #include #include #include namespace stan { namespace math { /** * Return product of exp(A) and B, where A is a NxN double matrix, * B is a NxCb double matrix, and t is a double * * @tparam Cb number of columns in matrix B, can be Eigen::Dynamic * @param[in] A Matrix * @param[in] B Matrix * @return exponential of A multiplies B */ template * = nullptr, require_all_st_same* = nullptr> inline Eigen::Matrix matrix_exp_multiply(const EigMat1& A, const EigMat2& B) { check_square("matrix_exp_multiply", "input matrix", A); check_multiplicable("matrix_exp_multiply", "A", A, "B", B); if (A.size() == 0) { return {0, B.cols()}; } return matrix_exp_action_handler().action(A, B); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sub_col.hpp0000644000176200001440000000166214645137106022663 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SUB_COL_HPP #define STAN_MATH_PRIM_FUN_SUB_COL_HPP #include #include namespace stan { namespace math { /** * Return a nrows x 1 subcolumn starting at (i-1, j-1). * * @tparam T type of the matrix * @param m Matrix. * @param i Starting row + 1. * @param j Starting column + 1. * @param nrows Number of rows in block. * @throw std::out_of_range if either index is out of range. 
*/ template < typename T, require_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto sub_col(const T& m, size_t i, size_t j, size_t nrows) { check_row_index("sub_col", "i", m, i); if (nrows > 0) { check_row_index("sub_col", "i+nrows-1", m, i + nrows - 1); } check_column_index("sub_col", "j", m, j); return m.col(j - 1).segment(i - 1, nrows); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fill.hpp0000644000176200001440000000302214645137106022153 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FILL_HPP #define STAN_MATH_PRIM_FUN_FILL_HPP #include #include #include namespace stan { namespace math { /** * Fill the specified container with the specified value. * * The specified matrix is filled by element. * * @tparam EigMat Type inheriting from `EigenBase` * @tparam S Type of value. * * @param x Container. * @param y Value. */ template * = nullptr, require_stan_scalar_t* = nullptr> inline void fill(EigMat& x, const S& y) { x.fill(y); } /** * Fill the specified container with the specified value. * * This base case simply assigns the value to the container. * * @tparam T Type of reference container. * @tparam S Type of value. * @param x Container. * @param y Value. */ template < typename T, typename S, require_t&, std::decay_t>>* = nullptr> inline void fill(T& x, S&& y) { x = std::forward(y); } /** * Fill the specified container with the specified value. * * Each container in the specified standard vector is filled * recursively by calling fill. * * @tparam Vec A standard vector * @tparam S type of value * @param[in] x Container. * @param[in, out] y Value. 
*/ template * = nullptr> inline void fill(Vec& x, S&& y) { for (auto& x_val : x) { fill(x_val, y); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/simplex_free.hpp0000644000176200001440000000401014645137106023705 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SIMPLEX_FREE_HPP #define STAN_MATH_PRIM_FUN_SIMPLEX_FREE_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return an unconstrained vector that when transformed produces * the specified simplex. It applies to a simplex of dimensionality * K and produces an unconstrained vector of dimensionality (K-1). * *

The simplex transform is defined through a centered * stick-breaking process. * * @tparam ColVec type of the simplex (must be a column vector) * @param x Simplex of dimensionality K. * @return Free vector of dimensionality (K-1) that transforms to * the simplex. * @throw std::domain_error if x is not a valid simplex */ template * = nullptr> auto simplex_free(const Vec& x) { using std::log; using T = value_type_t; const auto& x_ref = to_ref(x); check_simplex("stan::math::simplex_free", "Simplex variable", x_ref); int Km1 = x_ref.size() - 1; plain_type_t y(Km1); T stick_len = x_ref.coeff(Km1); for (Eigen::Index k = Km1; --k >= 0;) { stick_len += x_ref.coeff(k); T z_k = x_ref.coeff(k) / stick_len; y.coeffRef(k) = logit(z_k) + log(Km1 - k); // note: log(Km1 - k) = logit(1.0 / (Km1 + 1 - k)); } return y; } /** * Overload of `simplex_free()` to untransform each Eigen vector * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase` with compile time rows or columns equal to 1. * @param x The standard vector to untransform. */ template * = nullptr> auto simplex_free(const T& x) { return apply_vector_unary::apply(x, [](auto&& v) { return simplex_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/acos.hpp0000644000176200001440000000456414645137106022166 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ACOS_HPP #define STAN_MATH_PRIM_FUN_ACOS_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `acos()` so it can be vectorized. * * @tparam T type of variable * @param x argument * @return Arc cosine of variable in radians. 
*/ struct acos_fun { template static inline auto fun(const T& x) { using std::acos; return acos(x); } }; /** * Returns the elementwise `acos()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x argument * @return Arc cosine of each variable in the container, in radians. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto acos(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `acos()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x argument * @return Arc cosine of each variable in the container, in radians. */ template * = nullptr> inline auto acos(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().acos(); }); } namespace internal { /** * Return the arc cosine of the complex argument. 
* * @tparam V value type of argument * @param[in] x argument * @return arc cosine of the argument */ template inline std::complex complex_acos(const std::complex& x) { return 0.5 * pi() - asin(x); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/resize.hpp0000644000176200001440000000225214645137106022532 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_RESIZE_HPP #define STAN_MATH_PRIM_FUN_RESIZE_HPP #include #include namespace stan { namespace math { namespace internal { template void resize(Eigen::Matrix& x, const std::vector& dims, int pos) { x.resize(dims[pos], dims[pos + 1]); } template void resize(T /*x*/, const std::vector& /*dims*/, int /*pos*/) { // no-op } template void resize(std::vector& x, const std::vector& dims, int pos) { x.resize(dims[pos]); ++pos; if (pos >= static_cast(dims.size())) { return; // skips lowest loop to scalar } for (size_t i = 0; i < x.size(); ++i) { resize(x[i], dims, pos); } } } // namespace internal /** * Recursively resize the specified vector of vectors, * which must bottom out at scalar values, Eigen vectors * or Eigen matrices. * * @tparam T type of object being resized * @param x Array-like object to resize. * @param dims New dimensions. */ template inline void resize(T& x, std::vector dims) { internal::resize(x, dims, 0U); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/matrix_exp_pade.hpp0000644000176200001440000000263414645137106024406 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MATRIX_EXP_PADE_HPP #define STAN_MATH_PRIM_FUN_MATRIX_EXP_PADE_HPP #include #include #include namespace stan { namespace math { /** * Computes the matrix exponential, using a Pade * approximation, coupled with scaling and * squaring. * * @tparam MatrixType type of the matrix * @param[in] arg matrix to exponentiate. * @return Matrix exponential of input matrix. 
*/ template * = nullptr> Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime> matrix_exp_pade(const EigMat& arg) { using MatrixType = Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime>; check_square("matrix_exp_pade", "arg", arg); if (arg.size() == 0) { return {}; } MatrixType U, V; int squarings; Eigen::matrix_exp_computeUV::run(arg, U, V, squarings, arg(0, 0)); // Pade approximant is // (U+V) / (-U+V) MatrixType numer = U + V; MatrixType denom = -U + V; MatrixType pade_approximation = denom.partialPivLu().solve(numer); for (int i = 0; i < squarings; ++i) { pade_approximation *= pade_approximation; // undo scaling by } // repeated squaring return pade_approximation; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/isfinite.hpp0000644000176200001440000000120414645137106023037 0ustar liggesusers#ifndef STAN_MATH_PRIM_SCAL_FUN_ISFINITE_HPP #define STAN_MATH_PRIM_SCAL_FUN_ISFINITE_HPP #include namespace stan { namespace math { /** * Return true if specified argument is finite (not infinite and not * not-a-number). * * Overloads `std::isfinite` from `` for argument-dependent * lookup. * * @tparam ADType type of argument * @param[in] v argument * @return true if argument is finite */ template * = nullptr> inline bool isfinite(ADType&& v) { using std::isfinite; return isfinite(v.val()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/eigenvalues_sym.hpp0000644000176200001440000000224614645137106024433 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGENVALUES_SYM_HPP #define STAN_MATH_PRIM_FUN_EIGENVALUES_SYM_HPP #include #include #include namespace stan { namespace math { /** * Return the eigenvalues of the specified symmetric matrix * in ascending order of magnitude. This function is more * efficient than the general eigenvalues function for symmetric * matrices. * * @tparam EigMat type of the matrix * @param m Specified matrix. 
* @return Eigenvalues of matrix. */ template * = nullptr, require_not_st_var* = nullptr> Eigen::Matrix, -1, 1> eigenvalues_sym(const EigMat& m) { if (unlikely(m.size() == 0)) { return Eigen::Matrix, -1, 1>(0, 1); } using PlainMat = plain_type_t; const PlainMat& m_eval = m; check_symmetric("eigenvalues_sym", "m", m_eval); Eigen::SelfAdjointEigenSolver solver(m_eval, Eigen::EigenvaluesOnly); return solver.eigenvalues(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_determinant.hpp0000644000176200001440000000151514645137106024405 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_DETERMINANT_HPP #define STAN_MATH_PRIM_FUN_LOG_DETERMINANT_HPP #include #include #include namespace stan { namespace math { /** * Returns the log absolute determinant of the specified square matrix. * * @tparam EigMat type of the matrix * * @param m Specified matrix. * @return log absolute determinant of the matrix. * @throw std::domain_error if matrix is not square. */ template * = nullptr> inline value_type_t log_determinant(const EigMat& m) { if (m.size() == 0) { return 0; } check_square("log_determinant", "m", m); return m.colPivHouseholderQr().logAbsDeterminant(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/square.hpp0000644000176200001440000000405414645137106022533 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SQUARE_HPP #define STAN_MATH_PRIM_FUN_SQUARE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the square of the specified argument. * *

\f$\mbox{square}(x) = x^2\f$. * *

The implementation of square(x) is just * x * x. Given this, this method is mainly useful * in cases where x is not a simple primitive type, * particularly when it is an autodiff type. * * @param x Input to square. * @return Square of input. */ inline double square(double x) { return std::pow(x, 2); } /** * Structure to wrap square() so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return x squared. */ struct square_fun { template static inline auto fun(const T& x) { return square(x); } }; /** * Vectorized version of square(). * * @tparam Container type of container * @param x container * @return Each value in x squared. */ template < typename Container, require_not_stan_scalar_t* = nullptr, require_not_container_st* = nullptr, require_not_var_matrix_t* = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto square(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of square() that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Each value in x squared. */ template * = nullptr> inline auto square(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().square(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/as_column_vector_or_scalar.hpp0000644000176200001440000000503114645137106026616 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AS_COLUMN_VECTOR_OR_SCALAR_HPP #define STAN_MATH_PRIM_FUN_AS_COLUMN_VECTOR_OR_SCALAR_HPP #include #include #include namespace stan { namespace math { namespace internal { template class empty_broadcast_array; } /** * no-op that passes the scalar * * @tparam T Type of scalar element. * @param a Specified scalar. * @return the scalar. 
*/ template * = nullptr> inline T as_column_vector_or_scalar(const T& a) { return a; } /** * No-op used when working with operands and partials. * This is not implimented so it cannot be invoked and only exists so the * compiler can resolve it's output type. */ template internal::empty_broadcast_array& as_column_vector_or_scalar( internal::empty_broadcast_array& a); /** * no-op that returns a column vector. * * @tparam T Type inheriting from `EigenBase` with dynamic compile time rows * and fixed column of 1. * @param a Specified vector. * @return Same vector. */ template * = nullptr> inline T&& as_column_vector_or_scalar(T&& a) { return std::forward(a); } /** * Converts a row vector to an eigen column vector. For row vectors this returns * a `Transpose>`. * * @tparam T Type inheriting from `EigenBase` with dynamic compile time columns * and fixed row of 1. * @param a Specified vector. * @return Transposed vector. */ template * = nullptr, require_not_eigen_col_vector_t* = nullptr> inline auto as_column_vector_or_scalar(T&& a) { return make_holder([](auto& x) { return x.transpose(); }, std::forward(a)); } /** * Converts `std::vector` to a column vector. * * @tparam T `std::vector` type. * @param a Specified vector. * @return input converted to a column vector. 
*/ template * = nullptr> inline auto as_column_vector_or_scalar(T&& a) { using plain_vector = Eigen::Matrix, Eigen::Dynamic, 1>; using optionally_const_vector = std::conditional_t>::value, const plain_vector, plain_vector>; using T_map = Eigen::Map; return make_holder([](auto& x) { return T_map(x.data(), x.size()); }, std::forward(a)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/dot_product.hpp0000644000176200001440000000411114645137106023553 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DOT_PRODUCT_HPP #define STAN_MATH_PRIM_FUN_DOT_PRODUCT_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the dot product of the specified vectors. * * @param v1 First vector. * @param v2 Second vector. * @return Dot product of the vectors. * @throw std::invalid_argument If the vectors are not the same * size or if they are both not vector dimensioned. */ template * = nullptr, require_not_var_t> * = nullptr> inline auto dot_product(const Vec1 &v1, const Vec2 &v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); return v1.dot(v2); } /** * Returns the dot product of the specified arrays. * * @param v1 First array. * @param v2 Second array. * @param length Length of both arrays. */ template * = nullptr, require_all_not_var_t * = nullptr> inline auto dot_product(const Scalar1 *v1, const Scalar2 *v2, size_t length) { return_type_t result = 0; for (size_t i = 0; i < length; i++) { result += v1[i] * v2[i]; } return result; } /** * Returns the dot product of the specified arrays. * * @param v1 First array. * @param v2 Second array. * @throw std::domain_error if the vectors are not the same size. 
*/ template * = nullptr> inline return_type_t dot_product( const std::vector &v1, const std::vector &v2) { check_matching_sizes("dot_product", "v1", v1, "v2", v2); if (v1.size() == 0) { return 0.0; } return dot_product(as_column_vector_or_scalar(v1), as_column_vector_or_scalar(v2)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/transpose.hpp0000644000176200001440000000076414645137106023255 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRANSPOSE_HPP #define STAN_MATH_PRIM_FUN_TRANSPOSE_HPP #include #include namespace stan { namespace math { /** * Transposes a matrix. * @tparam T type of the matrix or expression * @param m matrix or expression * @return transposed matrix */ template * = nullptr> auto inline transpose(const T& m) { return m.transpose(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/trace_inv_quad_form_ldlt.hpp0000644000176200001440000000240714645137106026261 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRACE_INV_QUAD_FORM_LDLT_HPP #define STAN_MATH_PRIM_FUN_TRACE_INV_QUAD_FORM_LDLT_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Compute the trace of an inverse quadratic form. I.E., this computes * trace(B^T A^-1 B) * where the LDLT_factor of A is provided. 
* * @tparam T type of the first matrix and the LDLT_factor * @tparam EigMat2 type of the second matrix * @param A first matrix as LDLT * @param B second matrix */ template > inline return_type_t trace_inv_quad_form_ldlt(LDLT_factor& A, const EigMat2& B) { check_multiplicable("trace_inv_quad_form_ldlt", "A", A.matrix(), "B", B); if (A.matrix().size() == 0) { return 0; } return B.cwiseProduct(mdivide_left_ldlt(A, B)).sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/promote_elements.hpp0000644000176200001440000000765214645137106024623 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PROMOTE_ELEMENTS_HPP #define STAN_MATH_PRIM_FUN_PROMOTE_ELEMENTS_HPP #include #include #include #include namespace stan { namespace math { /** * Struct with static function for elementwise type promotion. * *

This base implementation promotes one scalar value to another. * * @tparam T type of promoted element * @tparam S type of input element, must be assignable to T */ template struct promote_elements { /** * Return input element. * * @param u input of type S, assignable to type T * @returns input as type T */ inline static T promote(const S& u) { return u; } }; /** * Struct with static function for elementwise type promotion. * *

This specialization promotes scalar values of the same type. * * @tparam T type of elements */ template struct promote_elements { /** * Return input element. * * @param u input of type T * @returns input as type T */ inline static const T& promote(const T& u) { return u; } }; /** * Struct with static function for elementwise type promotion. * *

This specialization promotes vector elements of different types * which must be compatible with promotion. * * @tparam T type of promoted elements * @tparam S type of input elements, must be assignable to T */ template struct promote_elements, std::vector > { /** * Return input vector of type S as vector of type T. * * @param u vector of type S, assignable to type T * @returns vector of type T */ inline static std::vector promote(const std::vector& u) { std::vector t; t.reserve(u.size()); for (size_t i = 0; i < u.size(); ++i) { t.push_back(promote_elements::promote(u[i])); } return t; } }; /** * Struct with static function for elementwise type promotion. * *

This specialization promotes vector elements of the same type. * * @tparam T type of elements */ template struct promote_elements, std::vector > { /** * Return input vector. * * @param u vector of type T * @returns vector of type T */ inline static const std::vector& promote(const std::vector& u) { return u; } }; /** * Struct with static function for elementwise type promotion. * *

This specialization promotes matrix elements of different types * which must be compatible with promotion. * * @tparam T type of promoted elements * @tparam S type of input elements, must be assignable to T * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic */ template struct promote_elements, Eigen::Matrix > { /** * Return input matrix of type S as matrix of type T. * * @param u matrix of type S, assignable to type T * @returns matrix of type T */ inline static Eigen::Matrix promote( const Eigen::Matrix& u) { Eigen::Matrix t(u.rows(), u.cols()); for (int i = 0; i < u.size(); ++i) { t(i) = promote_elements::promote(u(i)); } return t; } }; /** * Struct with static function for elementwise type promotion. * *

This specialization promotes matrix elements of the same type. * * @tparam T type of elements in the matrices * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic */ template struct promote_elements, Eigen::Matrix > { /** * Return input matrix. * * @param u matrix of type T * @returns matrix of type T */ inline static const Eigen::Matrix& promote( const Eigen::Matrix& u) { return u; } }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/quad_form_sym.hpp0000644000176200001440000000441314645137106024077 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QUAD_FORM_SYM_HPP #define STAN_MATH_PRIM_FUN_QUAD_FORM_SYM_HPP #include #include #include namespace stan { namespace math { /** * Return the quadratic form \f$ B^T A B \f$ of a symmetric matrix. * * Symmetry of the resulting matrix is guaranteed. * * @tparam EigMat1 type of the first (symmetric) matrix * @tparam EigMat2 type of the second matrix * * @param A symmetric matrix * @param B second matrix * @return The quadratic form, which is a symmetric matrix. * @throws std::invalid_argument if A is not symmetric, or if A cannot be * multiplied by B */ template * = nullptr, require_not_eigen_col_vector_t* = nullptr, require_vt_same* = nullptr, require_all_vt_arithmetic* = nullptr> inline plain_type_t quad_form_sym(const EigMat1& A, const EigMat2& B) { check_multiplicable("quad_form_sym", "A", A, "B", B); const auto& A_ref = to_ref(A); const auto& B_ref = to_ref(B); check_symmetric("quad_form_sym", "A", A_ref); return make_holder( [](const auto& ret) { return 0.5 * (ret + ret.transpose()); }, (B_ref.transpose() * A_ref * B_ref).eval()); } /** * Return the quadratic form \f$ B^T A B \f$ of a symmetric matrix. * * @tparam EigMat type of the (symmetric) matrix * @tparam ColVec type of the vector * * @param A symmetric matrix * @param B vector * @return The quadratic form (a scalar). 
* @throws std::invalid_argument if A is not symmetric, or if A cannot be * multiplied by B */ template * = nullptr, require_eigen_col_vector_t* = nullptr, require_vt_same* = nullptr, require_all_vt_arithmetic* = nullptr> inline value_type_t quad_form_sym(const EigMat& A, const ColVec& B) { check_multiplicable("quad_form_sym", "A", A, "B", B); const auto& A_ref = to_ref(A); const auto& B_ref = to_ref(B); check_symmetric("quad_form_sym", "A", A_ref); return B_ref.dot(A_ref * B_ref); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_and.hpp0000644000176200001440000000163014645137106023464 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_AND_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_AND_HPP #include namespace stan { namespace math { /** * The logical and function which returns 1 if both arguments * are unequal to zero and 0 otherwise. * Equivalent * to x1 != 0 && x2 != 0. * \f[ \mbox{operator\&\&}(x, y) = \begin{cases} 0 & \mbox{if } x = 0 \textrm{ or } y=0 \\ 1 & \mbox{if } x, y \neq 0 \\[6pt] 1 & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true if both x1 and x2 are not equal to 0. */ template inline int logical_and(const T1 x1, const T2 x2) { return (x1 != 0) && (x2 != 0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/constants.hpp0000644000176200001440000001256614645137106023256 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CONSTANTS_HPP #define STAN_MATH_PRIM_FUN_CONSTANTS_HPP #include #include #include #include #include namespace stan { namespace math { // TODO(anyone) Use constexpr when moving to C++17 /** * Return the base of the natural logarithm. * * @return Base of natural logarithm. */ static constexpr double e() { return boost::math::constants::e(); } /** * Return the Euler's gamma constant. 
* * @return Euler's Gamma. */ static constexpr double egamma() { return boost::math::constants::euler(); } /** * Return the value of pi. * * @return Pi. */ static constexpr double pi() { return boost::math::constants::pi(); } /** * Smallest positive value. */ static constexpr double EPSILON = std::numeric_limits::epsilon(); /** * Positive infinity. */ static constexpr double INFTY = std::numeric_limits::infinity(); /** * Negative infinity. */ static constexpr double NEGATIVE_INFTY = -INFTY; /** * (Quiet) not-a-number value. */ static constexpr double NOT_A_NUMBER = std::numeric_limits::quiet_NaN(); /** * Twice the value of \f$ \pi \f$, * \f$ 2\pi \f$. */ static constexpr double TWO_PI = boost::math::constants::two_pi(); /** * The natural logarithm of 0, * \f$ \log 0 \f$. */ static constexpr double LOG_ZERO = -INFTY; /** * The natural logarithm of machine precision \f$ \epsilon \f$, * \f$ \log \epsilon \f$. */ const double LOG_EPSILON = std::log(EPSILON); /** * The natural logarithm of \f$ \pi \f$, * \f$ \log \pi \f$. */ const double LOG_PI = std::log(pi()); /** * The natural logarithm of 2, * \f$ \log 2 \f$. */ static constexpr double LOG_TWO = boost::math::constants::ln_two(); /** * The natural logarithm of 0.5, * \f$ \log 0.5 = \log 1 - \log 2 \f$. */ static constexpr double LOG_HALF = -LOG_TWO; /** * The natural logarithm of 2 plus the natural logarithm of \f$ \pi \f$, * \f$ \log(2\pi) \f$. */ const double LOG_TWO_PI = LOG_TWO + LOG_PI; /** * The value of one quarter the natural logarithm of \f$ \pi \f$, * \f$ \log(\pi) / 4 \f$. */ const double LOG_PI_OVER_FOUR = 0.25 * LOG_PI; /** * The natural logarithm of the square root of \f$ \pi \f$, * \f$ \log(sqrt{\pi}) \f$. */ const double LOG_SQRT_PI = std::log(boost::math::constants::root_pi()); /** * The natural logarithm of 10, * \f$ \log 10 \f$. */ static constexpr double LOG_TEN = boost::math::constants::ln_ten(); /** * The value of the square root of 2, * \f$ \sqrt{2} \f$. 
*/ static constexpr double SQRT_TWO = boost::math::constants::root_two(); /** * The value of the square root of \f$ \pi \f$, * \f$ \sqrt{\pi} \f$. */ static constexpr double SQRT_PI = boost::math::constants::root_pi(); /** * The value of the square root of \f$ 2\pi \f$, * \f$ \sqrt{2\pi} \f$. */ static constexpr double SQRT_TWO_PI = boost::math::constants::root_two_pi(); /** * The square root of 2 divided by the square root of \f$ \pi \f$, * \f$ \sqrt{2} / \sqrt{\pi} \f$. */ static constexpr double SQRT_TWO_OVER_SQRT_PI = SQRT_TWO / SQRT_PI; /** * The value of 1 over the square root of 2, * \f$ 1 / \sqrt{2} \f$. */ static constexpr double INV_SQRT_TWO = boost::math::constants::one_div_root_two(); /** * The value of 1 over the square root of \f$ \pi \f$, * \f$ 1 / \sqrt{\pi} \f$. */ static constexpr double INV_SQRT_PI = boost::math::constants::one_div_root_pi(); /** * The value of 1 over the square root of \f$ 2\pi \f$, * \f$ 1 / \sqrt{2\pi} \f$. */ static constexpr double INV_SQRT_TWO_PI = boost::math::constants::one_div_root_two_pi(); /** * The value of 2 over the square root of \f$ \pi \f$, * \f$ 2 / \sqrt{\pi} \f$. */ static constexpr double TWO_OVER_SQRT_PI = boost::math::constants::two_div_root_pi(); /** * The value of half the natural logarithm 2, * \f$ \log(2) / 2 \f$. */ static constexpr double HALF_LOG_TWO = 0.5 * LOG_TWO; /** * The value of half the natural logarithm \f$ 2\pi \f$, * \f$ \log(2\pi) / 2 \f$. */ const double HALF_LOG_TWO_PI = 0.5 * LOG_TWO_PI; /** * The value of minus the natural logarithm of the square root of \f$ 2\pi \f$, * \f$ -\log(\sqrt{2\pi}) \f$. */ const double NEG_LOG_SQRT_TWO_PI = -std::log(SQRT_TWO_PI); /** * Largest rate parameter allowed in Poisson RNG */ const double POISSON_MAX_RATE = std::pow(2.0, 30); /** * Return positive infinity. * * @return Positive infinity. */ static constexpr inline double positive_infinity() { return INFTY; } /** * Return negative infinity. * * @return Negative infinity. 
*/ static constexpr inline double negative_infinity() { return NEGATIVE_INFTY; } /** * Return (quiet) not-a-number. * * @return Quiet not-a-number. */ static constexpr inline double not_a_number() { return NOT_A_NUMBER; } /** * Returns the difference between 1.0 and the next value * representable. * * @return Minimum positive number. */ static constexpr inline double machine_precision() { return EPSILON; } /** * Returns the natural logarithm of ten. * * @return Natural logarithm of ten. */ static constexpr inline double log10() { return LOG_TEN; } /** * Returns the square root of two. * * @return Square root of two. */ static constexpr inline double sqrt2() { return SQRT_TWO; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_negation.hpp0000644000176200001440000000077214645137106024534 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_NEGATION_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_NEGATION_HPP #include namespace stan { namespace math { /** * The logical negation function which returns one if the input * is equal to zero and zero otherwise. * * @tparam T type of value * @param x value * @return 1 if value is zero and 0 otherwise */ template inline int logical_negation(const T& x) { return x == 0; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rising_factorial.hpp0000644000176200001440000000524414645137106024554 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_RISING_FACTORIAL_HPP #define STAN_MATH_PRIM_FUN_RISING_FACTORIAL_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the rising factorial function evaluated * at the inputs. * * @tparam T type of the first argument * @param x first argument * @param n second argument * @return Result of rising factorial function. 
* @throw std::domain_error if x is NaN * @throw std::domain_error if n is negative * \f[ \mbox{rising\_factorial}(x, n) = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ x^{(n)} & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{rising\_factorial}(x, n)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \frac{\partial\, x^{(n)}}{\partial x} & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{rising\_factorial}(x, n)}{\partial n} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \frac{\partial\, x^{(n)}}{\partial n} & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ x^{(n)}=\frac{\Gamma(x+n)}{\Gamma(x)} \f] \f[ \frac{\partial \, x^{(n)}}{\partial x} = x^{(n)}(\Psi(x+n)-\Psi(x)) \f] \f[ \frac{\partial \, x^{(n)}}{\partial n} = (x)_n\Psi(x+n) \f] * */ template * = nullptr> inline return_type_t rising_factorial(const T& x, int n) { static const char* function = "rising_factorial"; check_not_nan(function, "first argument", x); check_nonnegative(function, "second argument", n); return boost::math::rising_factorial(x, n, boost_policy_t<>()); } /** * Enables the vectorized application of the rising_factorial * function, when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return rising_factorial function applied to the two inputs. 
*/ template * = nullptr> inline auto rising_factorial(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return rising_factorial(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/dims.hpp0000644000176200001440000000312514645137106022165 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DIMS_HPP #define STAN_MATH_PRIM_FUN_DIMS_HPP #include #include #include namespace stan { namespace math { /** * Pushes dimensions of given argument into given result vector. * * For a scalar that is a no-op. * @tparam type of scalar * @param x argument * @param result result */ template * = nullptr> inline void dims(const T& x, std::vector& result) {} /** * Pushes dimensions of given argument into given result vector. * * For an Eigen type those are the numbers of rows and columns. * @param x argument * @param result result */ template * = nullptr> inline void dims(const T& x, std::vector& result) { result.push_back(x.rows()); result.push_back(x.cols()); } /** * Pushes dimensions of given argument into given result vector. * * For a `std::vector` that is its size and dimensions of its elements. * @tparam type of scalar * @tparam Alloc type of allocator * @param x argument * @param result result */ template inline void dims(const std::vector& x, std::vector& result) { result.push_back(x.size()); if (x.size() > 0) { dims(x[0], result); } } /** * Determines dimensions of an argument. 
* @param x argument * @return vector of sizes in each of arguemnt's dimensions */ template inline std::vector dims(const T& x) { std::vector result; dims(x, result); return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/matrix_exp_2x2.hpp0000644000176200001440000000345214645137106024107 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MATRIX_EXP_2X2_HPP #define STAN_MATH_PRIM_FUN_MATRIX_EXP_2X2_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the matrix exponential of a 2x2 matrix. Reference for * algorithm: http://mathworld.wolfram.com/MatrixExponential.html * Note: algorithm only works if delta > 0; * * @tparam EigMat type of the matrix * @param[in] A 2x2 matrix to exponentiate. * @return Matrix exponential of A. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> matrix_exp_2x2(const EigMat& A) { using std::cosh; using std::exp; using std::sinh; using std::sqrt; using T = value_type_t; T a = A(0, 0), b = A(0, 1), c = A(1, 0), d = A(1, 1), delta; delta = sqrt(square(a - d) + 4 * b * c); Eigen::Matrix B(2, 2); T half_delta = 0.5 * delta; T cosh_half_delta = cosh(half_delta); T sinh_half_delta = sinh(half_delta); T exp_half_a_plus_d = exp(0.5 * (a + d)); T Two_exp_sinh = 2 * exp_half_a_plus_d * sinh_half_delta; T delta_cosh = delta * cosh_half_delta; T ad_sinh_half_delta = (a - d) * sinh_half_delta; B(0, 0) = exp_half_a_plus_d * (delta_cosh + ad_sinh_half_delta); B(0, 1) = b * Two_exp_sinh; B(1, 0) = c * Two_exp_sinh; B(1, 1) = exp_half_a_plus_d * (delta_cosh - ad_sinh_half_delta); // use pade approximation if cosh & sinh ops overflow to NaN if (B.hasNaN()) { return matrix_exp_pade(A); } else { return B / delta; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ones_vector.hpp0000644000176200001440000000105114645137106023553 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONES_VECTOR_HPP 
#define STAN_MATH_PRIM_FUN_ONES_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a vector of ones * * @param K size of the vector * @return A vector of size K with all elements initialized to 1. * @throw std::domain_error if K is negative. */ inline auto ones_vector(int K) { check_nonnegative("ones_vector", "size", K); return Eigen::VectorXd::Constant(K, 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/tanh.hpp0000644000176200001440000000440414645137106022164 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TANH_HPP #define STAN_MATH_PRIM_FUN_TANH_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `tanh()` so that it can be vectorized. * * @tparam T type of argument * @param x angle in radians * @return Hyperbolic tangent of x. */ struct tanh_fun { template static inline auto fun(const T& x) { using std::tanh; return tanh(x); } }; /** * Vectorized version of `tanh()`. * * @tparam Container type of container * @param x angles in radians * @return Hyperbolic tangent of each value in x. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto tanh(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `tanh()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Hyperbolic tangent of each value in x. */ template * = nullptr> inline auto tanh(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().tanh(); }); } namespace internal { /** * Return the hyperbolic tangent of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return hyperbolic tangent of the argument */ template inline std::complex complex_tanh(const std::complex& z) { using std::exp; auto exp_z = exp(z); auto exp_neg_z = exp(-z); return stan::math::internal::complex_divide(exp_z - exp_neg_z, exp_z + exp_neg_z); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log1m_inv_logit.hpp0000644000176200001440000000475614645137106024335 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG1M_INV_LOGIT_HPP #define STAN_MATH_PRIM_FUN_LOG1M_INV_LOGIT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the natural logarithm of 1 minus the inverse logit * of the specified argument. * \f[ \mbox{log1m\_inv\_logit}(x) = \begin{cases} -\ln(\exp(x)+1) & \mbox{if } -\infty\leq x \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log1m\_inv\_logit}(x)}{\partial x} = \begin{cases} -\frac{\exp(x)}{\exp(x)+1} & \mbox{if } -\infty\leq x\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param u argument * @return log of one minus the inverse logit of the argument */ inline double log1m_inv_logit(double u) { using std::exp; if (u > 0.0) { return -u - log1p(exp(-u)); // prevent underflow } return -log1p(exp(u)); } /** * Return the natural logarithm of one minus the inverse logit of * the specified argument. * * @param u argument * @return log of one minus the inverse logit of the argument */ inline double log1m_inv_logit(int u) { return log1m_inv_logit(static_cast(u)); } /** * Structure to wrap log1m_inv_logit() so it can be vectorized. */ struct log1m_inv_logit_fun { /** * Return the natural logarithm of one minus the inverse logit * of the specified argument. 
* * @tparam T type of argument * @param x argument * @return natural log of one minus inverse logit of argument */ template static inline auto fun(const T& x) { return log1m_inv_logit(x); } }; /** * Return the elementwise application of * log1m_inv_logit() to specified argument container. * The return type promotes the underlying scalar argument type to * double if it is an integer, and otherwise is the argument type. * * @tparam T type of container * @param x container * @return Elementwise log1m_inv_logit of members of container. */ template * = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline typename apply_scalar_unary::return_t log1m_inv_logit(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sort_indices_desc.hpp0000644000176200001440000000117514645137106024717 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SORT_INDICES_DESC_HPP #define STAN_MATH_PRIM_FUN_SORT_INDICES_DESC_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return a sorted copy of the argument container in ascending order. * * @tparam C type of container * @param xs Container to sort * @return sorted version of container */ template std::vector sort_indices_desc(const C& xs) { return sort_indices(xs); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/real.hpp0000644000176200001440000000101614645137106022151 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_REAL_HPP #define STAN_MATH_PRIM_FUN_REAL_HPP #include #include namespace stan { namespace math { /** * Return the real component of the complex argument. 
* * @tparam T value type of complex argument * @param[in] z complex value whose real component is extracted * @return real component of argument */ template > T real(const std::complex& z) { return z.real(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_sum_exp_signed.hpp0000644000176200001440000000263014645137106025103 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_SUM_EXP_SIGNED_HPP #define STAN_MATH_PRIM_FUN_LOG_SUM_EXP_SIGNED_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Calculates the log sum of exponentials without overflow, * accounting for the signs of the inputs * * @tparam T1 type of the first variable * @tparam T2 type of the second variable * @param a the first variable * @param a_sign sign of the first variable * @param b the second variable * @param b_sign sign of the second variable */ template * = nullptr> inline std::tuple, int> log_sum_exp_signed(const T1& a, int a_sign, const T2& b, int b_sign) { if (a_sign == b_sign) { return std::make_tuple(log_sum_exp(a, b), a_sign); } bool a_larger = (a > b); return std::make_tuple(a_larger ? log_diff_exp(a, b) : log_diff_exp(b, a), a_larger ? a_sign : b_sign); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/grad_reg_inc_beta.hpp0000644000176200001440000000307014645137106024626 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GRAD_REG_INC_BETA_HPP #define STAN_MATH_PRIM_FUN_GRAD_REG_INC_BETA_HPP #include #include #include #include #include namespace stan { namespace math { /** * Computes the gradients of the regularized incomplete beta * function. Specifically, this function computes gradients of * ibeta(a, b, z), with respect to the arguments * a and b. 
* * @tparam T type of arguments * @param[out] g1 partial derivative of ibeta(a, b, z) * with respect to a * @param[out] g2 partial derivative of ibeta(a, b, * z) with respect to b * @param[in] a a * @param[in] b b * @param[in] z z * @param[in] digammaA the value of digamma(a) * @param[in] digammaB the value of digamma(b) * @param[in] digammaSum the value of digamma(a + b) * @param[in] betaAB the value of beta(a, b) */ template void grad_reg_inc_beta(T& g1, T& g2, const T& a, const T& b, const T& z, const T& digammaA, const T& digammaB, const T& digammaSum, const T& betaAB) { using std::exp; T dBda = 0; T dBdb = 0; grad_inc_beta(dBda, dBdb, a, b, z); T b1 = beta(a, b) * inc_beta(a, b, z); g1 = (dBda - b1 * (digammaA - digammaSum)) / betaAB; g2 = (dBdb - b1 * (digammaB - digammaSum)) / betaAB; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/gp_exp_quad_cov.hpp0000644000176200001440000002467514645137106024411 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GP_EXP_QUAD_COV_HPP #define STAN_MATH_PRIM_FUN_GP_EXP_QUAD_COV_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Returns a squared exponential kernel. * * @tparam T_x type for each scalar * @tparam T_sigma type of parameter sigma * @tparam T_l type of parameter length scale * * @param x std::vector of scalars that can be used in square distance. * This function assumes each element of x is the same size. 
* @param sigma_sq square root of the marginal standard deviation or magnitude * @param neg_half_inv_l_sq The half negative inverse of the length scale * @return squared distance */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exp_quad_cov(const std::vector &x, const T_sigma &sigma_sq, const T_l &neg_half_inv_l_sq) { using std::exp; const size_t x_size = x.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); cov.diagonal().array() = sigma_sq; size_t block_size = 10; for (size_t jb = 0; jb < x.size(); jb += block_size) { for (size_t ib = jb; ib < x.size(); ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { cov.coeffRef(i, j) = sigma_sq * exp(squared_distance(x[i], x[j]) * neg_half_inv_l_sq); } } } } cov.template triangularView() = cov.transpose(); return cov; } /** * Returns a squared exponential kernel. * * This function is for the cross covariance matrix * needed to compute posterior predictive density. * * @tparam T_x1 type of first std::vector of scalars * @tparam T_x2 type of second std::vector of scalars * This function assumes each element of x1 and x2 are the same size. 
* @tparam T_sigma type of sigma * @tparam T_l type of of length scale * * @param x1 std::vector of elements that can be used in square distance * @param x2 std::vector of elements that can be used in square distance * @param sigma_sq square root of the marginal standard deviation or magnitude * @param neg_half_inv_l_sq The half negative inverse of the length scale * @return squared distance */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exp_quad_cov(const std::vector &x1, const std::vector &x2, const T_sigma &sigma_sq, const T_l &neg_half_inv_l_sq) { using std::exp; Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1.size(), x2.size()); size_t block_size = 10; for (size_t ib = 0; ib < x1.size(); ib += block_size) { for (size_t jb = 0; jb < x2.size(); jb += block_size) { size_t j_end = std::min(x2.size(), jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1.size(), ib + block_size); for (size_t i = ib; i < i_end; ++i) { cov.coeffRef(i, j) = sigma_sq * exp(squared_distance(x1[i], x2[j]) * neg_half_inv_l_sq); } } } } return cov; } } // namespace internal /** * Returns a squared exponential kernel. * * @tparam T_x type for each scalar * @tparam T_sigma type of parameter sigma * @tparam T_l type of parameter length scale * * @param x std::vector of scalars that can be used in square distance. * This function assumes each element of x is the same size. 
* @param sigma marginal standard deviation or magnitude * @param length_scale length scale * @return squared distance * @throw std::domain_error if sigma <= 0, l <= 0, or * x is nan or infinite */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exp_quad_cov(const std::vector &x, const T_sigma &sigma, const T_l &length_scale) { check_positive("gp_exp_quad_cov", "magnitude", sigma); check_positive("gp_exp_quad_cov", "length scale", length_scale); const size_t x_size = x.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } for (size_t n = 0; n < x_size; ++n) { check_not_nan("gp_exp_quad_cov", "x", x[n]); } cov = internal::gp_exp_quad_cov(x, square(sigma), -0.5 / square(length_scale)); return cov; } /** * Returns a squared exponential kernel. * * @tparam T_x type for each scalar * @tparam T_sigma type of parameter sigma * @tparam T_l type of each length scale parameter * * @param x std::vector of Eigen vectors of scalars. * @param sigma marginal standard deviation or magnitude * @param length_scale std::vector length scale * @return squared distance * @throw std::domain_error if sigma <= 0, l <= 0, or * x is nan or infinite */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exp_quad_cov(const std::vector> &x, const T_sigma &sigma, const std::vector &length_scale) { check_positive_finite("gp_exp_quad_cov", "magnitude", sigma); check_positive_finite("gp_exp_quad_cov", "length scale", length_scale); size_t x_size = x.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } check_size_match("gp_exp_quad_cov", "x dimension", x[0].size(), "number of length scales", length_scale.size()); cov = internal::gp_exp_quad_cov(divide_columns(x, length_scale), square(sigma), -0.5); return cov; } /** * Returns a squared exponential kernel. 
* * This function is for the cross covariance matrix * needed to compute posterior predictive density. * * @tparam T_x1 type of first std::vector of scalars * @tparam T_x2 type of second std::vector of scalars * This function assumes each element of x1 and x2 are the same size. * @tparam T_sigma type of sigma * @tparam T_l type of of length scale * * @param x1 std::vector of elements that can be used in square distance * @param x2 std::vector of elements that can be used in square distance * @param sigma standard deviation * @param length_scale length scale * @return squared distance * @throw std::domain_error if sigma <= 0, l <= 0, or * x is nan or infinite */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exp_quad_cov(const std::vector &x1, const std::vector &x2, const T_sigma &sigma, const T_l &length_scale) { const char *function_name = "gp_exp_quad_cov"; check_positive(function_name, "magnitude", sigma); check_positive(function_name, "length scale", length_scale); const size_t x1_size = x1.size(); const size_t x2_size = x2.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } for (size_t i = 0; i < x1_size; ++i) { check_not_nan(function_name, "x1", x1[i]); } for (size_t i = 0; i < x2_size; ++i) { check_not_nan(function_name, "x2", x2[i]); } cov = internal::gp_exp_quad_cov(x1, x2, square(sigma), -0.5 / square(length_scale)); return cov; } /** * Returns a squared exponential kernel. * * This function is for the cross covariance * matrix needed to compute the posterior predictive density. * * @tparam T_x1 type of first std::vector of elements * @tparam T_x2 type of second std::vector of elements * @tparam T_s type of sigma * @tparam T_l type of length scale * * @param x1 std::vector of Eigen vectors of scalars. * @param x2 std::vector of Eigen vectors of scalars. 
* @param sigma standard deviation * @param length_scale std::vector of length scale * @return squared distance * @throw std::domain_error if sigma <= 0, l <= 0, or * x is nan or infinite */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_exp_quad_cov(const std::vector> &x1, const std::vector> &x2, const T_s &sigma, const std::vector &length_scale) { size_t x1_size = x1.size(); size_t x2_size = x2.size(); size_t l_size = length_scale.size(); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } const char *function_name = "gp_exp_quad_cov"; for (size_t i = 0; i < x1_size; ++i) { check_not_nan(function_name, "x1", x1[i]); } for (size_t i = 0; i < x2_size; ++i) { check_not_nan(function_name, "x2", x2[i]); } check_positive_finite(function_name, "magnitude", sigma); check_positive_finite(function_name, "length scale", length_scale); check_size_match(function_name, "x dimension", x1[0].size(), "number of length scales", l_size); check_size_match(function_name, "x dimension", x2[0].size(), "number of length scales", l_size); cov = internal::gp_exp_quad_cov(divide_columns(x1, length_scale), divide_columns(x2, length_scale), square(sigma), -0.5); return cov; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/multiply_lower_tri_self_transpose.hpp0000644000176200001440000000231414645137106030304 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MULTIPLY_LOWER_TRI_SELF_TRANSPOSE_HPP #define STAN_MATH_PRIM_FUN_MULTIPLY_LOWER_TRI_SELF_TRANSPOSE_HPP #include #include namespace stan { namespace math { /** * Returns the result of multiplying the lower triangular * portion of the input matrix by its own transpose. * * @param L Matrix to multiply. * @return The lower triangular values in L times their own * transpose. 
*/ template * = nullptr, require_not_st_autodiff* = nullptr> inline matrix_d multiply_lower_tri_self_transpose(const EigMat& L) { int K = L.rows(); if (K == 0) { return L; } if (K == 1) { matrix_d result(1, 1); result.coeffRef(0) = square(L.coeff(0, 0)); return result; } int J = L.cols(); matrix_d LLt(K, K); matrix_d Lt = L.transpose(); for (int m = 0; m < K; ++m) { int k = (J < m + 1) ? J : m + 1; LLt(m, m) = Lt.col(m).head(k).squaredNorm(); for (int n = (m + 1); n < K; ++n) { LLt(n, m) = LLt(m, n) = Lt.col(m).head(k).dot(Lt.col(n).head(k)); } } return LLt; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/read_cov_matrix.hpp0000644000176200001440000000454314645137106024404 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_READ_COV_MATRIX_HPP #define STAN_MATH_PRIM_FUN_READ_COV_MATRIX_HPP #include #include #include #include namespace stan { namespace math { /** * A generally worse alternative to call prior to evaluating the * density of an elliptical distribution * * @tparam T_CPCs type of \c T_CPCs vector (must be derived from \c * Eigen::ArrayBase and have one compile-time dimension equal to 1) * @tparam T_sds type of \c sds vector (must be derived from \c Eigen::ArrayBase * and have one compile-time dimension equal to 1) * @param CPCs on (-1, 1) * @param sds on (0, inf) * @param log_prob the log probability value to increment with the Jacobian * @return Covariance matrix for specified partial correlations. 
*/ template * = nullptr, require_vt_same* = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> read_cov_matrix(const T_CPCs& CPCs, const T_sds& sds, value_type_t& log_prob) { Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> L = read_cov_L(CPCs, sds, log_prob); return multiply_lower_tri_self_transpose(L); } /** * Builds a covariance matrix from CPCs and standard deviations * * @tparam T_CPCs type of \c T_CPCs vector (must be derived from \c * Eigen::ArrayBase and have one compile-time dimension equal to 1) * @tparam T_sds type of \c sds vector (must be derived from \c Eigen::ArrayBase * and have one compile-time dimension equal to 1) * @param CPCs in (-1, 1) * @param sds in (0, inf) */ template * = nullptr, require_vt_same* = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> read_cov_matrix(const T_CPCs& CPCs, const T_sds& sds) { size_t K = sds.rows(); Eigen::DiagonalMatrix, Eigen::Dynamic> D(K); D.diagonal() = sds; Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> L = D * read_corr_L(CPCs, K); return multiply_lower_tri_self_transpose(L); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/pseudo_eigenvalues.hpp0000644000176200001440000000106514645137106025120 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PSEUDO_EIGENVALUES_HPP #define STAN_MATH_PRIM_FUN_PSEUDO_EIGENVALUES_HPP #include #include namespace stan { namespace math { template Eigen::Matrix pseudo_eigenvalues(const Eigen::Matrix& m) { check_nonzero_size("pseudo_eigenvalues", "m", m); check_square("pseudo_eigenvalues", "m", m); Eigen::EigenSolver> solver(m); return solver.pseudoEigenvalueMatrix(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/imag.hpp0000644000176200001440000000103514645137106022144 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IMAG_HPP #define STAN_MATH_PRIM_FUN_IMAG_HPP #include #include namespace stan { namespace math { /** * Return the imaginary component of the complex argument. 
* * @tparam T value type of complex argument * @param[in] z complex value whose imaginary component is extracted * @return imaginary component of argument */ template > T imag(const std::complex& z) { return z.imag(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/csr_matrix_times_vector.hpp0000644000176200001440000001112714645137106026170 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CSR_MATRIX_TIMES_VECTOR_HPP #define STAN_MATH_PRIM_FUN_CSR_MATRIX_TIMES_VECTOR_HPP #include #include #include #include #include namespace stan { namespace math { /** * @defgroup csr_format Compressed Sparse Row matrix format. * A compressed Sparse Row (CSR) sparse matrix is defined by four * component vectors labeled w, v, and u. They are defined as: * - w: the non-zero values in the sparse matrix. * - v: column index for each value in w, as a result this * is the same length as w. * - u: index of where each row starts in w, length * is equal to the number of rows plus one. Last entry is * one-past-the-end in w, following the Eigen spec. * Indexing is either zero-based or one-based depending on the value of * stan::error_index::value. Following the definition of the format in * Eigen, we allow for unused garbage values in w/v which are never read. * All indexing _internal_ to a given function is zero-based. * * With only m/n/w/v/u in hand, it is possible to check all * dimensions are sane _except_ the column dimension since it is * implicit. The error-checking strategy is to check all dimensions * except the column dimension before any work is done inside a * function. The column index is checked as it is constructed and * used for each entry. If the column index is not needed it is * not checked. As a result indexing mistakes might produce non-sensical * operations but out-of-bounds indexing will be caught. 
* * Except for possible garbage values in w/v/u, memory usage is * calculated from the number of non-zero entries (NNZE) and the number of * rows (NR): 2*NNZE + 2*NR + 1. */ /** * \addtogroup csr_format * Return the multiplication of the sparse matrix (specified by * by values and indexing) by the specified dense vector. * * The sparse matrix X of dimension m by n is represented by the * vector w (of values), the integer array v (containing one-based * column index of each value), the integer array u (containing * one-based indexes of where each row starts in w). * * @tparam T1 type of the sparse matrix * @tparam T2 type of the dense vector * @param m Number of rows in matrix. * @param n Number of columns in matrix. * @param w Vector of non-zero values in matrix. * @param v Column index of each non-zero value, same * length as w. * @param u Index of where each row starts in w, length equal to * the number of rows plus one. * @param b Eigen vector which the matrix is multiplied by. * @return Dense vector for the product. * @throw std::domain_error if m and n are not positive or are nan. * @throw std::domain_error if the implied sparse matrix and b are * not multiplicable. * @throw std::invalid_argument if m/n/w/v/u are not internally * consistent, as defined by the indexing scheme. Extractors are * defined in Stan which guarantee a consistent set of m/n/w/v/u * for a given sparse matrix. * @throw std::out_of_range if any of the indexes are out of range. 
*/ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, 1> csr_matrix_times_vector(int m, int n, const T1& w, const std::vector& v, const std::vector& u, const T2& b) { check_positive("csr_matrix_times_vector", "m", m); check_positive("csr_matrix_times_vector", "n", n); check_size_match("csr_matrix_times_vector", "n", n, "b", b.size()); check_size_match("csr_matrix_times_vector", "w", w.size(), "v", v.size()); check_size_match("csr_matrix_times_vector", "m", m, "u", u.size() - 1); check_size_match("csr_matrix_times_vector", "u/z", u[m - 1] + csr_u_to_z(u, m - 1) - 1, "v", v.size()); for (int i : v) { check_range("csr_matrix_times_vector", "v[]", n, i); } std::vector v_zero_based(v.size()); std::transform(v.begin(), v.end(), v_zero_based.begin(), [](auto&& x) { return x - 1; }); std::vector u_zero_based(u.size()); std::transform(u.begin(), u.end(), u_zero_based.begin(), [](auto&& x) { return x - 1; }); // u_zero_based[u.size()] = w.size(); auto&& w_ref = to_ref(w); Eigen::Map, Eigen::RowMajor>> w_mat(m, n, w_ref.size(), u_zero_based.data(), v_zero_based.data(), w_ref.data()); return w_mat * b; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/get_base1.hpp0000644000176200001440000002765114645137106023075 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GET_BASE1_HPP #define STAN_MATH_PRIM_FUN_GET_BASE1_HPP #include #include #include #include namespace stan { namespace math { /** * Return a reference to the value of the specified vector at the * specified base-one index. If the index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i Index into vector plus 1. * @param error_msg Error message if the index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. 
* @return Value of vector at i - 1 * @throw std::out_of_range if idx is out of range. */ template inline const T& get_base1(const std::vector& x, size_t i, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i, idx, error_msg); return x[i - 1]; } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline const T& get_base1(const std::vector >& x, size_t i1, size_t i2, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1(x[i1 - 1], i2, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. 
*/ template inline const T& get_base1(const std::vector > >& x, size_t i1, size_t i2, size_t i3, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1(x[i1 - 1], i2, i3, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline const T& get_base1( const std::vector > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1(x[i1 - 1], i2, i3, i4, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. 
*/ template inline const T& get_base1( const std::vector< std::vector > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1(x[i1 - 1], i2, i3, i4, i5, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param i6 Sixth index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline const T& get_base1( const std::vector > > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1(x[i1 - 1], i2, i3, i4, i5, i6, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param i6 Sixth index plus 1. * @param i7 Seventh index plus 1. * @param error_msg Error message if an index is out of range. 
* @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline const T& get_base1( const std::vector > > > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, size_t i7, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1(x[i1 - 1], i2, i3, i4, i5, i6, i7, error_msg, idx + 1); } /** * Return a reference to the value of the specified vector at the * specified base-one indexes. If an index is out of range, throw * a std::out_of_range exception with the specified * error message and index indicated. * * @tparam T type of value * @param x Vector from which to get a value. * @param i1 First index plus 1. * @param i2 Second index plus 1. * @param i3 Third index plus 1. * @param i4 Fourth index plus 1. * @param i5 Fifth index plus 1. * @param i6 Sixth index plus 1. * @param i7 Seventh index plus 1. * @param i8 Eigth index plus 1. * @param error_msg Error message if an index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of vector at indexes. * @throw std::out_of_range if idx is out of range. */ template inline const T& get_base1( const std::vector > > > > > > >& x, size_t i1, size_t i2, size_t i3, size_t i4, size_t i5, size_t i6, size_t i7, size_t i8, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), i1, idx, error_msg); return get_base1(x[i1 - 1], i2, i3, i4, i5, i6, i7, i8, error_msg, idx + 1); } /** * Return a copy of the row of the specified matrix at the specified * base-one row index. If the index is out of range, throw a * std::out_of_range exception with the specified * error message and index indicated. 
* * Warning: Because a copy is involved, it is inefficient * to access element of matrices by first using this method * to get a row then using a second call to get the value at a specified column. * * @tparam EigMat type of the matrix * @param x Matrix from which to get a row * @param m Index into matrix plus 1. * @param error_msg Error message if the index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Row of matrix at i - 1. * @throw std::out_of_range if idx is out of range. */ template * = nullptr, require_not_eigen_vector_t* = nullptr> inline Eigen::Matrix, 1, Eigen::Dynamic> get_base1( const EigMat& x, size_t m, const char* error_msg, size_t idx) { check_range("[]", "rows of x", x.rows(), m, idx, error_msg); return x.row(m - 1); } /** * Return a reference to the value of the specified matrix at the specified * base-one row and column indexes. If either index is out of range, * throw a std::out_of_range exception with the * specified error message and index indicated. * * @tparam EigMat type of the matrix * @param x Matrix from which to get a row * @param m Row index plus 1. * @param n Column index plus 1. * @param error_msg Error message if either index is out of range. * @param idx Nested index level to report in error message if * either index is out of range. * @return Value of matrix at row m - 1 and column * n - 1. * @throw std::out_of_range if idx is out of range. */ template * = nullptr> inline const value_type_t& get_base1(const EigMat& x, size_t m, size_t n, const char* error_msg, size_t idx) { check_range("[]", "rows of x", x.rows(), m, idx, error_msg); check_range("[]", "cols of x", x.cols(), n, idx + 1, error_msg); return x.coeff(m - 1, n - 1); } /** * Return a reference to the value of the specified Eigen vector * at the specified base-one index. If the index is out of range, * throw a std::out_of_range exception with the * specified error message and index indicated. 
* * @tparam EigVec type of the vector * @param x Eigen vector from which to get a value. * @param m Row index plus 1. * @param error_msg Error message if the index is out of range. * @param idx Nested index level to report in error message if * the index is out of range. * @return Value of Eigen vector at row m - 1. * @throw std::out_of_range if idx is out of range. */ template * = nullptr> inline const value_type_t& get_base1(const EigVec& x, size_t m, const char* error_msg, size_t idx) { check_range("[]", "x", x.size(), m, idx, error_msg); return x.coeff(m - 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/factor_cov_matrix.hpp0000644000176200001440000000351514645137106024745 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FACTOR_COV_MATRIX_HPP #define STAN_MATH_PRIM_FUN_FACTOR_COV_MATRIX_HPP #include #include #include namespace stan { namespace math { /** * This function is intended to make starting values, given a * covariance matrix Sigma * * The transformations are hard coded as log for standard * deviations and Fisher transformations (atanh()) of CPCs * * @tparam T type of elements in the matrix and arrays * @param[in] Sigma covariance matrix * @param[out] CPCs fill this unbounded (does not resize) * @param[out] sds fill this unbounded * @return false if any of the diagonals of Sigma are 0 */ template * = nullptr, require_all_eigen_vector_t* = nullptr, require_all_vt_same* = nullptr> bool factor_cov_matrix(const T_Sigma& Sigma, T_CPCs&& CPCs, T_sds&& sds) { using T_scalar = value_type_t; size_t K = sds.rows(); const Eigen::Ref>& Sigma_ref = Sigma; sds = Sigma_ref.diagonal().array(); if ((sds <= 0.0).any()) { return false; } sds = sds.sqrt(); Eigen::DiagonalMatrix D(K); D.diagonal() = sds.inverse(); sds = sds.log(); // now unbounded Eigen::Matrix R = D * Sigma_ref * D; // to hopefully prevent pivoting due to floating point error R.diagonal().setOnes(); Eigen::LDLT> ldlt; ldlt = R.ldlt(); if (!ldlt.isPositive()) { 
return false; } Eigen::Matrix U = ldlt.matrixU(); factor_U(U, CPCs); return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ldexp.hpp0000644000176200001440000000243314645137106022346 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LDEXP_HPP #define STAN_MATH_PRIM_FUN_LDEXP_HPP #include #include #include namespace stan { namespace math { /** * Returns the product of a (the significand) and * 2 to power b (the exponent). * * @tparam T1 scalar type of significand * @param[in] a the significand * @param[in] b an integer that is the exponent * @return product of a times 2 to the power b */ template * = nullptr> inline double ldexp(T1 a, int b) { using std::ldexp; return ldexp(a, b); } /** * Enables the vectorized application of the ldexp function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return ldexp function applied to the two inputs. */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< T1, T2>* = nullptr> inline auto ldexp(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return ldexp(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sqrt.hpp0000644000176200001440000000414614645137106022226 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SQRT_HPP #define STAN_MATH_PRIM_FUN_SQRT_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `sqrt()` so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return Square root of x. */ struct sqrt_fun { template static inline auto fun(const T& x) { using std::sqrt; return sqrt(x); } }; /** * Vectorized version of `sqrt()`. * * @tparam Container type of container * @param x container * @return Square root of each value in x. 
*/ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr, require_not_var_matrix_t* = nullptr> inline auto sqrt(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `sqrt()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Square root of each value in x. */ template * = nullptr, require_not_var_matrix_t* = nullptr> inline auto sqrt(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().sqrt(); }); } namespace internal { /** * Return the square root of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return square root of the argument */ template inline std::complex complex_sqrt(const std::complex& z) { auto m = sqrt(hypot(z.real(), z.imag())); auto at = 0.5 * atan2(z.imag(), z.real()); return {m * cos(at), m * sin(at)}; } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv_sqrt.hpp0000644000176200001440000000410614645137106023076 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_SQRT_HPP #define STAN_MATH_PRIM_FUN_INV_SQRT_HPP #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> inline auto inv_sqrt(T x) { using std::sqrt; return inv(sqrt(x)); } /** * Structure to wrap `1 / sqrt(x)` so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return inverse square root of x. */ struct inv_sqrt_fun { template static inline auto fun(const T& x) { return inv_sqrt(x); } }; /** * Return the elementwise `1 / sqrt(x)}` of the specified argument, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x container * @return inverse square root of each value in x. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr, require_not_stan_scalar_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto inv_sqrt(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `inv_sqrt()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return inverse square root each variable in the container. */ template * = nullptr, require_container_st* = nullptr> inline auto inv_sqrt(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().rsqrt(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/falling_factorial.hpp0000644000176200001440000000547214645137106024700 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FALLING_FACTORIAL_HPP #define STAN_MATH_PRIM_FUN_FALLING_FACTORIAL_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the falling factorial function evaluated * at the inputs. * Will throw for NaN x and for negative n * * @tparam T Type of x argument. * @param x Argument. * @param n Argument * @return Result of falling factorial function. 
* @throw std::domain_error if x is NaN * @throw std::domain_error if n is negative * \f[ \mbox{falling\_factorial}(x, n) = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ (x)_n & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{falling\_factorial}(x, n)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \frac{\partial\, (x)_n}{\partial x} & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{falling\_factorial}(x, n)}{\partial n} = \begin{cases} \textrm{error} & \mbox{if } x \leq 0\\ \frac{\partial\, (x)_n}{\partial n} & \mbox{if } x > 0 \textrm{ and } -\infty \leq n \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } n = \textrm{NaN} \end{cases} \f] \f[ (x)_n=\frac{\Gamma(x+1)}{\Gamma(x-n+1)} \f] \f[ \frac{\partial \, (x)_n}{\partial x} = (x)_n\Psi(x+1) \f] \f[ \frac{\partial \, (x)_n}{\partial n} = -(x)_n\Psi(n+1) \f] * */ template * = nullptr> inline return_type_t falling_factorial(const T& x, int n) { constexpr const char* function = "falling_factorial"; check_not_nan(function, "first argument", x); check_nonnegative(function, "second argument", n); return boost::math::falling_factorial(x, n, boost_policy_t<>()); } /** * Enables the vectorized application of the falling factorial function, when * the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Falling factorial function applied to the two inputs. 
*/ template * = nullptr, require_all_not_var_matrix_t* = nullptr> inline auto falling_factorial(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return falling_factorial(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mean.hpp0000644000176200001440000000153214645137106022151 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MEAN_HPP #define STAN_MATH_PRIM_FUN_MEAN_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the sample mean (i.e., average) of the coefficients * in the specified std vector, vector, row vector, or matrix. * * @tparam T type of the matrix * * @param m Specified std vector, vector, row vector, or matrix. * @return Sample mean of container coefficients. */ template * = nullptr> inline return_type_t mean(const T& m) { check_nonzero_size("mean", "m", m); return apply_vector_unary::reduce(m, [](const auto& a) { return a.mean(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/if_else.hpp0000644000176200001440000000177514645137106022650 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IF_ELSE_HPP #define STAN_MATH_PRIM_FUN_IF_ELSE_HPP #include namespace stan { namespace math { /** * Return the second argument if the first argument is true * and otherwise return the second argument. * *

This is just a convenience method to provide a function * with the same behavior as the built-in ternary operator. * In general, this function behaves as if defined by * *

if_else(c, y1, y0) = c ? y1 : y0. * * @tparam T_true type of the true argument * @tparam T_false type of the false argument * @param c Boolean condition value. * @param y_true Value to return if condition is true. * @param y_false Value to return if condition is false. */ template inline return_type_t if_else(const bool c, const T_true y_true, const T_false y_false) { return c ? y_true : y_false; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_complex.hpp0000644000176200001440000000303614645137106023403 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_COMPLEX_HPP #define STAN_MATH_PRIM_FUN_TO_COMPLEX_HPP #include #include #include namespace stan { namespace math { /** * Return a complex value from a real component and an imaginary component. * Default values for both components is 0. * * @tparam T type of real component * @tparam S type of imaginary component * @param[in] re real component (default = 0) * @param[in] im imaginary component (default = 0) * @return complex value with specified real and imaginary components */ template * = nullptr> constexpr inline std::complex> to_complex( const T& re = 0, const S& im = 0) { return std::complex>(re, im); } /** * Return a complex valued container * from a real component and an imaginary component. 
* * @tparam T type of real component * @tparam S type of imaginary component * @param[in] re real component * @param[in] im imaginary component * @return complex valued container with specified real and imaginary components */ template * = nullptr, require_all_st_stan_scalar* = nullptr> inline auto to_complex(const T1& re, const T2& im) { return apply_scalar_binary(re, im, [&](const auto& c, const auto& d) { return stan::math::to_complex(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log1m.hpp0000644000176200001440000000371614645137106022256 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG1M_HPP #define STAN_MATH_PRIM_FUN_LOG1M_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the natural logarithm of one minus the specified value. * * The main use of this function is to cut down on intermediate * values during algorithmic differentiation. * \f[ \mbox{log1m}(x) = \begin{cases} \ln(1-x) & \mbox{if } x \leq 1 \\ \textrm{NaN} & \mbox{if } x > 1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{log1m}(x)}{\partial x} = \begin{cases} -\frac{1}{1-x} & \mbox{if } x \leq 1 \\ \textrm{NaN} & \mbox{if } x > 1\\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN} \end{cases} \f] * * @param[in] x Argument. * @return Natural log of one minus the argument. * @throw std::domain_error If the argument is greater than 1. * @throw std::overflow_error If the computation overflows. */ inline double log1m(double x) { if (!is_nan(x)) { check_less_or_equal("log1m", "x", x, 1); } return stan::math::log1p(-x); } /** * Structure to wrap log1m() so it can be vectorized. * * @tparam T type of variable * @param x variable * @return Natural log of (1 - x). */ struct log1m_fun { template static inline auto fun(const T& x) { return log1m(x); } }; /** * Vectorized version of log1m(). 
* * @tparam T type of container * @param x container * @return Natural log of 1 minus each value in x. */ template < typename T, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto log1m(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/get_real.hpp0000644000176200001440000000261314645137106023014 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_GET_REAL_HPP #define STAN_MATH_PRIM_FUN_GET_REAL_HPP #include #include namespace stan { namespace math { /** * Return the real component of the complex argument. * * @tparam T value type of complex argument * @param[in] z complex value whose real component is extracted * @return real component of argument */ template inline T get_real(const std::complex& z) { return z.real(); } /** * Return the real component of the complex argument. * * @tparam Eig A type derived from `Eigen::EigenBase` * @param[in] z complex value whose real component is extracted * @return real component of argument */ template * = nullptr> inline auto get_real(const Eig& z) { return z.real(); } /** * Return the real component of the complex argument. * * @tparam StdVec A `std::vector` type with complex scalar type * @param[in] z complex value whose real component is extracted * @return real component of argument */ template * = nullptr> inline auto get_real(const StdVec& z) { promote_scalar_t, StdVec> result(z.size()); std::transform(z.begin(), z.end(), result.begin(), [](auto&& x) { return get_real(x); }); return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fdim.hpp0000644000176200001440000000223114645137106022145 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FDIM_HPP #define STAN_MATH_PRIM_FUN_FDIM_HPP #include #include namespace stan { namespace math { /** * Return the positive difference of the specified values (C++11). 
* * The function is defined by * * fdim(x, y) = (x > y) ? (x - y) : 0. * * @param x First value. * @param y Second value. * @return max(x- y, 0) */ template * = nullptr> inline double fdim(T1 x, T2 y) { using std::fdim; return fdim(x, y); } /** * Enables the vectorized application of the fdim function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Fdim function applied to the two inputs. */ template * = nullptr> inline auto fdim(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return fdim(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/primitive_value.hpp0000644000176200001440000000250314645137106024434 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PRIMITIVE_VALUE_HPP #define STAN_MATH_PRIM_FUN_PRIMITIVE_VALUE_HPP #include #include #include namespace stan { namespace math { /** * Return the value of the specified arithmetic argument * unmodified with its own declared type. * *

This template function can only be instantiated with * arithmetic types as defined by std library's * is_arithmetic trait metaprogram. * *

This function differs from value_of in that it * does not cast all return types to double. * * @tparam T type of arithmetic input. * @param x input. * @return input unmodified. */ template inline typename std::enable_if::value, T>::type primitive_value(T x) { return x; } /** * Return the primitive value of the specified argument. * *

This implementation only applies to non-arithmetic types as * defined by std library's is_arithmetic trait metaprogram. * * @tparam T type of non-arithmetic input. * @param x input. * @return value of input. */ template inline typename std::enable_if::value, double>::type primitive_value(const T& x) { return value_of(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_mix.hpp0000644000176200001440000001617714645137106022702 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_MIX_HPP #define STAN_MATH_PRIM_FUN_LOG_MIX_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the log mixture density with specified mixing proportion * and log densities. * * \f[ * \mbox{log\_mix}(\theta, \lambda_1, \lambda_2) * = \log \left( \theta \lambda_1 + (1 - \theta) \lambda_2 \right). * \f] * * @tparam T_theta type of mixing proportion - must be an arithmetic type * @tparam T_lambda1 type of first log density - must be an arithmetic type * @tparam T_lambda2 type of second log density - must be an arithmetic type * @param[in] theta mixing proportion in [0, 1]. * @param[in] lambda1 first log density. * @param[in] lambda2 second log density. * @return log mixture of densities in specified proportion */ template * = nullptr> inline double log_mix(T_theta theta, T_lambda1 lambda1, T_lambda2 lambda2) { using std::log; check_not_nan("log_mix", "lambda1", lambda1); check_not_nan("log_mix", "lambda2", lambda2); check_bounded("log_mix", "theta", theta, 0, 1); return log_sum_exp(log(theta) + lambda1, log1m(theta) + lambda2); } /** * Return the log mixture density with specified mixing proportions * and log densities. 
* * \f[ * \frac{\partial }{\partial p_x} * \log\left(\exp^{\log\left(p_1\right)+d_1}+\cdot\cdot\cdot+ * \exp^{\log\left(p_n\right)+d_n}\right) * =\frac{e^{d_x}}{e^{d_1}p_1+\cdot\cdot\cdot+e^{d_m}p_m} * \f] * * \f[ * \frac{\partial }{\partial d_x} * \log\left(\exp^{\log\left(p_1\right)+d_1}+\cdot\cdot\cdot+ * \exp^{\log\left(p_n\right)+d_n}\right) * =\frac{e^{d_x}p_x}{e^{d_1}p_1+\cdot\cdot\cdot+e^{d_m}p_m} * \f] * * @tparam T_theta Type of theta. This can be a scalar, std vector or row/column * vector. * @tparam T_lam Type of lambda. This can be a scalar, std vector or row/column * vector. * @param theta std/row/col vector of mixing proportions in [0, 1]. * @param lambda std/row/col vector of log densities. * @return log mixture of densities in specified proportion */ template * = nullptr> return_type_t log_mix(const T_theta& theta, const T_lam& lambda) { static const char* function = "log_mix"; using T_partials_return = partials_return_t; using T_partials_vec = typename Eigen::Matrix; using T_theta_ref = ref_type_t; using T_lam_ref = ref_type_t; check_consistent_sizes(function, "theta", theta, "lambda", lambda); T_theta_ref theta_ref = theta; T_lam_ref lambda_ref = lambda; check_bounded(function, "theta", theta_ref, 0, 1); check_finite(function, "lambda", lambda_ref); const auto& theta_dbl = to_ref(value_of(as_column_vector_or_scalar(theta_ref))); const auto& lam_dbl = to_ref(value_of(as_column_vector_or_scalar(lambda_ref))); T_partials_return logp = log_sum_exp(log(theta_dbl) + lam_dbl); auto ops_partials = make_partials_propagator(theta_ref, lambda_ref); if (!is_constant_all::value) { T_partials_vec theta_deriv = (lam_dbl.array() - logp).exp(); if (!is_constant_all::value) { partials<1>(ops_partials) = theta_deriv.cwiseProduct(theta_dbl); } if (!is_constant_all::value) { partials<0>(ops_partials) = std::move(theta_deriv); } } return ops_partials.build(logp); } /** * Return the log mixture density given specified mixing proportions * and array of log density 
vectors. * * \f[ * \frac{\partial }{\partial p_x}\left[ * \log\left(\exp^{\log\left(p_1\right)+d_1}+\cdot\cdot\cdot+ * \exp^{\log\left(p_n\right)+d_n}\right)+ * \log\left(\exp^{\log\left(p_1\right)+f_1}+\cdot\cdot\cdot+ * \exp^{\log\left(p_n\right)+f_n}\right)\right] * =\frac{e^{d_x}}{e^{d_1}p_1+\cdot\cdot\cdot+e^{d_m}p_m}+ * \frac{e^{f_x}}{e^{f_1}p_1+\cdot\cdot\cdot+e^{f_m}p_m} * \f] * * \f[ * \frac{\partial }{\partial d_x}\left[ * \log\left(\exp^{\log\left(p_1\right)+d_1}+\cdot\cdot\cdot+ * \exp^{\log\left(p_n\right)+d_n}\right) * +\log\left(\exp^{\log\left(p_1\right)+f_1}+\cdot\cdot\cdot+ * \exp^{\log\left(p_n\right)+f_n}\right)\right] * =\frac{e^{d_x}p_x}{e^{d_1}p_1+\cdot\cdot\cdot+e^{d_m}p_m} * \f] * * @tparam T_theta Type of theta. This can be a scalar, std vector or row/column * vector * @tparam T_lam Type of vector in std vector lambda. This can be std vector or * row/column vector. * @param theta std/row/col vector of mixing proportions in [0, 1]. * @param lambda std vector containing std/row/col vectors of log densities. 
* @return log mixture of densities in specified proportion */ template * = nullptr> return_type_t> log_mix( const T_theta& theta, const std::vector& lambda) { static const char* function = "log_mix"; using T_partials_return = partials_return_t>; using T_partials_vec = typename Eigen::Matrix; using T_partials_mat = typename Eigen::Matrix; using T_theta_ref = ref_type_t; const int N = stan::math::size(lambda); const int M = theta.size(); T_theta_ref theta_ref = theta; check_bounded(function, "theta", theta_ref, 0, 1); for (int n = 0; n < N; ++n) { check_not_nan(function, "lambda", lambda[n]); check_finite(function, "lambda", lambda[n]); check_consistent_sizes(function, "theta", theta, "lambda", lambda[n]); } const auto& theta_dbl = to_ref(value_of(as_column_vector_or_scalar(theta_ref))); T_partials_mat lam_dbl(M, N); for (int n = 0; n < N; ++n) { lam_dbl.col(n) = value_of(as_column_vector_or_scalar(lambda[n])); } T_partials_mat logp_tmp = lam_dbl.colwise() + log(theta_dbl); T_partials_vec logp(N); for (int n = 0; n < N; ++n) { logp[n] = log_sum_exp(logp_tmp.col(n)); } auto ops_partials = make_partials_propagator(theta_ref, lambda); if (!is_constant_all::value) { T_partials_mat derivs = exp(lam_dbl.rowwise() - logp.transpose()); if (!is_constant_all::value) { partials<0>(ops_partials) = derivs.rowwise().sum(); } if (!is_constant_all::value) { for (int n = 0; n < N; ++n) { as_column_vector_or_scalar(partials_vec<1>(ops_partials)[n]) = derivs.col(n).cwiseProduct(theta_dbl); } } } return ops_partials.build(logp.sum()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/unit_vector_constrain.hpp0000644000176200001440000001015714645137106025655 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_UNIT_VECTOR_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_UNIT_VECTOR_CONSTRAIN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the unit length vector corresponding to the free vector y. 
* * See the * Wikipedia page on generating random points on an N-sphere. * * @tparam T type inheriting from `EigenBase` that does not have an fvar * scalar type. * @param y vector of K unrestricted variables * @return Unit length vector of dimension K */ template * = nullptr, require_not_vt_autodiff* = nullptr> inline plain_type_t unit_vector_constrain(const T& y) { using std::sqrt; check_nonzero_size("unit_vector_constrain", "y", y); auto&& y_ref = to_ref(y); value_type_t SN = dot_self(y_ref); check_positive_finite("unit_vector_constrain", "norm", SN); return y_ref.array() / sqrt(SN); } /** * Return the unit length vector corresponding to the free vector y. * See https://en.wikipedia.org/wiki/N-sphere#Generating_random_points * * @tparam T1 type inheriting from `EigenBase` that does not have an fvar * scalar type. * * @param y vector of K unrestricted variables * @return Unit length vector of dimension K * @param lp Log probability reference to increment. */ template * = nullptr, require_all_not_vt_autodiff* = nullptr> inline plain_type_t unit_vector_constrain(const T1& y, T2& lp) { using std::sqrt; check_nonzero_size("unit_vector_constrain", "y", y); auto&& y_ref = to_ref(y); value_type_t SN = dot_self(y_ref); check_positive_finite("unit_vector_constrain", "norm", SN); lp -= 0.5 * SN; return y_ref.array() / sqrt(SN); } /** * Return the unit length vector corresponding to the free vector y. If the * `Jacobian` parameter is `true`, the log density accumulator is incremented * with the log absolute Jacobian determinant of the transform. All of the * transforms are specified with their Jacobians in the *Stan Reference Manual* * chapter Constraint Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time dynamic rows * and 1 column * @param y vector of K unrestricted variables * @param[in, out] lp log density accumulator * @return Unit length vector of dimension K */ template * = nullptr> inline auto unit_vector_constrain(const T& y, return_type_t& lp) { if (Jacobian) { return unit_vector_constrain(y, lp); } else { return unit_vector_constrain(y); } } /** * Return the unit length vector corresponding to the free vector y. If the * `Jacobian` parameter is `true`, the log density accumulator is incremented * with the log absolute Jacobian determinant of the transform. All of the * transforms are specified with their Jacobians in the *Stan Reference Manual* * chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time dynamic rows and 1 column * @param y vector of K unrestricted variables * @param[in, out] lp log density accumulator * @return Unit length vector of dimension K */ template * = nullptr> inline auto unit_vector_constrain(const T& y, return_type_t& lp) { return apply_vector_unary::apply( y, [&lp](auto&& v) { return unit_vector_constrain(v, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rank.hpp0000644000176200001440000000152514645137106022166 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_RANK_HPP #define STAN_MATH_PRIM_FUN_RANK_HPP #include #include namespace stan { namespace math { /** * Return the number of components of v less than v[s]. 
* * @tparam C container type * @param[in] v input vector * @param[in] s position in vector * @return number of components of v less than v[s]. * @throw std::out_of_range if s is out of range. */ template * = nullptr> inline int rank(const C& v, int s) { check_range("rank", "v", v.size(), s); --s; // adjust for indexing by one return apply_vector_unary::reduce(v, [s](const auto& vec) { const auto& vec_ref = to_ref(vec); return (vec_ref.array() < vec_ref.coeff(s)).template cast().sum(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/row.hpp0000644000176200001440000000143114645137106022036 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ROW_HPP #define STAN_MATH_PRIM_FUN_ROW_HPP #include #include namespace stan { namespace math { /** * Return the specified row of the specified matrix, using * start-at-1 indexing. * * This is equivalent to calling m.row(i - 1) and * assigning the resulting template expression to a row vector. * * @tparam T type of the matrix * @param m Matrix. * @param i Row index (count from 1). * @return Specified row of the matrix. * @throw std::out_of_range if i is out of range. 
*/ template * = nullptr> inline auto row(const T& m, size_t i) { check_row_index("row", "i", m, i); return m.row(i - 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/eigenvectors_sym.hpp0000644000176200001440000000146614645137106024624 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGENVECTORS_SYM_HPP #define STAN_MATH_PRIM_FUN_EIGENVECTORS_SYM_HPP #include #include #include namespace stan { namespace math { template * = nullptr, require_not_st_var* = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> eigenvectors_sym(const EigMat& m) { if (unlikely(m.size() == 0)) { return Eigen::Matrix, -1, -1>(0, 0); } using PlainMat = plain_type_t; const PlainMat& m_eval = m; check_symmetric("eigenvalues_sym", "m", m_eval); Eigen::SelfAdjointEigenSolver solver(m_eval); return solver.eigenvectors(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log2.hpp0000644000176200001440000000254314645137106022077 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG2_HPP #define STAN_MATH_PRIM_FUN_LOG2_HPP #include #include #include #include namespace stan { namespace math { /** * Return natural logarithm of two. * * @return Natural logarithm of two. */ inline constexpr double log2() { return LOG_TWO; } /** * Structure to wrap `log2()` so it can be vectorized. */ struct log2_fun { /** * Return the base two logarithm of the specified argument. * * @tparam T type of argument * @param x argument * @return base two log of the argument */ template static inline auto fun(const T& x) { using std::log2; return log2(x); } }; /** * Return the elementwise application of `log2()` to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. 
* * @tparam T type of container * @param x container * @return elementwise log2 of container elements */ template * = nullptr, require_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto log2(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/gp_dot_prod_cov.hpp0000644000176200001440000002124514645137106024403 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP #define STAN_MATH_PRIM_FUN_COV_DOT_PROD_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a dot product covariance matrix. A member of Stan's Gaussian Process * Library. * * \f$k(x,x') = \sigma^2 + x \cdot x'\f$ * * A dot product covariance matrix is the same covariance matrix * as in bayesian regression with \f$N(0,1)\f$ priors on regression coefficients * and a \f$N(0,\sigma^2)\f$ prior on the constant function. See Rasmussen and * Williams et al 2006, Chapter 4. * * @tparam T_x type of std::vector of elements * @tparam T_sigma type of sigma * * @param x std::vector of elements that can be used in dot product. * This function assumes each element of x is the same size. 
* @param sigma constant function that can be used in stan::math::square * @return dot product covariance matrix that is positive semi-definite * @throw std::domain_error if sigma < 0, nan, inf or * x is nan or infinite */ template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_dot_prod_cov(const std::vector> &x, const T_sigma &sigma) { check_not_nan("gp_dot_prod_cov", "sigma", sigma); check_nonnegative("gp_dot_prod_cov", "sigma", sigma); check_finite("gp_dot_prod_cov", "sigma", sigma); size_t x_size = x.size(); for (size_t i = 0; i < x_size; ++i) { check_not_nan("gp_dot_prod_cov", "x", x[i]); check_finite("gp_dot_prod_cov", "x", x[i]); } Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } T_sigma sigma_sq = square(sigma); size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov.coeffRef(j, j) = sigma_sq + dot_self(x[j]); size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq + dot_product(x[i], x[j]); } } } } cov.coeffRef(x_size - 1, x_size - 1) = sigma_sq + dot_self(x[x_size - 1]); return cov; } /** * Returns a dot product covariance matrix. A member of Stan's Gaussian * Process Library. * * \f$k(x,x') = \sigma^2 + x \cdot x'\f$ * * A dot product covariance matrix is the same covariance matrix * as in bayesian regression with \f$N(0,1)\f$ priors on regression coefficients * and a \f$N(0,\sigma^2)\f$ prior on the constant function. See Rasmussen and * Williams et al 2006, Chapter 4. * * @tparam T_x type of std::vector of double * @tparam T_sigma type of sigma * * @param x std::vector of elements that can be used in transpose * and multiply * This function assumes each element of x is the same size. 
* @param sigma constant function that can be used in stan::math::square * @return dot product covariance matrix that is positive semi-definite * @throw std::domain_error if sigma < 0, nan, inf or * x is nan or infinite */ template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_dot_prod_cov(const std::vector &x, const T_sigma &sigma) { check_nonnegative("gp_dot_prod_cov", "sigma", sigma); check_finite("gp_dot_prod_cov", "sigma", sigma); size_t x_size = x.size(); check_finite("gp_dot_prod_cov", "x", x); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x_size, x_size); if (x_size == 0) { return cov; } T_sigma sigma_sq = square(sigma); size_t block_size = 10; for (size_t jb = 0; jb < x_size; jb += block_size) { for (size_t ib = jb; ib < x_size; ib += block_size) { size_t j_end = std::min(x_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { cov.coeffRef(j, j) = sigma_sq + x[j] * x[j]; size_t i_end = std::min(x_size, ib + block_size); for (size_t i = std::max(ib, j + 1); i < i_end; ++i) { cov.coeffRef(j, i) = cov.coeffRef(i, j) = sigma_sq + x[i] * x[j]; } } } } cov(x_size - 1, x_size - 1) = sigma_sq + x[x_size - 1] * x[x_size - 1]; return cov; } /** * Returns a dot product covariance matrix of differing * x's. A member of Stan's Gaussian Process Library. * * \f$k(x,x') = \sigma^2 + x \cdot x'\f$ * * A dot product covariance matrix is the same covariance matrix * as in bayesian regression with \f$N(0,1)\f$ priors on regression coefficients * and a \f$N(0,\sigma^2)\f$ prior on the constant function. See Rasmussen and * Williams et al 2006, Chapter 4. 
* * @tparam T_x1 type of first std::vector of elements * @tparam T_x2 type of second std::vector of elements * @tparam T_sigma type of sigma * * @param x1 std::vector of elements that can be used in dot_product * @param x2 std::vector of elements that can be used in dot_product * @param sigma constant function that can be used in stan::math::square * @return dot product covariance matrix that is not always symmetric * @throw std::domain_error if sigma < 0, nan or inf * or if x1 or x2 are nan or inf */ template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_dot_prod_cov(const std::vector> &x1, const std::vector> &x2, const T_sigma &sigma) { check_nonnegative("gp_dot_prod_cov", "sigma", sigma); check_finite("gp_dot_prod_cov", "sigma", sigma); size_t x1_size = x1.size(); size_t x2_size = x2.size(); for (size_t i = 0; i < x1_size; ++i) { check_finite("gp_dot_prod_cov", "x1", x1[i]); } for (size_t i = 0; i < x2_size; ++i) { check_finite("gp_dot_prod_cov", "x2", x2[i]); } Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } T_sigma sigma_sq = square(sigma); size_t block_size = 10; for (size_t ib = 0; ib < x1_size; ib += block_size) { for (size_t jb = 0; jb < x2_size; jb += block_size) { size_t j_end = std::min(x2_size, jb + block_size); for (size_t j = jb; j < j_end; ++j) { size_t i_end = std::min(x1_size, ib + block_size); for (size_t i = ib; i < i_end; ++i) { cov(i, j) = sigma_sq + dot_product(x1[i], x2[j]); } } } } return cov; } /** * Returns a dot product covariance matrix of * differing x's. A member of Stan's Gaussian Process Library. * * \f$k(x,x') = \sigma^2 + x \cdot x'\f$ * * A dot product covariance matrix is the same covariance matrix * as in bayesian regression with \f$N(0,1)\f$ priors on regression coefficients * and a \f$N(0,\sigma^2)\f$ prior on the constant function. See Rasmussen and * Williams et al 2006, Chapter 4. 
* * @tparam T_x1 type of first std::vector of double * @tparam T_x2 type of second std::vector of double * @tparam T_sigma type of sigma * * @param x1 std::vector of elements that can be used in dot_product * @param x2 std::vector of elements that can be used in dot_product * @param sigma is the constant function that can be used in stan::math::square * @return dot product covariance matrix that is not always symmetric * @throw std::domain_error if sigma < 0, nan or inf * or if x1 or x2 are nan or inf */ template Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> gp_dot_prod_cov(const std::vector &x1, const std::vector &x2, const T_sigma &sigma) { check_nonnegative("gp_dot_prod_cov", "sigma", sigma); check_finite("gp_dot_prod_cov", "sigma", sigma); size_t x1_size = x1.size(); size_t x2_size = x2.size(); check_finite("gp_dot_prod_cov", "x1", x1); check_finite("gp_dot_prod_cov", "x2", x2); Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov(x1_size, x2_size); if (x1_size == 0 || x2_size == 0) { return cov; } T_sigma sigma_sq = square(sigma); for (size_t i = 0; i < x1_size; ++i) { for (size_t j = 0; j < x2_size; ++j) { cov(i, j) = sigma_sq + x1[i] * x2[j]; } } return cov; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/one_hot_row_vector.hpp0000644000176200001440000000162014645137106025133 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONE_HOT_ROW_VECTOR_HPP #define STAN_MATH_PRIM_FUN_ONE_HOT_ROW_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a row vector with 1 in the k-th position and zero elsewhere * * @param K size of the row vector * @param k position of the 1 (indexing from 1) * @return A row vector of size K with all elements initialized to zero * and a 1 in the k-th position. * @throw std::domain_error if K is not positive, or if k is less than 1 or * greater than K. 
*/ inline Eigen::RowVectorXd one_hot_row_vector(int K, int k) { static const char* function = "one_hot_row_vector"; check_positive(function, "size", K); check_bounded(function, "k", k, 1, K); Eigen::RowVectorXd ret = Eigen::RowVectorXd::Zero(K); ret(k - 1) = 1; return ret; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inc_beta_ddb.hpp0000644000176200001440000000522214645137106023606 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INC_BETA_DDB_HPP #define STAN_MATH_PRIM_FUN_INC_BETA_DDB_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { template T inc_beta_dda(T a, T b, T z, T digamma_a, T digamma_ab); /** * Returns the partial derivative of the regularized * incomplete beta function, I_{z}(a, b) with respect to b. * The power series used to compute the derivative tends to * converge slowly when a and b are large, especially if z * approaches 1. The implementation will throw an exception * if the series have not converged within 100,000 iterations. * The current implementation has been tested for values * of a and b up to 12500 and z = 0.999. 
* * @tparam T scalar types of arguments * @param a first argument * @param b second argument * @param z upper bound of the integral * @param digamma_b value of digamma(b) * @param digamma_ab value of digamma(b) * @return partial derivative of the incomplete beta with respect to b * * @pre a >= 0 * @pre b >= 0 * @pre 0 <= z <= 1 */ template T inc_beta_ddb(T a, T b, T z, T digamma_b, T digamma_ab) { using std::fabs; using std::log; using std::pow; if (b > a) { if ((0.1 < z && z <= 0.75 && b > 500) || (0.01 < z && z <= 0.1 && b > 2500) || (0.001 < z && z <= 0.01 && b > 1e5)) { return -inc_beta_dda(b, a, 1 - z, digamma_b, digamma_ab); } } if ((z > 0.75 && a < 500) || (z > 0.9 && a < 2500) || (z > 0.99 && a < 1e5) || (z > 0.999)) { return -inc_beta_dda(b, a, 1 - z, digamma_b, digamma_ab); } double threshold = 1e-10; const T a_plus_b = a + b; const T a_plus_1 = a + 1; // Common prefactor to regularize numerator and denominator T prefactor = pow(a_plus_1 / a_plus_b, 3); T sum_numer = digamma_ab * prefactor; T sum_denom = prefactor; T summand = prefactor * z * a_plus_b / a_plus_1; T k = 1; digamma_ab += inv(a_plus_b); while (fabs(summand) > threshold) { sum_numer += digamma_ab * summand; sum_denom += summand; summand *= (1 + (a_plus_b) / k) * (1 + k) / (1 + a_plus_1 / k); digamma_ab += inv(a_plus_b + k); ++k; summand *= z / k; if (k > 1e5) { throw_domain_error("inc_beta_ddb", "did not converge within 100000 iterations", "", ""); } } return inc_beta(a, b, z) * (log(1 - z) - digamma_b + sum_numer / sum_denom); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/erfc.hpp0000644000176200001440000000205014645137106022144 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ERFC_HPP #define STAN_MATH_PRIM_FUN_ERFC_HPP #include #include #include namespace stan { namespace math { /** * Structure to wrap the `erfc()` * so that it can be vectorized. 
* * @tparam T type of variable * @param x variable * @return Complementary error function applied to x. */ struct erfc_fun { template static inline auto fun(const T& x) { using std::erfc; return erfc(x); } }; /** * Returns the elementwise `erfc()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam T type of container * @param x container * @return Complementary error function applied to each value in x. */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto erfc(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/uniform_simplex.hpp0000644000176200001440000000116414645137106024452 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_UNIFORM_SIMPLEX_HPP #define STAN_MATH_PRIM_FUN_UNIFORM_SIMPLEX_HPP #include #include namespace stan { namespace math { /** * Return a uniform simplex of size K * * @param K size of the simplex * @return A vector of size K with all elements initialized to 1 / K, * so that their sum is equal to 1. * @throw std::domain_error if K is not positive. */ inline auto uniform_simplex(int K) { check_positive("uniform_simplex", "size", K); return Eigen::VectorXd::Constant(K, 1.0 / K); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/max_size.hpp0000644000176200001440000000114614645137106023051 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MAX_SIZE_HPP #define STAN_MATH_PRIM_FUN_MAX_SIZE_HPP #include #include namespace stan { namespace math { /** * Calculate the size of the largest input. * @tparam T1 type of the first input * @tparam Ts types of the other inputs * @param x1 first input * @param xs other inputs * @return the size of the largest input */ template inline size_t max_size(const T1& x1, const Ts&... 
xs) { return std::max({stan::math::size(x1), stan::math::size(xs)...}); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/fmax.hpp0000644000176200001440000000224114645137106022162 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FMAX_HPP #define STAN_MATH_PRIM_FUN_FMAX_HPP #include #include #include namespace stan { namespace math { /** * Return the greater of the two specified arguments. If one is * not-a-number, return the other. * * @param x First argument. * @param y Second argument. * @return maximum of x or y and if one is NaN return the other */ template * = nullptr> inline double fmax(T1 x, T2 y) { using std::fmax; return fmax(x, y); } /** * Enables the vectorized application of the fmax function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return fmax function applied to the two inputs. */ template * = nullptr> inline auto fmax(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return fmax(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/diag_post_multiply.hpp0000644000176200001440000000172414645137106025144 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DIAG_POST_MULTIPLY_HPP #define STAN_MATH_PRIM_FUN_DIAG_POST_MULTIPLY_HPP #include #include namespace stan { namespace math { /** * Return the product of the matrix and the diagonal matrix * formed from the vector or row_vector. * * @tparam T1 type of the matrix * @tparam T2 type of the vector/row_vector * @param m1 input matrix * @param m2 input vector/row_vector * * @return product of the matrix and the diagonal matrix formed from the * vector or row_vector. 
*/ template * = nullptr, require_eigen_vector_t* = nullptr, require_all_not_st_var* = nullptr> auto diag_post_multiply(const T1& m1, const T2& m2) { check_size_match("diag_post_multiply", "m2.size()", m2.size(), "m1.cols()", m1.cols()); return m1 * m2.asDiagonal(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/positive_ordered_free.hpp0000644000176200001440000000356714645137106025612 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_POSITIVE_ORDERED_FREE_HPP #define STAN_MATH_PRIM_FUN_POSITIVE_ORDERED_FREE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the vector of unconstrained scalars that transform to * the specified positive ordered vector. * *

This function inverts the constraining operation defined in * positive_ordered_constrain(Matrix), * * @tparam T type of elements in the vector * @param y Vector of positive, ordered scalars. * @return Free vector that transforms into the input vector. * @throw std::domain_error if y is not a vector of positive, * ordered scalars. */ template * = nullptr> auto positive_ordered_free(const EigVec& y) { using std::log; const auto& y_ref = to_ref(y); check_positive_ordered("stan::math::positive_ordered_free", "Positive ordered variable", y_ref); Eigen::Index k = y_ref.size(); plain_type_t x(k); if (k == 0) { return x; } x.coeffRef(0) = log(y_ref.coeff(0)); x.tail(k - 1) = (y_ref.tail(k - 1) - y_ref.head(k - 1)).array().log().matrix(); return x; } /** * Overload of `positive_ordered_free()` to untransform each Eigen vector * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase` with compile time rows or columns equal to 1. * @param x The standard vector to untransform. 
*/ template * = nullptr> auto positive_ordered_free(const T& x) { return apply_vector_unary::apply( x, [](auto&& v) { return positive_ordered_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/dot.hpp0000644000176200001440000000063714645137106022024 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DOT_HPP #define STAN_MATH_PRIM_FUN_DOT_HPP #include #include #include namespace stan { namespace math { inline double dot(const std::vector& x, const std::vector& y) { double sum = 0.0; for (size_t i = 0; i < x.size(); ++i) { sum += x[i] * y[i]; } return sum; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cov_matrix_free.hpp0000644000176200001440000000505514645137106024411 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COV_MATRIX_FREE_HPP #define STAN_MATH_PRIM_FUN_COV_MATRIX_FREE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * The covariance matrix derived from the symmetric view of the * lower-triangular view of the K by K specified matrix is freed * to return a vector of size K + (K choose 2). * * This is the inverse of the cov_matrix_constrain() * function so that for any finite vector x of size K * + (K choose 2), * * x == cov_matrix_free(cov_matrix_constrain(x, K)). * * In order for this round-trip to work (and really for this * function to work), the symmetric view of its lower-triangular * view must be positive definite. * * @tparam T type of the matrix (must be derived from \c Eigen::MatrixBase) * @param y Matrix of dimensions K by K such that he symmetric * view of the lower-triangular view is positive definite. * @return Vector of size K plus (K choose 2) in (-inf, inf) * that produces * @throw std::domain_error if y is not square, * has zero dimensionality, or has a non-positive diagonal element. 
*/ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> cov_matrix_free(const T& y) { const auto& y_ref = to_ref(y); check_square("cov_matrix_free", "y", y_ref); check_nonzero_size("cov_matrix_free", "y", y_ref); using std::log; int K = y_ref.rows(); check_positive("cov_matrix_free", "y", y_ref.diagonal()); Eigen::Matrix, Eigen::Dynamic, 1> x((K * (K + 1)) / 2); // FIXME: see Eigen LDLT for rank-revealing version -- use that // even if less efficient? Eigen::LLT> llt(y_ref.rows()); llt.compute(y_ref); plain_type_t L = llt.matrixL(); int i = 0; for (int m = 0; m < K; ++m) { x.segment(i, m) = L.row(m).head(m); i += m; x.coeffRef(i++) = log(L.coeff(m, m)); } return x; } /** * Overload of `cov_matrix_free()` to untransform each matrix * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase`. * @param x The standard vector to untransform. */ template * = nullptr> auto cov_matrix_free(const T& x) { return apply_vector_unary::apply( x, [](auto&& v) { return cov_matrix_free(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/logical_eq.hpp0000644000176200001440000000114114645137106023324 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOGICAL_EQ_HPP #define STAN_MATH_PRIM_FUN_LOGICAL_EQ_HPP #include namespace stan { namespace math { /** * Return 1 if the first argument is equal to the second. * Equivalent to x1 == x2. 
* * @tparam T1 type of first argument * @tparam T2 type of second argument * @param x1 first argument * @param x2 second argument * @return true iff x1 == x2 */ template inline int logical_eq(const T1 x1, const T2 x2) { return x1 == x2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/qr_thin_R.hpp0000644000176200001440000000216514645137106023161 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QR_THIN_R_HPP #define STAN_MATH_PRIM_FUN_QR_THIN_R_HPP #include #include #include namespace stan { namespace math { /** * Returns the upper triangular factor of the thin QR decomposition * * @tparam EigMat type of the matrix * @param m Matrix. * @return Upper triangular matrix with minimal rows */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> qr_thin_R( const EigMat& m) { using matrix_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return matrix_t(0, 0); } Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); const int min_size = std::min(m.rows(), m.cols()); matrix_t R = qr.matrixQR().topLeftCorner(min_size, m.cols()); R.template triangularView().setZero(); for (int i = 0; i < min_size; ++i) { if (R(i, i) < 0) { R.row(i) *= -1.0; } } return R; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/zeros_int_array.hpp0000644000176200001440000000106714645137106024446 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ZEROS_INT_ARRAY_HPP #define STAN_MATH_PRIM_FUN_ZEROS_INT_ARRAY_HPP #include #include namespace stan { namespace math { /** * Return an integer array of zeros. * * @param K size of the array * @return an integer array of size K with all elements initialized to 0. * @throw std::domain_error if K is negative. 
*/ inline std::vector zeros_int_array(int K) { check_nonnegative("zeros_int_array", "size", K); return std::vector(K, 0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/csr_u_to_z.hpp0000644000176200001440000000136614645137106023404 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CSR_U_TO_Z #define STAN_MATH_PRIM_FUN_CSR_U_TO_Z #include #include #include namespace stan { namespace math { /** \addtogroup csr_format * @{ */ /** * Return the z vector computed from the specified u vector at the * index for the z vector. * * @param[in] u U vector. * @param[in] i Index into resulting z vector. * @return z[i] where z is conversion from u. * @throw std::domain_error if u is zero length. * @throw std::out_of_range if i is out of range. */ inline int csr_u_to_z(const std::vector& u, int i) { check_range("csr_u_to_z", "i", u.size(), i + 1, "index out of range"); return u[i + 1] - u[i]; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/polar.hpp0000644000176200001440000000305514645137106022350 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_POLAR_HPP #define STAN_MATH_PRIM_FUN_POLAR_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** * Returns complex number with specified magnitude and phase angle. * * @param[in] r magnitude * @param[in] theta phase angle * @return complex number with magnitude and phase angle */ template inline complex_return_t complex_polar(const U& r, const V& theta) { using std::cos; using std::sin; if (!(r >= 0) || is_inf(theta)) { return {std::numeric_limits::quiet_NaN()}; } return {r * cos(theta), r * sin(theta)}; } } // namespace internal /** * Returns the complex number with specified magnitude and phase angle. 
* * @param[in] r magnitude * @param[in] theta phase angle * @return complex number with magnitude and phase angle */ inline std::complex polar(double r, double theta) { return internal::complex_polar(r, theta); } inline std::complex polar(double r, int theta) { return internal::complex_polar(r, static_cast(theta)); } inline std::complex polar(int r, double theta) { return internal::complex_polar(static_cast(r), theta); } inline std::complex polar(int r, int theta) { return internal::complex_polar(static_cast(r), static_cast(theta)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/one_hot_int_array.hpp0000644000176200001440000000153014645137106024732 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONE_HOT_INT_ARRAY_HPP #define STAN_MATH_PRIM_FUN_ONE_HOT_INT_ARRAY_HPP #include #include namespace stan { namespace math { /** * Return an integer array with 1 in the k-th position and zero elsewhere. * * @param K size of the array * @param k position of the 1 (indexing from 1) * @return An integer array of size K with all elements initialized to zero * and a 1 in the k-th position. * @throw std::domain_error if K is not positive, or if k is less than 1 or * greater than K. */ inline std::vector one_hot_int_array(int K, int k) { static const char* function = "one_hot_int_array"; check_positive(function, "size", K); check_bounded(function, "k", k, 1, K); std::vector v(K, 0); v[k - 1] = 1; return v; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/value_of_rec.hpp0000644000176200001440000000704414645137106023666 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_VALUE_OF_REC_HPP #define STAN_MATH_PRIM_FUN_VALUE_OF_REC_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return the value of the specified scalar argument * converted to a double value. * *

See the primitive_value function to * extract values without casting to double. * *

This function is meant to cover the primitive types. For * types requiring pass-by-reference, this template function * should be specialized. * * @tparam T Type of scalar. * @param x Scalar to convert to double. * @return Value of scalar cast to a double. */ template > inline double value_of_rec(const T x) { return static_cast(x); } /** * Return the specified argument. * *

See value_of(T) for a polymorphic * implementation using static casts. * *

This inline pass-through no-op should be compiled away. * * @param x Specified value. * @return Specified value. */ inline double value_of_rec(double x) { return x; } /** * Recursively apply value-of to the parts of the argument. * * @tparam T value type of argument * @param[in] x argument * @return real complex value of argument */ template inline std::complex value_of_rec(const std::complex& x) { return {value_of_rec(x.real()), value_of_rec(x.imag())}; } /** * Convert a std::vector of type T to a std::vector of doubles. * * T must implement value_of_rec. See * test/math/fwd/fun/value_of_rec.cpp for fvar and var usage. * * @tparam T Scalar type in std::vector * @param[in] x std::vector to be converted * @return std::vector of values **/ template * = nullptr> inline std::vector value_of_rec(const std::vector& x) { size_t x_size = x.size(); std::vector result(x_size); for (size_t i = 0; i < x_size; i++) { result[i] = value_of_rec(x[i]); } return result; } /** * Return the specified argument. * *

See value_of_rec(T) for a polymorphic * implementation using static casts. * *

This inline pass-through no-op should be compiled away. * * @param x Specified std::vector. * @return Specified std::vector. */ template * = nullptr, require_vt_same* = nullptr> inline T value_of_rec(T&& x) { return std::forward(x); } /** * Convert a matrix of type T to a matrix of doubles. * * T must implement value_of_rec. See * test/unit/math/fwd/fun/value_of_test.cpp for fvar and var usage. * * @tparam T Type of matrix * @param[in] M Matrix to be converted * @return Matrix of values **/ template , typename = require_eigen_t> inline auto value_of_rec(T&& M) { return make_holder( [](auto& m) { return m.unaryExpr([](auto x) { return value_of_rec(x); }); }, std::forward(M)); } /** * Return the specified argument. * *

See value_of_rec(T) for a polymorphic * implementation using static casts. * *

This inline pass-through no-op should be compiled away. * * @tparam T Type of matrix. * @param x Specified matrix. * @return Specified matrix. */ template , typename = require_eigen_t> inline T value_of_rec(T&& x) { return std::forward(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_right.hpp0000644000176200001440000000273014645137106024050 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_HPP #include #include #include namespace stan { namespace math { /** * Returns the solution of the system xA=b. * * @tparam EigMat1 type of the right-hand side matrix or vector * @tparam EigMat2 type of the second matrix * * @param A Matrix. * @param b Right hand side matrix or vector. * @return x = b A^-1, solution of the linear system. * @throws std::domain_error if A is not square or the rows of b don't * match the size of A. */ template * = nullptr, require_all_not_vt_fvar* = nullptr> inline Eigen::Matrix, EigMat1::RowsAtCompileTime, EigMat2::ColsAtCompileTime> mdivide_right(const EigMat1& b, const EigMat2& A) { using T_return = return_type_t; check_square("mdivide_right", "A", A); check_multiplicable("mdivide_right", "b", b, "A", A); if (A.size() == 0) { return {b.rows(), 0}; } return Eigen::Matrix(A) .transpose() .lu() .solve(Eigen::Matrix(b) .transpose()) .transpose(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/scalbn.hpp0000644000176200001440000000061614645137106022475 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SCALBN_HPP #define STAN_MATH_PRIM_FUN_SCALBN_HPP #include #include #include namespace stan { namespace math { template > double scalbn(const T& x, int n) { return std::scalbn(value_of_rec(x), n); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/diag_matrix.hpp0000644000176200001440000000143014645137106023516 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_FUN_DIAG_MATRIX_HPP #define STAN_MATH_PRIM_FUN_DIAG_MATRIX_HPP #include #include namespace stan { namespace math { /** * Return a square diagonal matrix with the specified vector of * coefficients as the diagonal values. * * @tparam EigVec type of the vector (must be derived from \c Eigen::MatrixBase * and have one compile time dimmension equal to 1) * @param[in] v Specified vector. * @return Diagonal matrix with vector as diagonal values. */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> diag_matrix(const EigVec& v) { return v.asDiagonal(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/vec_concat.hpp0000644000176200001440000000413314645137106023335 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_VEC_CONCAT_HPP #define STAN_MATH_PRIM_FUN_VEC_CONCAT_HPP #include #include #include namespace stan { namespace math { namespace internal { inline auto sum_vector_sizes() { return 0; } /** * Get the internal sizes of a pack of vectors. * @tparam Vec Type of first vector to count. * @tparam VecArgs Parameter pack of vector types to accumulate sizes of * @param x vector to start accumulation of sizes with. * @param args pack of vectors to accumulate sizes for. */ template inline auto sum_vector_sizes(const Vec& x, const VecArgs&... args) { return x.size() + sum_vector_sizes(args...); } /** * End of recursion for appending to vector. */ template inline void append_vectors(VecInOut& x) {} /** * Fill a vector with other vectors. * @tparam VecInOut Type of vector to be filled. * @tparam VecIn Type of the first vector to insert into the in/out vector. * @tparam VecArgs Parameter pack of other vectors to fill in the in/out vector. * @param x Vector to be filled. * @param y First vector to insert into the in/out vector. * @param args Pack of other vectors to fill in the in/out vector. */ template inline void append_vectors(VecInOut& x, const VecIn& y, const VecArgs&... 
args) { x.insert(x.end(), y.begin(), y.end()); append_vectors(x, args...); } } // namespace internal /** * Get the event stack from a vector of events and other arguments. * * @tparam Vec type of elements in the array * @tparam Args types for variadic arguments * @param v1 event stack to roll up * @param args variadic arguments passed down to the next recursion * @return Vector of OpenCL events */ template inline auto vec_concat(const Vec& v1, const Args&... args) { std::vector> vec; vec.reserve(internal::sum_vector_sizes(v1, args...)); internal::append_vectors(vec, v1, args...); return vec; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/welford_covar_estimator.hpp0000644000176200001440000000176514645137106026164 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_WELFORD_COVAR_ESTIMATOR_HPP #define STAN_MATH_PRIM_FUN_WELFORD_COVAR_ESTIMATOR_HPP #include #include namespace stan { namespace math { class welford_covar_estimator { public: explicit welford_covar_estimator(int n) : m_(Eigen::VectorXd::Zero(n)), m2_(Eigen::MatrixXd::Zero(n, n)) { restart(); } void restart() { num_samples_ = 0; m_.setZero(); m2_.setZero(); } void add_sample(const Eigen::VectorXd& q) { ++num_samples_; Eigen::VectorXd delta(q - m_); m_ += delta / num_samples_; m2_ += (q - m_) * delta.transpose(); } int num_samples() { return num_samples_; } void sample_mean(Eigen::VectorXd& mean) { mean = m_; } void sample_covariance(Eigen::MatrixXd& covar) { if (num_samples_ > 1) { covar = m2_ / (num_samples_ - 1.0); } } protected: double num_samples_; Eigen::VectorXd m_; Eigen::MatrixXd m2_; }; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/max.hpp0000644000176200001440000000311114645137106022011 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MAX_HPP #define STAN_MATH_PRIM_FUN_MAX_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the maximum value of the two specified * 
scalar arguments. * * @tparam T1 type of first argument (must be arithmetic) * @tparam T2 type of second argument (must be arithmetic) * @param x first argument * @param y second argument * @return maximum value of the two arguments */ template * = nullptr> auto max(T1 x, T2 y) { return std::max(x, y); } /** * Returns the maximum value in the specified * matrix, vector, row vector or std vector. * * @tparam T type of the container * @param m specified matrix, vector, row vector or std vector * @return maximum value in the container, or -infinity if the * container is size zero and the scalar type in container is floating point * number * @throws std::invalid_argument if the vector is size zero and the * scalar type in the container is integer */ template * = nullptr> inline value_type_t max(const T& m) { if (std::is_integral>::value) { check_nonzero_size("max", "int vector", m); } else if (m.size() == 0) { return NEGATIVE_INFTY; } return apply_vector_unary::reduce( m, [](const auto& x) { return x.maxCoeff(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/promote_scalar.hpp0000644000176200001440000001024114645137106024240 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PROMOTE_SCALAR_HPP #define STAN_MATH_PRIM_FUN_PROMOTE_SCALAR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Promote a scalar to another scalar type * * @tparam PromotionScalar scalar type of output. * @tparam UnPromotedType input type. `UnPromotedType` must be constructible * from `PromotionScalar` * @param x input scalar to be promoted to `PromotionScalar` type */ template * = nullptr, require_not_same_t* = nullptr, require_all_not_tuple_t* = nullptr> inline constexpr auto promote_scalar(UnPromotedType&& x) { return PromotionScalar(std::forward(x)); } /** * No-op overload when promoting a type's scalar to the type it already has. * * @tparam PromotionScalar scalar type of output. 
* @tparam UnPromotedType input type. `UnPromotedType`'s `scalar_type` must be * equal to `PromotionScalar` * @param x input */ template < typename PromotionScalar, typename UnPromotedType, require_same_t>* = nullptr> inline constexpr auto promote_scalar(UnPromotedType&& x) noexcept { return std::forward(x); } /** * Promote the scalar type of an eigen matrix to the requested type. * * @tparam PromotionScalar scalar type of output. * @tparam UnPromotedType input type. The `PromotionScalar` type must be * constructible from `UnPromotedType`'s `scalar_type` * @param x input */ template * = nullptr, require_not_same_t>* = nullptr> inline auto promote_scalar(UnPromotedType&& x) { return x.template cast(); } // Forward decl for iterating over tuples used in std::vector template * = nullptr, require_not_same_t* = nullptr> inline constexpr promote_scalar_t promote_scalar(UnPromotedTypes&& x); /** * Promote the scalar type of an standard vector to the requested type. * * @tparam PromotionScalar scalar type of output. * @tparam UnPromotedType input type. The `PromotionScalar` type must be * constructible from `UnPromotedType`'s `scalar_type` * @param x input */ template * = nullptr, require_not_same_t>* = nullptr> inline auto promote_scalar(UnPromotedType&& x) { const auto x_size = x.size(); promote_scalar_t ret(x_size); for (size_t i = 0; i < x_size; ++i) { ret[i] = promote_scalar(x[i]); } return ret; } /** * Promote the scalar type of a tuples elements to the requested types. * * @tparam PromotionScalars A tuple of scalar types that is the same size as the * tuple of `UnPromotedTypes`. * @tparam UnPromotedTypes tuple input. Each `PromotionScalars` element must be * constructible from it's associated element of `UnPromotedTypes` `scalar_type` * @param x input */ template *, require_not_same_t*> inline constexpr promote_scalar_t promote_scalar(UnPromotedTypes&& x) { return index_apply>::value>( [&x](auto... 
Is) { return std::make_tuple( promote_scalar( std::declval()))>>(std::get(x))...); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inc_beta_ddz.hpp0000644000176200001440000000214114645137106023633 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INC_BETA_DDZ_HPP #define STAN_MATH_PRIM_FUN_INC_BETA_DDZ_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the partial derivative of the regularized * incomplete beta function, I_{z}(a, b) with respect to z. * * @tparam T scalar types of arguments * @param a first argument * @param b second argument * @param z upper bound of the integral * @return partial derivative of the incomplete beta with respect to z * * @pre a > 0 * @pre b > 0 * @pre 0 < z <= 1 */ template T inc_beta_ddz(T a, T b, T z) { using std::exp; using std::log; return exp((b - 1) * log(1 - z) + (a - 1) * log(z) + lgamma(a + b) - lgamma(a) - lgamma(b)); } template <> inline double inc_beta_ddz(double a, double b, double z) { using boost::math::ibeta_derivative; return ibeta_derivative(a, b, z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_softmax.hpp0000644000176200001440000000302514645137106023552 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_SOFTMAX_HPP #define STAN_MATH_PRIM_FUN_LOG_SOFTMAX_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the natural logarithm of the softmax of the specified * vector. * * \f$ * \log \mbox{softmax}(y) * \ = \ y - \log \sum_{k=1}^K \exp(y_k) * \ = \ y - \mbox{log\_sum\_exp}(y). * \f$ * * For the log softmax function, the entries in the Jacobian are * \f$ * \frac{\partial}{\partial y_m} \mbox{softmax}(y)[k] * = \left\{ * \begin{array}{ll} * 1 - \mbox{softmax}(y)[m] * & \mbox{ if } m = k, \mbox{ and} * \\[6pt] * \mbox{softmax}(y)[m] * & \mbox{ if } m \neq k. * \end{array} * \right. 
* \f$ * * @tparam Container type of input vector to transform * @param[in] x vector to transform * @return log unit simplex result of the softmax transform of the vector. */ template * = nullptr, require_container_t* = nullptr> inline auto log_softmax(const Container& x) { check_nonzero_size("log_softmax", "v", x); return make_holder( [](const auto& a) { return apply_vector_unary>::apply( a, [](const auto& v) { return v.array() - log_sum_exp(v); }); }, to_ref(x)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_right_tri.hpp0000644000176200001440000000341014645137106024722 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_TRI_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_RIGHT_TRI_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the solution of the system xA=b when A is triangular * * @tparam TriView Specifies whether A is upper (Eigen::Upper) * or lower triangular (Eigen::Lower). * @tparam EigMat1 type of the right-hand side matrix or vector * @tparam EigMat2 type of the triangular matrix * * @param A Triangular matrix. Specify upper or lower with TriView * being Eigen::Upper or Eigen::Lower. * @param b Right hand side matrix or vector. * @return x = b A^-1, solution of the linear system. * @throws std::domain_error if A is not square or the rows of b don't * match the size of A. 
*/ template * = nullptr> inline auto mdivide_right_tri(const EigMat1& b, const EigMat2& A) { check_square("mdivide_right_tri", "A", A); check_multiplicable("mdivide_right_tri", "b", b, "A", A); if (TriView != Eigen::Lower && TriView != Eigen::Upper) { throw_domain_error("mdivide_right_tri", "triangular view must be Eigen::Lower or Eigen::Upper", "", ""); } using T_return = return_type_t; using ret_type = Eigen::Matrix; if (A.rows() == 0) { return ret_type(b.rows(), 0); } return ret_type(A) .template triangularView() .transpose() .solve(ret_type(b).transpose()) .transpose() .eval(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/elt_multiply.hpp0000644000176200001440000000354714645137106023764 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ELT_MULTIPLY_HPP #define STAN_MATH_PRIM_FUN_ELT_MULTIPLY_HPP #include #include #include #include namespace stan { namespace math { /** * Return the elementwise multiplication of the specified * matrices. * * @tparam Mat1 type of the first matrix or expression * @tparam Mat2 type of the second matrix or expression * * @param m1 First matrix or expression * @param m2 Second matrix or expression * @return Elementwise product of matrices. */ template * = nullptr, require_all_not_st_var* = nullptr> auto elt_multiply(const Mat1& m1, const Mat2& m2) { check_matching_dims("elt_multiply", "m1", m1, "m2", m2); return m1.cwiseProduct(m2); } /** * Return the elementwise multiplication of the specified * scalars. * * @tparam Mat1 type of the first scalar * @tparam Mat2 type of the second scalar * * @param a First scalar * @param b Second scalar * @return Product of scalars */ template * = nullptr> auto elt_multiply(const Scalar1& a, const Scalar2& b) { return a * b; } /** * Return specified matrix multiplied by specified scalar. * * One argument is a matrix and one is a scalar. 
* * @tparam T1 type of the first argument * @tparam T2 type of the second argument * * @param A first argument * @param B second argument * @return product of matrix and scalar */ template * = nullptr, require_any_stan_scalar_t* = nullptr> inline auto elt_multiply(const T1& A, const T2& B) { return multiply(A, B); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/asinh.hpp0000644000176200001440000000364314645137106022340 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ASINH_HPP #define STAN_MATH_PRIM_FUN_ASINH_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `asinh()` so it can be vectorized. * * @tparam T argument scalar type * @param x argument * @return inverse hyperbolic sine of argument in radians. */ struct asinh_fun { template static inline auto fun(const T& x) { using std::asinh; return asinh(x); } }; /** * Returns the elementwise `asinh()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam T type of container * @param x container * @return Inverse hyperbolic sine of each value in the container. */ template < typename T, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto asinh(const T& x) { return apply_scalar_unary::apply(x); } namespace internal { /** * Return the hyperbolic arc sine of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return hyperbolic arc sine of the argument */ template inline std::complex complex_asinh(const std::complex& z) { std::complex y_d = asinh(value_of_rec(z)); auto y = log(z + sqrt(1 + z * z)); return copysign(y, y_d); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cov_matrix_free_lkj.hpp0000644000176200001440000000441214645137106025245 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COV_MATRIX_FREE_LKJ_HPP #define STAN_MATH_PRIM_FUN_COV_MATRIX_FREE_LKJ_HPP #include #include #include #include namespace stan { namespace math { /** * Return the vector of unconstrained partial correlations and * deviations that transform to the specified covariance matrix. * *

The constraining transform is defined as for * cov_matrix_constrain(Matrix, size_t). The * inverse first factors out the deviations, then applies the * freeing transform of corr_matrix_free(Matrix&). * * @tparam T type of the matrix (must be derived from \c Eigen::MatrixBase) * @param y Covariance matrix to free. * @return Vector of unconstrained values that transforms to the * specified covariance matrix. * @throw std::domain_error if the correlation matrix has no * elements or is not a square matrix. * @throw std::runtime_error if the correlation matrix cannot be * factorized by factor_cov_matrix() */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, 1> cov_matrix_free_lkj( const T& y) { using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; using T_scalar = value_type_t; check_nonzero_size("cov_matrix_free_lkj", "y", y); check_square("cov_matrix_free_lkj", "y", y); Eigen::Index k = y.rows(); Eigen::Index k_choose_2 = (k * (k - 1)) / 2; Matrix x(k_choose_2 + k); bool successful = factor_cov_matrix(y, x.head(k_choose_2).array(), x.tail(k).array()); if (!successful) { throw_domain_error("cov_matrix_free_lkj", "factor_cov_matrix failed on y", "", ""); } return x; } /** * Overload of `cov_matrix_free_lkj()` to untransform each matrix * in a standard vector. * @tparam T A standard vector with with a `value_type` which inherits from * `Eigen::MatrixBase`. * @param x The standard vector to untransform. 
*/ template * = nullptr> auto cov_matrix_free_lkj(const T& x) { return apply_vector_unary::apply( x, [](auto&& v) { return cov_matrix_free_lkj(v); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/factor_U.hpp0000644000176200001440000000303314645137106022771 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FACTOR_U_HPP #define STAN_MATH_PRIM_FUN_FACTOR_U_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * This function is intended to make starting values, given a unit * upper-triangular matrix U such that U'DU is a correlation matrix * * @tparam T type of elements in the matrix * @param U Sigma matrix * @param CPCs fill this unbounded */ template * = nullptr, require_eigen_vector_t* = nullptr, require_vt_same* = nullptr> void factor_U(const T_U& U, T_CPCs&& CPCs) { size_t K = U.rows(); size_t position = 0; size_t pull = K - 1; const Eigen::Ref>& U_ref = U; if (K == 2) { CPCs(0) = atanh(U_ref(0, 1)); return; } Eigen::Array, 1, Eigen::Dynamic> temp = U_ref.row(0).tail(pull); CPCs.head(pull) = temp; Eigen::Array, Eigen::Dynamic, 1> acc(K); acc(0) = -0.0; acc.tail(pull) = 1.0 - temp.square(); for (size_t i = 1; i < (K - 1); i++) { position += pull; pull--; temp = U_ref.row(i).tail(pull); temp /= sqrt(acc.tail(pull) / acc(i)); CPCs.segment(position, pull) = temp; acc.tail(pull) *= 1.0 - temp.square(); } CPCs = 0.5 * ((1.0 + CPCs) / (1.0 - CPCs)).log(); // now unbounded } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/value_of.hpp0000644000176200001440000000623014645137106023031 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_VALUE_OF_HPP #define STAN_MATH_PRIM_FUN_VALUE_OF_HPP #include #include #include #include namespace stan { namespace math { /** * Inputs that are arithmetic types or containers of airthmetric types * are returned from value_of unchanged * * @tparam T Input type * @param[in] x Input argument * @return Forwarded input 
argument **/ template * = nullptr> inline T value_of(T&& x) { return std::forward(x); } template * = nullptr, require_t::value_type>>* = nullptr> inline auto value_of(T&& x) { return std::forward(x); } template < typename T, require_complex_t* = nullptr, require_not_arithmetic_t::value_type>* = nullptr> inline auto value_of(T&& x) { using inner_t = partials_type_t::value_type>; return std::complex{value_of(x.real()), value_of(x.imag())}; } /** * For std::vectors of non-arithmetic types, return a std::vector composed * of value_of applied to each element. * * @tparam T Input element type * @param[in] x Input std::vector * @return std::vector of values **/ template * = nullptr, require_not_st_arithmetic* = nullptr> inline auto value_of(const T& x) { std::vector>()))>> out; out.reserve(x.size()); for (auto&& x_elem : x) { out.emplace_back(value_of(x_elem)); } return out; } /** * For Eigen matrices and expressions of non-arithmetic types, return an *expression that represents the Eigen::Matrix resulting from applying value_of *elementwise * * @tparam EigMat type of the matrix * * @param[in] M Matrix to be converted * @return Matrix of values **/ template * = nullptr, require_not_st_arithmetic* = nullptr> inline auto value_of(EigMat&& M) { return make_holder( [](auto& a) { return a.unaryExpr([](const auto& scal) { return value_of(scal); }); }, std::forward(M)); } template * = nullptr, require_not_st_arithmetic* = nullptr> inline auto value_of(EigMat&& M) { auto&& M_ref = to_ref(M); using scalar_t = decltype(value_of(std::declval>())); promote_scalar_t> ret(M_ref.rows(), M_ref.cols()); ret.reserve(M_ref.nonZeros()); for (int k = 0; k < M_ref.outerSize(); ++k) { for (typename std::decay_t::InnerIterator it(M_ref, k); it; ++it) { ret.insert(it.row(), it.col()) = value_of(it.valueRef()); } } ret.makeCompressed(); return ret; } template * = nullptr, require_st_arithmetic* = nullptr> inline auto value_of(EigMat&& M) { return std::forward(M); } } // namespace math } // 
namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/assign.hpp0000644000176200001440000000560614645137106022523 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ASSIGN_HPP #define STAN_MATH_PRIM_FUN_ASSIGN_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Helper function to return the matrix size as either "dynamic" or "1". * * @tparam N Eigen matrix size specification * @param o output stream */ template inline void print_mat_size(std::ostream& o) { if (N == Eigen::Dynamic) { o << "dynamically sized"; } else { o << N; } } /** * Copy the right-hand side's value to the left-hand side * variable. * * The assign() function is overloaded. This * instance will match arguments where the right-hand side is * assignable to the left and they are not both * std::vector or Eigen::Matrix types. * * @tparam T_lhs Type of left-hand side. * @tparam T_rhs Type of right-hand side. * @param x Left-hand side. * @param y Right-hand side. */ template * = nullptr> inline void assign(T_lhs& x, const T_rhs& y) { x = y; } /** * Copy the right-hand side's value to the left-hand side * variable. * * The assign() function is overloaded. This * instance will be called for arguments that are both * Eigen::Matrix types. * * @tparam T_lhs type of the left-hand side matrix * @tparam T_rhs type of the right-hand side matrix * * @param x Left-hand side matrix. * @param y Right-hand side matrix. * @throw std::invalid_argument if sizes do not match. */ template * = nullptr> inline void assign(T_lhs&& x, const T_rhs& y) { check_matching_dims("assign", "left-hand-side", x, "right-hand-side", y); x = y.template cast>(); } /** * Copy the right-hand side's value to the left-hand side * variable. * * The assign() function is overloaded. This * instance will be called for arguments that are both * std::vector, and will call assign() * element-by element. 
* * For example, a std::vector<int> can be * assigned to a std::vector<double> using this * function. * * @tparam T_lhs type of elements in the left-hand side vector * @tparam T_rhs type of elements in the right-hand side vector * @param x Left-hand side vector. * @param y Right-hand side vector. * @throw std::invalid_argument if sizes do not match. */ template inline void assign(std::vector& x, const std::vector& y) { check_matching_sizes("assign", "left-hand side", x, "right-hand side", y); for (size_t i = 0; i < x.size(); ++i) { assign(x[i], y[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/as_value_array_or_scalar.hpp0000644000176200001440000000140414645137106026251 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AS_VALUE_ARRAY_OR_SCALAR_HPP #define STAN_MATH_PRIM_FUN_AS_VALUE_ARRAY_OR_SCALAR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Extract the value from an object. For eigen types and `std::vectors` * convert to an eigen array and for scalars return a scalar. * @tparam T A stan scalar, eigen vector, or `std::vector`. * @param v Specified value. * @return Same value. */ template inline auto as_value_array_or_scalar(T&& v) { return value_of(as_array_or_scalar(std::forward(v))); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/norm.hpp0000644000176200001440000000150614645137106022205 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_NORM_HPP #define STAN_MATH_PRIM_FUN_NORM_HPP #include #include #include namespace stan { namespace math { /** * Return the squared magnitude of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return squared magnitude of the argument */ template inline V norm(const std::complex& z) { return std::norm(z); } namespace internal { /** * Return the squared magnitude of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return squared magnitude of the argument */ template inline V complex_norm(const std::complex& z) { return square(z.real()) + square(z.imag()); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inv_cloglog.hpp0000644000176200001440000000532214645137106023534 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INV_CLOGLOG_HPP #define STAN_MATH_PRIM_FUN_INV_CLOGLOG_HPP #include #include #include #include #include namespace stan { namespace math { /** * The inverse complementary log-log function. * * The function is defined by * * inv_cloglog(x) = 1 - exp(-exp(x)). * * This function can be used to implement the inverse link * function for complementary-log-log regression. * * \f[ \mbox{inv\_cloglog}(y) = \begin{cases} \mbox{cloglog}^{-1}(y) & \mbox{if } -\infty\leq y \leq \infty \\[6pt] \textrm{NaN} & \mbox{if } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{inv\_cloglog}(y)}{\partial y} = \begin{cases} \frac{\partial\, \mbox{cloglog}^{-1}(y)}{\partial y} & \mbox{if } -\infty\leq y\leq \infty \\[6pt] \textrm{NaN} & \mbox{if } y = \textrm{NaN} \end{cases} \f] \f[ \mbox{cloglog}^{-1}(y) = 1 - \exp \left( - \exp(y) \right) \f] \f[ \frac{\partial \, \mbox{cloglog}^{-1}(y)}{\partial y} = \exp(y-\exp(y)) \f] * * @param x Argument. * @return Inverse complementary log-log of the argument. */ inline double inv_cloglog(double x) { using std::exp; return 1. - exp(-exp(x)); } /** * Structure to wrap inv_cloglog() so that it can be vectorized. * * @tparam T type of variable * @param x variable * @return 1 - exp(-exp(x)). */ struct inv_cloglog_fun { template static inline auto fun(const T& x) { return inv_cloglog(x); } }; /** * Vectorized version of inv_cloglog(). * * @tparam Container type of container * @param x container * @return 1 - exp(-exp()) applied to each value in x. 
*/ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto inv_cloglog(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of inv_cloglog() that accepts std::vectors, Eigen Matrix/Array * objects or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return 1 - exp(-exp()) applied to each value in x. */ template * = nullptr> inline auto inv_cloglog(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return 1 - (-v.array().exp()).exp(); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/asin.hpp0000644000176200001440000000454414645137106022171 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ASIN_HPP #define STAN_MATH_PRIM_FUN_ASIN_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `asin()` so it can be vectorized. * * @tparam T type of argument * @param x argument * @return Arcsine of x in radians. */ struct asin_fun { template static inline auto fun(const T& x) { using std::asin; return asin(x); } }; /** * Returns the elementwise `asin()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x container * @return Arcsine of each variable in the container, in radians. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto asin(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `asin()` that accepts std::vectors, Eigen Matrix/Array objects, * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Arcsine of each variable in the container, in radians. 
*/ template * = nullptr> inline auto asin(const Container& x) { return apply_vector_unary::apply( x, [](const auto& v) { return v.array().asin(); }); } namespace internal { /** * Return the arc sine of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return arc sine of the argument */ template inline std::complex complex_asin(const std::complex& z) { auto y_d = asin(value_of_rec(z)); auto y = neg_i_times(asinh(i_times(z))); return copysign(y, y_d); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/add.hpp0000644000176200001440000000464714645137106021773 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ADD_HPP #define STAN_MATH_PRIM_FUN_ADD_HPP #include #include #include namespace stan { namespace math { /** * Return the sum of the specified scalars. * * @tparam ScalarA type of the first scalar * @tparam ScalarB type of the second scalar * @param a first scalar * @param b second scalar * @return the sum of the scalars */ template * = nullptr, require_all_not_var_t* = nullptr> inline return_type_t add(const ScalarA& a, const ScalarB& b) { return a + b; } /** * Return the sum of the specified matrices. The two matrices * must have the same dimensions. * * @tparam Mat1 type of the first matrix or expression * @tparam Mat2 type of the second matrix or expression * * @param m1 First matrix or expression. * @param m2 Second matrix or expression. * @return Sum of the matrices. * @throw std::invalid_argument if m1 and m2 do not have the same * dimensions. */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto add(const Mat1& m1, const Mat2& m2) { check_matching_dims("add", "m1", m1, "m2", m2); return m1 + m2; } /** * Return the sum of the specified matrix and specified scalar. * * @tparam Mat type of the matrix or expression * @tparam Scal type of the scalar * @param m Matrix or expression. * @param c Scalar. * @return The matrix plus the scalar. 
*/ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_not_st_var* = nullptr> inline auto add(const Mat& m, const Scal c) { return (m.array() + c).matrix(); } /** * Return the sum of the specified scalar and specified matrix. * * @tparam Scal type of the scalar * @tparam Mat type of the matrix or expression * @param c Scalar. * @param m Matrix. * @return The scalar plus the matrix. */ template * = nullptr, require_eigen_t* = nullptr, require_all_not_st_var* = nullptr> inline auto add(const Scal c, const Mat& m) { return (c + m.array()).matrix(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/finite_diff_stepsize.hpp0000644000176200001440000000133514645137106025426 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_FINITE_DIFF_STEPSIZE_HPP #define STAN_MATH_PRIM_FUN_FINITE_DIFF_STEPSIZE_HPP #include #include #include namespace stan { namespace math { /** * Return the stepsize for finite difference evaluations at the * specified scalar. * *

The formula used is `stepsize(u) = cbrt(epsilon) * max(1, * abs(u)).` * * @param u initial value to increment * @return stepsize away from u for finite differences */ inline double finite_diff_stepsize(double u) { using std::fabs; static const double cbrt_epsilon = std::cbrt(EPSILON); return cbrt_epsilon * std::fmax(1, fabs(u)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sin.hpp0000644000176200001440000000365514645137106022032 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SIN_HPP #define STAN_MATH_PRIM_FUN_SIN_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap sin() so it can be vectorized. * * @tparam T type of argument * @param x angle in radians * @return Sine of x. */ struct sin_fun { template static inline auto fun(const T& x) { using std::sin; return sin(x); } }; /** * Vectorized version of sin(). * * @tparam Container type of container * @param x angles in radians * @return Sine of each value in x. */ template < typename T, require_not_container_st* = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto sin(const T& x) { return apply_scalar_unary::apply(x); } /** * Version of sin() that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Sine of each value in x. */ template * = nullptr> inline auto sin(const Container& x) { return apply_vector_unary::apply( x, [&](const auto& v) { return v.array().sin(); }); } namespace internal { /** * Return the sine of the complex argument. 
* * @tparam V value type of argument * @param[in] z argument * @return sine of the argument */ template inline std::complex complex_sin(const std::complex& z) { return neg_i_times(sinh(i_times(z))); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/subtract.hpp0000644000176200001440000000510714645137106023062 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SUBTRACT_HPP #define STAN_MATH_PRIM_FUN_SUBTRACT_HPP #include #include #include namespace stan { namespace math { /** * Return the result of subtracting the second scalar from the first * scalar. * * @tparam ScalarA type of the first scalar * @tparam ScalarB type of the second scalar * @param a first scalar * @param b second scalar * @return difference between first scalar and second scalar */ template * = nullptr, require_all_not_var_t* = nullptr> inline return_type_t subtract(const ScalarA& a, const ScalarB& b) { return a - b; } /** * Return the result of subtracting the second specified matrix * from the first specified matrix. * * @tparam Mat1 type of the first matrix or expression * @tparam Mat2 type of the second matrix or expression * * @param m1 First matrix or expression. * @param m2 Second matrix or expression. * @return Difference between first matrix and second matrix. */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto subtract(const Mat1& m1, const Mat2& m2) { check_matching_dims("subtract", "m1", m1, "m2", m2); return m1 - m2; } /** * Return the result of subtracting the specified matrix from the specified * scalar. * * @tparam Scal type of the scalar * @tparam Mat type of the matrix or expression * @param c Scalar. * @param m Matrix or expression. * @return The scalar minus the matrix. 
*/ template * = nullptr, require_eigen_t* = nullptr, require_all_not_st_var* = nullptr> inline auto subtract(const Scal c, const Mat& m) { return (c - m.array()).matrix(); } /** * Return the result of subtracting the specified scalar from the specified * matrix. * * @tparam Mat type of the matrix or expression * @tparam Scal type of the scalar * @param m Matrix or expression. * @param c Scalar. * @return The matrix minus the scalar. */ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_not_st_var* = nullptr> inline auto subtract(const Mat& m, const Scal c) { return (m.array() - c).matrix(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/qr_R.hpp0000644000176200001440000000223414645137106022134 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QR_R_HPP #define STAN_MATH_PRIM_FUN_QR_R_HPP #include #include #include namespace stan { namespace math { /** * Returns the upper triangular factor of the fat QR decomposition * * @tparam EigMat type of the matrix * @param m Matrix. 
* @return Upper triangular matrix with maximal rows */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> qr_R( const EigMat& m) { using matrix_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return matrix_t(0, 0); } Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); matrix_t R = qr.matrixQR(); if (m.rows() > m.cols()) { R.bottomRows(m.rows() - m.cols()).setZero(); } const int min_size = std::min(m.rows(), m.cols()); for (int i = 0; i < min_size; i++) { for (int j = 0; j < i; j++) { R.coeffRef(i, j) = 0.0; } if (R(i, i) < 0) { R.row(i) *= -1.0; } } return R; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/grad_reg_lower_inc_gamma.hpp0000644000176200001440000001463614645137106026217 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOWER_REG_INC_GAMMA_HPP #define STAN_MATH_PRIM_FUN_LOWER_REG_INC_GAMMA_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Computes the gradient of the lower regularized incomplete * gamma function. * * The lower incomplete gamma function * derivative w.r.t its first parameter (a) seems to have no * standard source. It also appears to have no widely known * approximate implementation. Gautschi (1979) has a thorough * discussion of the calculation of the lower regularized * incomplete gamma function itself and some stability issues. * * Reference: Gautschi, Walter (1979) ACM Transactions on * mathematical software. 5(4):466-481 * * We implemented calculations for d(gamma_p)/da by taking * derivatives of formulas suggested by Gauschi and others and * testing them against an outside source (Mathematica). We * took three implementations which can cover the range {a:[0,20], * z:[0,30]} with absolute error < 1e-10 with the exception of * values near (0,0) where the error is near 1e-5. 
Relative error * is also <<1e-6 except for regions where the gradient approaches * zero. * * Gautschi suggests calculating the lower incomplete gamma * function for small to moderate values of $z$ using the * approximation: * * \f[ * \frac{\gamma(a,z)}{\Gamma(a)}=z^a e^-z * \sum_n=0^\infty \frac{z^n}{\Gamma(a+n+1)} * \f] * * We write the derivative in the form: * * \f[ * \frac{d\gamma(a,z)\Gamma(a)}{da} = \frac{\log z}{e^z} * \sum_n=0^\infty \frac{z^{a+n}}{\Gamma(a+n+1)} * - \frac{1}{e^z} * \sum_n=0^\infty \frac{z^{a+n}}{\Gamma(a+n+1)}\psi^0(a+n+1) * \f] * * This calculation is sufficiently accurate for small $a$ and * small $z$. For larger values and $a$ and $z$ we use it in its * log form: * * \f[ * \frac{d \gamma(a,z)\Gamma(a)}{da} = \frac{\log z}{e^z} * \sum_n=0^\infty \exp[(a+n)\log z - \log\Gamma(a+n+1)] * - \sum_n=0^\infty \exp[(a+n)\log z - \log\Gamma(a+n+1) + * \log\psi^0(a+n+1)] * \f] * * For large $z$, Gauschi recommends using the upper incomplete * Gamma instead and the negative of its derivative turns out to be * more stable and accurate for larger $z$ and for some combinations * of $a$ and $z$. This is a log-scale implementation of the * derivative of the formulation suggested by Gauschi (1979). For * some values it defers to the negative of the gradient * for the gamma_q function. This is based on the suggestion by Gauschi * (1979) that for large values of $z$ it is better to * carry out calculations using the upper incomplete Gamma function. * * Branching for choice of implementation for the lower incomplete * regularized gamma function gradient. The derivative based on * Gautschi's formulation appears to be sufficiently accurate * everywhere except for large z and small to moderate a. The * intersection between the two regions is a radius 12 quarter circle * centered at a=0, z=30 although both implementations are * satisfactory near the intersection. 
* * Some limits that could be treated, e.g., infinite z should * return tgamma(a) * digamma(a), throw instead to match the behavior of, * e.g., boost::math::gamma_p * * @tparam T1 type of a * @tparam T2 type of z * @param[in] a shared with complete Gamma * @param[in] z value to integrate up to * @param[in] precision series terminates when increment falls below * this value. * @param[in] max_steps number of terms to sum before throwing * @throw std::domain_error if the series does not converge to * requested precision before max_steps. * */ template return_type_t grad_reg_lower_inc_gamma(const T1& a, const T2& z, double precision = 1e-10, int max_steps = 1e5) { using std::exp; using std::log; using std::pow; using std::sqrt; using TP = return_type_t; if (is_any_nan(a, z)) { return std::numeric_limits::quiet_NaN(); } check_positive_finite("grad_reg_lower_inc_gamma", "a", a); if (z == 0.0) { return 0.0; } check_positive_finite("grad_reg_lower_inc_gamma", "z", z); if ((a < 0.8 && z > 15.0) || (a < 12.0 && z > 30.0) || a < sqrt(-756 - value_of_rec(z) * value_of_rec(z) + 60 * value_of_rec(z))) { T1 tg = tgamma(a); T1 dig = digamma(a); return -grad_reg_inc_gamma(a, z, tg, dig, max_steps, precision); } T2 log_z = log(z); T2 emz = exp(-z); int n = 0; T1 a_plus_n = a; TP sum_a = 0.0; T1 lgamma_a_plus_1 = lgamma(a + 1); T1 lgamma_a_plus_n_plus_1 = lgamma_a_plus_1; TP term; while (true) { term = exp(a_plus_n * log_z - lgamma_a_plus_n_plus_1); sum_a += term; if (term <= precision) { break; } if (n >= max_steps) { throw_domain_error("grad_reg_lower_inc_gamma", "n (internal counter)", max_steps, "exceeded ", " iterations, gamma_p(a,z) gradient (a) " "did not converge."); } ++n; lgamma_a_plus_n_plus_1 += log1p(a_plus_n); ++a_plus_n; } n = 1; a_plus_n = a + 1; TP digammap1 = digamma(a_plus_n); TP sum_b = digammap1 * exp(a * log_z - lgamma_a_plus_1); lgamma_a_plus_n_plus_1 = lgamma_a_plus_1 + log(a_plus_n); while (true) { digammap1 += 1 / a_plus_n; term = exp(a_plus_n * log_z - 
lgamma_a_plus_n_plus_1) * digammap1; sum_b += term; if (term <= precision) { return emz * (log_z * sum_a - sum_b); } if (n >= max_steps) { throw_domain_error("grad_reg_lower_inc_gamma", "n (internal counter)", max_steps, "exceeded ", " iterations, gamma_p(a,z) gradient (a) " "did not converge."); } ++n; lgamma_a_plus_n_plus_1 += log1p(a_plus_n); ++a_plus_n; } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/eigendecompose_sym.hpp0000644000176200001440000000265214645137106025113 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_EIGENDECOMPOSE_SYM_HPP #define STAN_MATH_PRIM_FUN_EIGENDECOMPOSE_SYM_HPP #include #include #include namespace stan { namespace math { /** * Return the eigendecomposition of the specified symmetric matrix. * * @tparam EigMat type of the matrix * @param m Specified matrix. * @return A tuple V,D where V is a matrix where the columns are the * eigenvectors of m, and D is a column vector of the eigenvalues of m. * The eigenvalues are in ascending order of magnitude, with the eigenvectors * provided in the same order. */ template * = nullptr, require_not_st_var* = nullptr> std::tuple, -1, -1>, Eigen::Matrix, -1, 1>> eigendecompose_sym(const EigMat& m) { if (unlikely(m.size() == 0)) { return std::make_tuple(Eigen::Matrix, -1, -1>(0, 0), Eigen::Matrix, -1, 1>(0, 1)); } using PlainMat = plain_type_t; const PlainMat& m_eval = m; check_symmetric("eigendecompose_sym", "m", m_eval); Eigen::SelfAdjointEigenSolver solver(m_eval); return std::make_tuple(std::move(solver.eigenvectors()), std::move(solver.eigenvalues())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/multiply.hpp0000644000176200001440000001333414645137106023113 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MULTIPLY_HPP #define STAN_MATH_PRIM_FUN_MULTIPLY_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return specified matrix multiplied by specified scalar. 
* * @tparam Mat type of the matrix or expression * @tparam Scal type of the scalar * * @param m matrix * @param c scalar * @return product of matrix and scalar */ template * = nullptr, require_eigen_t* = nullptr, require_all_not_st_var* = nullptr, require_all_not_complex_t>* = nullptr> inline auto multiply(const Mat& m, Scal c) { return c * m; } /** * Return the product of the specified matrix and scalar, one of which must have * a complex value type. The return type will be an expression template denoting * a complex matrix. * * @tparam Mat type of matrix argument * @tparam Scal type of scalar argument * @param m matrix argument * @param c scalar argument * @return expression template evaluating to the product of the matrix and * scalar arguments */ template , Scal>* = nullptr, require_eigen_t* = nullptr, require_not_eigen_t* = nullptr> inline auto multiply(const Mat& m, Scal c) { return m * c; } /** * Return the product of the specified matrix and scalar, one of which must have * a complex value type. The return type will be an expression template denoting * a complex matrix. * * @tparam Scal type of scalar argument * @tparam Mat type of matrix argument * @param c scalar argument * @param m matrix argument * @return expression template evaluating to the product of the matrix and * scalar arguments */ template , Scal>* = nullptr, require_eigen_t* = nullptr, require_not_eigen_t* = nullptr> inline auto multiply(const Scal& m, const Mat& c) { return m * c; } /** * Return specified scalar multiplied by specified matrix. * * @tparam Scal type of the scalar * @tparam Mat type of the matrix or expression * * @param c scalar * @param m matrix * @return product of scalar and matrix */ template * = nullptr, require_eigen_t* = nullptr, require_all_not_st_var* = nullptr, require_all_not_complex_t>* = nullptr> inline auto multiply(Scal c, const Mat& m) { return c * m; } /** * Return the product of the specified matrices. 
The number of * columns in the first matrix must be the same as the number of rows * in the second matrix. * * @tparam Mat1 type of the first matrix or expression * @tparam Mat2 type of the second matrix or expression * * @param m1 first matrix or expression * @param m2 second matrix or expression * @return the product of the first and second matrices * @throw std::invalid_argument if the number of columns of m1 does * not match the number of rows of m2. */ template * = nullptr, require_not_eigen_row_and_col_t* = nullptr> inline auto multiply(const Mat1& m1, const Mat2& m2) { check_size_match("multiply", "Columns of m1", m1.cols(), "Rows of m2", m2.rows()); return m1 * m2; } /** * Return the product of the specified matrices. The number of * columns in the first matrix must be the same as the number of rows * in the second matrix. * * @tparam Mat1 type of the first matrix or expression * @tparam Mat2 type of the second matrix or expression * * @param m1 first matrix or expression * @param m2 second matrix or expression * @return the product of the first and second matrices * @throw std::invalid_argument if the number of columns of m1 does * not match the number of rows of m2. */ template * = nullptr, require_t>>* = nullptr, require_not_eigen_row_and_col_t* = nullptr> inline auto multiply(const Mat1& m1, const Mat2& m2) { check_size_match("multiply", "Columns of m1", m1.cols(), "Rows of m2", m2.rows()); return m1.lazyProduct(m2).eval(); } /** * Return the scalar product of the specified row vector and * specified column vector. The return is the same as the dot * product. The two vectors must be the same size. 
* * @tparam RowVec type of the row vector * @tparam ColVec type of the column vector * * @param rv row vector * @param v column vector * @return scalar result of multiplying row vector by column vector * @throw std::invalid_argument if rv and v are not the same size */ template >* = nullptr, require_eigen_row_and_col_t* = nullptr> inline auto multiply(const RowVec& rv, const ColVec& v) { check_multiplicable("multiply", "rv", rv, "v", v); return dot_product(rv, v); } /** * Return product of scalars. * * @tparam Scalar1 type of first scalar * @tparam Scalar2 type of second scalar * @param m scalar * @param c scalar * @return product */ template * = nullptr> inline return_type_t multiply(Scalar1 m, Scalar2 c) { return c * m; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/acosh.hpp0000644000176200001440000000531014645137106022324 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ACOSH_HPP #define STAN_MATH_PRIM_FUN_ACOSH_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the inverse hyperbolic cosine of the specified value. * Returns nan for nan argument. * * @param[in] x Argument. * @return Inverse hyperbolic cosine of the argument. * @throw std::domain_error If argument is less than 1. */ inline double acosh(double x) { if (is_nan(x)) { return x; } else { check_greater_or_equal("acosh", "x", x, 1.0); #ifdef _WIN32 if (is_inf(x)) return x; #endif return std::acosh(x); } } /** * Integer version of acosh. * * @param[in] x Argument. * @return Inverse hyperbolic cosine of the argument. * @throw std::domain_error If argument is less than 1. */ inline double acosh(int x) { if (is_nan(x)) { return x; } else { check_greater_or_equal("acosh", "x", x, 1); #ifdef _WIN32 if (is_inf(x)) return x; #endif return std::acosh(x); } } /** * Structure to wrap acosh() so it can be vectorized. 
*/ struct acosh_fun { /** * Return the inverse hyperbolic cosine of the specified argument. * * @tparam T type of argument * @param x argument * @return Inverse hyperbolic cosine of the argument. */ template static inline auto fun(const T& x) { return acosh(x); } }; /** * Return the elementwise application of acosh() to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. * * @tparam T type of container * @param x container * @return Elementwise acosh of members of container. */ template < typename T, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline auto acosh(const T& x) { return apply_scalar_unary::apply(x); } namespace internal { /** * Return the hyperbolic arc cosine of the complex argument. * * @tparam V value type of argument * @param[in] z argument * @return hyperbolic arc cosine of the argument */ template inline std::complex complex_acosh(const std::complex& z) { std::complex y_d = acosh(value_of_rec(z)); auto y = log(z + sqrt(z * z - 1)); return copysign(y, y_d); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/qr_thin.hpp0000644000176200001440000000306114645137106022674 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QR_THIN_HPP #define STAN_MATH_PRIM_FUN_QR_THIN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the thin QR decomposition * * @tparam EigMat type of the matrix * @param m Matrix. * @return A tuple containing (Q,R): * 1. Orthogonal matrix with minimal columns * 2. 
Upper triangular matrix with minimal rows */ template * = nullptr> std::tuple, Eigen::Dynamic, Eigen::Dynamic>, Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>> qr_thin(const EigMat& m) { using matrix_t = Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic>; if (unlikely(m.size() == 0)) { return std::make_tuple(matrix_t(0, 0), matrix_t(0, 0)); } Eigen::HouseholderQR qr(m.rows(), m.cols()); qr.compute(m); const int min_size = std::min(m.rows(), m.cols()); matrix_t Q = qr.householderQ() * matrix_t::Identity(m.rows(), min_size); for (int i = 0; i < min_size; i++) { if (qr.matrixQR().coeff(i, i) < 0) { Q.col(i) *= -1.0; } } matrix_t R = qr.matrixQR().topLeftCorner(min_size, m.cols()); R.template triangularView().setZero(); for (int i = 0; i < min_size; ++i) { if (R(i, i) < 0) { R.row(i) *= -1.0; } } return std::make_tuple(std::move(Q), std::move(R)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/Phi_approx.hpp0000644000176200001440000000373614645137106023352 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PHI_APPROX_HPP #define STAN_MATH_PRIM_FUN_PHI_APPROX_HPP #include #include #include #include namespace stan { namespace math { /** * Return an approximation of the unit normal CDF. * * http://www.jiem.org/index.php/jiem/article/download/60/27 * * This function can be used to implement the inverse link function * for probit regression. * * @param x Argument. * @return Probability random sample is less than or equal to argument. */ inline double Phi_approx(double x) { using std::pow; return inv_logit(0.07056 * pow(x, 3.0) + 1.5976 * x); } /** * Return an approximation of the unit normal CDF. * * @param x argument. * @return approximate probability random sample is less than or * equal to argument. */ inline double Phi_approx(int x) { return Phi_approx(static_cast(x)); } /** * Structure to wrap Phi_approx() so it can be vectorized. 
*/ struct Phi_approx_fun { /** * Return the approximate value of the Phi() function applied to * the argument. * * @tparam T type of argument * @param x argument * @return approximate value of Phi applied to argument */ template static inline auto fun(const T& x) { return Phi_approx(x); } }; /** * Return the elementwise application of Phi_approx() to * specified argument container. The return type promotes the * underlying scalar argument type to double if it is an integer, * and otherwise is the argument type. * * @tparam T type of container * @param x container * @return elementwise Phi_approx of container elements */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto Phi_approx(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/modulus.hpp0000644000176200001440000000063314645137106022722 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MODULUS_HPP #define STAN_MATH_PRIM_FUN_MODULUS_HPP #include #include #include #include namespace stan { namespace math { inline int modulus(int x, int y) { if (unlikely(y == 0)) { throw_domain_error("modulus", "divisor is", 0, ""); } return x % y; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/size_zero.hpp0000644000176200001440000000143214645137106023241 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SIZE_ZERO_HPP #define STAN_MATH_PRIM_FUN_SIZE_ZERO_HPP #include #include #include namespace stan { namespace math { /** * Returns 1 if input is of length 0, returns 0 * otherwise * * @param x argument * @return 0 or 1 */ template inline bool size_zero(const T& x) { return !math::size(x); } /** * Returns 1 if any inputs are of length 0, returns 0 * otherwise * * @param x first argument * @param xs parameter pack of remaining arguments to forward to function * @return 0 or 1 */ template inline bool size_zero(const 
T& x, const Ts&... xs) { return (size_zero(x) || size_zero(xs...)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/lmultiply.hpp0000644000176200001440000000324214645137106023264 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LMULTIPLY_HPP #define STAN_MATH_PRIM_FUN_LMULTIPLY_HPP #include #include #include #include namespace stan { namespace math { /** * Return the first argument times the log of the second argument. The * result is 0 if both arguments are 0. The funcgtion is defined by * `lmultiply(x, y) = x * log(y)` if `x` or `y` is non-zero and * `lmultiply(0, 0) = 0` otherwise. * * @tparam T1 type of the first argument * @tparam T2 type of the second argument * @param a first argument * @param b second argument * @return the first argument times the log of the second argument */ template * = nullptr> inline return_type_t lmultiply(const T1 a, const T2 b) { using std::log; if (a == 0 && b == 0) { return 0; } return a * log(b); } /** * Return the result of applying `lmultiply` to the arguments * elementwise, with broadcasting if one of the arguments is a scalar. * At least one of the arguments must be a container. 
* * @tparam T1 type of the first argument * @tparam T2 type of the second argument * @param a first argument * @param b second argument * @return result of applying `lmultiply` to the arguments */ template * = nullptr, require_all_not_var_matrix_t* = nullptr> inline auto lmultiply(const T1& a, const T2& b) { return apply_scalar_binary( a, b, [&](const auto& c, const auto& d) { return lmultiply(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/dot_self.hpp0000644000176200001440000000154214645137106023031 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DOT_SELF_HPP #define STAN_MATH_PRIM_FUN_DOT_SELF_HPP #include #include #include #include #include namespace stan { namespace math { inline double dot_self(const std::vector& x) { double sum = 0.0; for (double i : x) { sum += i * i; } return sum; } /** * Returns squared norm of a vector or matrix. For vectors that equals the dot * product of the specified vector with itself. * * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase) * @param v Vector. */ template * = nullptr, require_not_eigen_vt* = nullptr> inline value_type_t dot_self(const T& v) { return v.squaredNorm(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/csr_extract_v.hpp0000644000176200001440000000316214645137106024100 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CSR_EXTRACT_V_HPP #define STAN_MATH_PRIM_FUN_CSR_EXTRACT_V_HPP #include #include #include #include #include namespace stan { namespace math { /** \addtogroup csr_format * @{ */ /** * Extract the column indexes for non-zero value from a sparse * matrix. * * @tparam T type of elements in the matrix * @param A Sparse matrix. * @return Array of column indexes for non-zero entries of A. 
*/ template const std::vector csr_extract_v( const Eigen::SparseMatrix& A) { std::vector v(A.nonZeros()); for (int nze = 0; nze < A.nonZeros(); ++nze) { v[nze] = *(A.innerIndexPtr() + nze) + stan::error_index::value; } return v; } /** * Extract the column indexes for non-zero values from a dense * matrix by converting to sparse and calling the sparse matrix * extractor. * * @tparam T type of elements in the matrix * @tparam R number of rows, can be Eigen::Dynamic * @tparam C number of columns, can be Eigen::Dynamic * * @param[in] A dense matrix. * @return Array of column indexes to non-zero entries of A. */ template * = nullptr> const std::vector csr_extract_v(const T& A) { // conversion to sparse seems to touch data twice, so we need to call to_ref Eigen::SparseMatrix, Eigen::RowMajor> B = to_ref(A).sparseView(); std::vector v = csr_extract_v(B); return v; } /** @} */ // end of csr_format group } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/is_integer.hpp0000644000176200001440000000101014645137106023350 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IS_INTEGER_HPP #define STAN_MATH_PRIM_FUN_IS_INTEGER_HPP #include #include #include namespace stan { namespace math { /** * Returns true if the input is an integer and false otherwise. * * @param x Value to test. 
* @return true if the value is an integer */ template inline bool is_integer(T x) { using std::floor; return floor(x) == x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/initialize.hpp0000644000176200001440000000155314645137106023375 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INITIALIZE_HPP #define STAN_MATH_PRIM_FUN_INITIALIZE_HPP #include #include #include #include namespace stan { namespace math { // initializations called for local variables generate in Stan // code; fills in all cells in first arg with second arg template * = nullptr, require_convertible_t* = nullptr> inline void initialize(T& x, V v) { x = v; } template inline void initialize(Eigen::Matrix& x, const V& v) { x.fill(v); } template inline void initialize(std::vector& x, const V& v) { for (size_t i = 0; i < x.size(); ++i) { initialize(x[i], v); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rep_vector.hpp0000644000176200001440000000126414645137106023403 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_REP_VECTOR_HPP #define STAN_MATH_PRIM_FUN_REP_VECTOR_HPP #include #include namespace stan { namespace math { template * = nullptr, require_stan_scalar_t* = nullptr> inline auto rep_vector(const T& x, int n) { check_nonnegative("rep_vector", "n", n); return T_ret::Constant(n, x); } template * = nullptr> inline auto rep_vector(const T& x, int n) { return rep_vector, Eigen::Dynamic, 1>>(x, n); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/identity_free.hpp0000644000176200001440000000134714645137106024067 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_IDENTITY_FREE_HPP #define STAN_MATH_PRIM_FUN_IDENTITY_FREE_HPP #include namespace stan { namespace math { /** * Returns the result of applying the inverse of the identity * constraint transform to the input. This promotes the input to the least upper * bound of the input types. 
* * * @tparam T type of value to promote * @tparam Types Other types. * @param[in] x value to promote * @return value */ template * = nullptr> inline auto identity_free(T&& x, Types&&... /* args */) { return promote_scalar_t, T>(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/rows_dot_self.hpp0000644000176200001440000000122414645137106024100 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ROWS_DOT_SELF_HPP #define STAN_MATH_PRIM_FUN_ROWS_DOT_SELF_HPP #include #include namespace stan { namespace math { /** * Returns the dot product of each row of a matrix with itself. * * @tparam T type of the matrix (must be derived from \c * Eigen::MatrixBase) * * @param x matrix */ template * = nullptr, require_not_st_var* = nullptr> inline Eigen::Matrix, T::RowsAtCompileTime, 1> rows_dot_self( const T& x) { return x.rowwise().squaredNorm(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/hypergeometric_3F2.hpp0000644000176200001440000001371714645137106024701 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_3F2_HPP #define STAN_MATH_PRIM_FUN_HYPERGEOMETRIC_3F2_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { template , typename ArrayAT = Eigen::Array, 3, 1>, typename ArrayBT = Eigen::Array, 3, 1>, require_all_vector_t* = nullptr, require_stan_scalar_t* = nullptr> T_return hypergeometric_3F2_infsum(const Ta& a, const Tb& b, const Tz& z, double precision = 1e-6, int max_steps = 1e5) { ArrayAT a_array = as_array_or_scalar(a); ArrayBT b_array = append_row(as_array_or_scalar(b), 1.0); check_3F2_converges("hypergeometric_3F2", a_array[0], a_array[1], a_array[2], b_array[0], b_array[1], z); T_return t_acc = 1.0; T_return log_t = 0.0; T_return log_z = log(fabs(z)); Eigen::ArrayXi a_signs = sign(value_of_rec(a_array)); Eigen::ArrayXi b_signs = sign(value_of_rec(b_array)); 
plain_type_t apk = a_array; plain_type_t bpk = b_array; int z_sign = sign(value_of_rec(z)); int t_sign = z_sign * a_signs.prod() * b_signs.prod(); int k = 0; while (k <= max_steps && log_t >= log(precision)) { // Replace zero values with 1 prior to taking the log so that we accumulate // 0.0 rather than -inf const auto& abs_apk = math::fabs((apk == 0).select(1.0, apk)); const auto& abs_bpk = math::fabs((bpk == 0).select(1.0, bpk)); T_return p = sum(log(abs_apk)) - sum(log(abs_bpk)); if (p == NEGATIVE_INFTY) { return t_acc; } log_t += p + log_z; t_acc += t_sign * exp(log_t); if (is_inf(t_acc)) { throw_domain_error("hypergeometric_3F2", "sum (output)", t_acc, "overflow hypergeometric function did not converge."); } k++; apk.array() += 1.0; bpk.array() += 1.0; a_signs = sign(value_of_rec(apk)); b_signs = sign(value_of_rec(bpk)); t_sign = a_signs.prod() * b_signs.prod() * t_sign; } if (k == max_steps) { throw_domain_error("hypergeometric_3F2", "k (internal counter)", max_steps, "exceeded iterations, hypergeometric function did not ", "converge."); } return t_acc; } } // namespace internal /** * Hypergeometric function (3F2). * * Function reference: http://dlmf.nist.gov/16.2 * * \f[ * _3F_2 \left( * \begin{matrix}a_1 a_2 a[2] \\ b_1 b_2\end{matrix}; z * \right) = \sum_k=0^\infty * \frac{(a_1)_k(a_2)_k(a_3)_k}{(b_1)_k(b_2)_k}\frac{z^k}{k!} \f] * * Where $(a_1)_k$ is an upper shifted factorial. * * Calculate the hypergeometric function (3F2) as the power series * directly to within precision or until * max_steps terms. * * This function does not have a closed form but will converge if: * - |z| is less than 1 * - |z| is equal to one and b[0] + b[1] < a[0] + a[1] + * a[2] This function is a rational polynomial if * - a[0], a[1], or a[2] is a * non-positive integer * This function can be treated as a rational polynomial if * - b[0] or b[1] is a non-positive integer * and the series is terminated prior to the final term. 
* * @tparam Ta type of Eigen/Std vector 'a' arguments * @tparam Tb type of Eigen/Std vector 'b' arguments * @tparam Tz type of z argument * @param[in] a Always called with a[1] > 1, a[2] <= 0 * @param[in] b Always called with int b[0] < |a[2]|, <= 1) * @param[in] z z (is always called with 1 from beta binomial cdfs) * @param[in] precision precision of the infinite sum. defaults to 1e-6 * @param[in] max_steps number of steps to take. defaults to 1e5 * @return The 3F2 generalized hypergeometric function applied to the * arguments {a1, a2, a3}, {b1, b2} */ template * = nullptr, require_stan_scalar_t* = nullptr> auto hypergeometric_3F2(const Ta& a, const Tb& b, const Tz& z) { check_3F2_converges("hypergeometric_3F2", a[0], a[1], a[2], b[0], b[1], z); // Boost's pFq throws convergence errors in some cases, fallback to naive // infinite-sum approach (tests pass for these) if (z == 1.0 && (sum(b) - sum(a)) < 0.0) { return internal::hypergeometric_3F2_infsum(a, b, z); } return hypergeometric_pFq(to_vector(a), to_vector(b), z); } /** * Hypergeometric function (3F2). * * Overload for initializer_list inputs * * @tparam Ta type of scalar 'a' arguments * @tparam Tb type of scalar 'b' arguments * @tparam Tz type of z argument * @param[in] a Always called with a[1] > 1, a[2] <= 0 * @param[in] b Always called with int b[0] < |a[2]|, <= 1) * @param[in] z z (is always called with 1 from beta binomial cdfs) * @param[in] precision precision of the infinite sum. defaults to 1e-6 * @param[in] max_steps number of steps to take. 
defaults to 1e5 * @return The 3F2 generalized hypergeometric function applied to the * arguments {a1, a2, a3}, {b1, b2} */ template * = nullptr> auto hypergeometric_3F2(const std::initializer_list& a, const std::initializer_list& b, const Tz& z) { return hypergeometric_3F2(std::vector(a), std::vector(b), z); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/add_diag.hpp0000644000176200001440000000272614645137106022753 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ADD_DIAG_HPP #define STAN_MATH_PRIM_FUN_ADD_DIAG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns a Matrix with values added along the main diagonal * * @tparam T_m type of element in Eigen::Matrix * @tparam T_a Type of element(s) to add along the diagonal * * @param mat a matrix * @param to_add scalar value or column vector or row vector to add along the * diagonal * @return a matrix with to_add added along main diagonal */ template , typename = require_any_t, is_stan_scalar>> inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> add_diag(const T_m &mat, const T_a &to_add) { if (is_vector::value) { const size_t length_diag = std::min(mat.rows(), mat.cols()); check_consistent_size("add_diag", "number of elements of to_add", to_add, length_diag); } Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> out = mat; out.diagonal().array() += as_array_or_scalar(as_column_vector_or_scalar(to_add)); return out; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ub_constrain.hpp0000644000176200001440000002237314645137106023725 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_UB_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_UB_CONSTRAIN_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the upper-bounded value for the specified unconstrained * matrix and upper bound. * *

The transform is * *

\f$f(x) = U - \exp(x)\f$ * *

where \f$U\f$ is the upper bound. * * @tparam T type of Matrix * @tparam U type of upper bound * @param[in] x free Matrix. * @param[in] ub upper bound * @return matrix constrained to have upper bound */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub) { if (value_of_rec(ub) == INFTY) { return identity_constrain(x, ub); } else { return subtract(ub, exp(x)); } } /** * Return the upper-bounded value for the specified unconstrained * scalar and upper bound and increment the specified log * probability reference with the log absolute Jacobian * determinant of the transform. * *

The transform is as specified for * ub_constrain(T, double). The log absolute Jacobian * determinant is * *

\f$ \log | \frac{d}{dx} -\mbox{exp}(x) + U | * = \log | -\mbox{exp}(x) + 0 | = x\f$. * * @tparam T type of scalar * @tparam U type of upper bound * @tparam S type of log probability * @param[in] x free scalar * @param[in] ub upper bound * @param[in,out] lp log density * @return scalar constrained to have upper bound */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub, std::decay_t>& lp) { if (value_of_rec(ub) == INFTY) { return identity_constrain(x, ub); } else { lp += x; return subtract(ub, exp(x)); } } /** * Specialization of `ub_constrain` to apply a scalar upper bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase`. * @tparam U Scalar. * @param[in] x unconstrained input * @param[in] ub upper bound on output * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_not_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub) { return eval(x.unaryExpr([ub](auto&& xx) { return ub_constrain(xx, ub); })); } /** * Specialization of `ub_constrain` to apply a scalar upper bound elementwise * to each input. * * @tparam T A type inheriting from `EigenBase`. * @tparam U Scalar. * @param[in] x unconstrained input * @param[in] ub upper bound on output * @param[in,out] lp reference to log probability to increment * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_stan_scalar_t* = nullptr, require_all_not_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub, std::decay_t>& lp) { return eval( x.unaryExpr([ub, &lp](auto&& xx) { return ub_constrain(xx, ub, lp); })); } /** * Specialization of `ub_constrain` to apply a matrix of upper bounds * elementwise to each input element. * * @tparam T A type inheriting from `EigenBase`. * @tparam U A type inheriting from `EigenBase`. 
* @param[in] x unconstrained input * @param[in] ub upper bound on output * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub) { check_matching_dims("ub_constrain", "x", x, "ub", ub); return eval(x.binaryExpr( ub, [](auto&& xx, auto&& ubb) { return ub_constrain(xx, ubb); })); } /** * Specialization of `ub_constrain` to apply a matrix of upper bounds * elementwise to each input element. * * @tparam T A type inheriting from `EigenBase`. * @tparam U A type inheriting from `EigenBase`. * @param[in] x unconstrained input * @param[in] ub upper bound on output * @param[in,out] lp reference to log probability to increment * @return upper-bound constrained value corresponding to inputs */ template * = nullptr, require_all_not_st_var* = nullptr> inline auto ub_constrain(const T& x, const U& ub, std::decay_t>& lp) { check_matching_dims("ub_constrain", "x", x, "ub", ub); return eval(x.binaryExpr( ub, [&lp](auto&& xx, auto&& ubb) { return ub_constrain(xx, ubb, lp); })); } /** * Specialization of `ub_constrain` to apply a scalar upper bound elementwise * to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam U Scalar. * @param[in] x unconstrained input * @param[in] ub upper bound on output * @return lower-bound constrained value corresponding to inputs */ template * = nullptr> inline auto ub_constrain(const std::vector& x, const U& ub) { std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = ub_constrain(x[i], ub); } return ret; } /** * Specialization of `ub_constrain` to apply a scalar upper bound elementwise * to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam U Scalar. 
* @param[in] x unconstrained input * @param[in] ub upper bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template * = nullptr> inline auto ub_constrain(const std::vector& x, const U& ub, return_type_t& lp) { std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = ub_constrain(x[i], ub, lp); } return ret; } /** * Specialization of `ub_constrain` to apply a container of upper bounds * elementwise to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam U A type inheriting from `EigenBase` or a standard vector. * @param[in] x unconstrained input * @param[in] ub upper bound on output * @return lower-bound constrained value corresponding to inputs */ template inline auto ub_constrain(const std::vector& x, const std::vector& ub) { check_matching_dims("ub_constrain", "x", x, "ub", ub); std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = ub_constrain(x[i], ub[i]); } return ret; } /** * Specialization of `ub_constrain` to apply a container of upper bounds * elementwise to each input element. * * @tparam T A Any type with a Scalar `scalar_type`. * @tparam U A type inheriting from `EigenBase` or a standard vector. * @param[in] x unconstrained input * @param[in] ub upper bound on output * @param[in,out] lp reference to log probability to increment * @return lower-bound constrained value corresponding to inputs */ template inline auto ub_constrain(const std::vector& x, const std::vector& ub, return_type_t& lp) { check_matching_dims("ub_constrain", "x", x, "ub", ub); std::vector> ret(x.size()); for (size_t i = 0; i < x.size(); ++i) { ret[i] = ub_constrain(x[i], ub[i], lp); } return ret; } /** * Specialization of `ub_constrain` to apply a container of upper bounds * elementwise to each input element. 
If the `Jacobian` parameter is `true`, the * log density accumulator is incremented with the log absolute Jacobian * determinant of the transform. All of the transforms are specified with their * Jacobians in the *Stan Reference Manual* chapter Constraint Transforms. * * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @tparam U A type inheriting from `Eigen::EigenBase`, a `var_value` with inner * type inheriting from `Eigen::EigenBase`, a standard vector, or a scalar * @param[in] x unconstrained input * @param[in] ub upper bound on output * @param[in, out] lp log density accumulator * @return lower-bound constrained value corresponding to inputs */ template inline auto ub_constrain(const T& x, const U& ub, return_type_t& lp) { if (Jacobian) { return ub_constrain(x, ub, lp); } else { return ub_constrain(x, ub); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/quantile.hpp0000644000176200001440000000752714645137106023065 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_QUANTILE_HPP #define STAN_MATH_PRIM_FUN_QUANTILE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return sample quantiles corresponding to the given probabilities. * The smallest observation corresponds to a probability of 0 and the largest to * a probability of 1. * * Implements algorithm 7 from Hyndman, R. J. and Fan, Y., * Sample quantiles in Statistical Packages (R's default quantile function) * * @tparam T An Eigen type with either fixed rows or columns at compile time, or * std::vector * @param samples_vec Numeric vector whose sample quantiles are wanted * @param p Probability with value between 0 and 1. * @return Sample quantile. 
* @throw std::invalid_argument If any element of samples_vec is NaN, or size 0. * @throw std::domain_error If `p<0` or `p>1`. */ template * = nullptr, require_vector_vt* = nullptr> inline double quantile(const T& samples_vec, const double p) { check_not_nan("quantile", "p", p); check_bounded("quantile", "p", p, 0, 1); const size_t n_sample = samples_vec.size(); if (n_sample == 0) { return {}; } Eigen::VectorXd x = as_array_or_scalar(samples_vec); check_not_nan("quantile", "samples_vec", x); if (n_sample == 1) return x.coeff(0); else if (p == 0.) return *std::min_element(x.data(), x.data() + n_sample); else if (p == 1.) return *std::max_element(x.data(), x.data() + n_sample); double index = (n_sample - 1) * p; size_t lo = std::floor(index); size_t hi = std::ceil(index); std::sort(x.data(), x.data() + n_sample, std::less()); double h = index - lo; return (1 - h) * x.coeff(lo) + h * x.coeff(hi); } /** * Return sample quantiles corresponding to the given probabilities. * The smallest observation corresponds to a probability of 0 and the largest to * a probability of 1. * * Implements algorithm 7 from Hyndman, R. J. and Fan, Y., * Sample quantiles in Statistical Packages (R's default quantile function) * * @tparam T An Eigen type with either fixed rows or columns at compile time, or * std::vector * @tparam Tp Type of probabilities vector * @param samples_vec Numeric vector whose sample quantiles are wanted * @param ps Vector of probability with value between 0 and 1. * @return Sample quantiles, one for each p in ps. * @throw std::invalid_argument If any of the values are NaN or size 0. * @throw std::domain_error If `p<0` or `p>1` for any p in ps. 
*/ template * = nullptr, require_vector_vt* = nullptr, require_std_vector_vt* = nullptr> inline std::vector quantile(const T& samples_vec, const Tp& ps) { check_not_nan("quantile", "ps", ps); check_bounded("quantile", "ps", ps, 0, 1); const size_t n_sample = samples_vec.size(); const size_t n_ps = ps.size(); if (n_ps == 0 || n_sample == 0) { return {}; } Eigen::VectorXd x = as_array_or_scalar(samples_vec); check_not_nan("quantile", "samples_vec", x); const auto& p = as_array_or_scalar(ps); std::vector ret(n_ps, 0.0); std::sort(x.data(), x.data() + n_sample, std::less()); Eigen::ArrayXd index = (n_sample - 1) * p; for (size_t i = 0; i < n_ps; ++i) { if (p[i] == 0.) { ret[i] = x.coeff(0); } else if (p[i] == 1.) { ret[i] = x.coeff(n_sample - 1); } else { size_t lo = std::floor(index[i]); size_t hi = std::ceil(index[i]); double h = index[i] - lo; ret[i] = (1 - h) * x.coeff(lo) + h * x.coeff(hi); } } return ret; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/read_corr_L.hpp0000644000176200001440000001045214645137106023445 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_READ_CORR_L_HPP #define STAN_MATH_PRIM_FUN_READ_CORR_L_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the Cholesky factor of the correlation matrix of the * specified dimensionality corresponding to the specified * canonical partial correlations. * * It is generally better to work with the Cholesky factor rather * than the correlation matrix itself when the determinant, * inverse, etc. of the correlation matrix is needed for some * statistical calculation. * *

See read_corr_matrix(Array, size_t, T) * for more information. * * @tparam T type of the array (must be derived from \c Eigen::ArrayBase and * have one compile-time dimension equal to 1) * @param CPCs The (K choose 2) canonical partial correlations in * (-1, 1). * @param K Dimensionality of correlation matrix. * @return Cholesky factor of correlation matrix for specified * canonical partial correlations. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> read_corr_L( const T& CPCs, // on (-1, 1) size_t K) { using T_scalar = value_type_t; if (K == 0) { return {}; } if (K == 1) { return Eigen::Matrix::Identity(1, 1); } using std::sqrt; Eigen::Array temp; Eigen::Array acc(K - 1); acc.setOnes(); // Cholesky factor of correlation matrix Eigen::Matrix L(K, K); L.setZero(); size_t position = 0; size_t pull = K - 1; L(0, 0) = 1.0; L.col(0).tail(pull) = temp = CPCs.head(pull); acc.tail(pull) = T_scalar(1.0) - temp.square(); for (size_t i = 1; i < (K - 1); i++) { position += pull; pull--; temp = CPCs.segment(position, pull); L(i, i) = sqrt(acc(i - 1)); L.col(i).tail(pull) = temp * acc.tail(pull).sqrt(); acc.tail(pull) *= T_scalar(1.0) - temp.square(); } L(K - 1, K - 1) = sqrt(acc(K - 2)); return L; } /** * Return the Cholesky factor of the correlation matrix of the * specified dimensionality corresponding to the specified * canonical partial correlations, incrementing the specified * scalar reference with the log absolute determinant of the * Jacobian of the transformation. * *

The implementation is Ben Goodrich's Cholesky * factor-based approach to the C-vine method of: * *

  • Daniel Lewandowski, Dorota Kurowicka, and Harry Joe, * Generating random correlation matrices based on vines and * extended onion method Journal of Multivariate Analysis 100 * (2009) 1989–2001
* * @tparam T type of the array (must be derived from \c Eigen::ArrayBase and * have one compile-time dimension equal to 1) * @param CPCs The (K choose 2) canonical partial correlations in * (-1, 1). * @param K Dimensionality of correlation matrix. * @param log_prob Reference to variable to increment with the log * Jacobian determinant. * @return Cholesky factor of correlation matrix for specified * partial correlations. */ template * = nullptr> Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> read_corr_L( const T& CPCs, size_t K, value_type_t& log_prob) { using T_scalar = value_type_t; if (K == 0) { return {}; } if (K == 1) { return Eigen::Matrix::Identity(1, 1); } const Eigen::Ref>& CPCs_ref = CPCs; size_t pos = 0; T_scalar acc = 0; // no need to abs() because this Jacobian determinant // is strictly positive (and triangular) // see inverse of Jacobian in equation 11 of LKJ paper for (size_t k = 1; k <= (K - 2); k++) { for (size_t i = k + 1; i <= K; i++) { acc += (K - k - 1) * log1m(square(CPCs_ref(pos))); pos++; } } log_prob += 0.5 * acc; return read_corr_L(CPCs_ref, K); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/trace_quad_form.hpp0000644000176200001440000000210714645137106024363 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TRACE_QUAD_FORM_HPP #define STAN_MATH_PRIM_FUN_TRACE_QUAD_FORM_HPP #include #include #include #include namespace stan { namespace math { /** * Compute trace(B^T A B). 
* * @tparam EigMat1 type of the first matrix * @tparam EigMat2 type of the second matrix * * @param A matrix * @param B matrix * @return The trace of B^T A B * @throw std::domain_error if A is not square * @throw std::domain_error if A cannot be multiplied by B */ template * = nullptr> inline return_type_t trace_quad_form(const EigMat1& A, const EigMat2& B) { check_square("trace_quad_form", "A", A); check_multiplicable("trace_quad_form", "A", A, "B", B); const auto& B_ref = to_ref(B); return B_ref.cwiseProduct(A * B_ref).sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/vector_seq_view.hpp0000644000176200001440000000656614645137106024451 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_VECTOR_SEQ_VIEW_HPP #define STAN_MATH_PRIM_FUN_VECTOR_SEQ_VIEW_HPP #include #include #include namespace stan { /** * This class provides a low-cost wrapper for situations where you either need * an Eigen Vector or RowVector or a std::vector of them and you want to be * agnostic between those two options. This is similar to scalar_seq_view but * instead of being a sequence-like view over a scalar or seq of scalars, it's * a sequence-like view over a Vector or seq of Vectors. Notably this version * only allows std::vectors as the outer container type, since we would have * difficulty figuring out which contained type was the container otherwise. * * @tparam T the wrapped type, either a Vector or std::vector of them. */ template class vector_seq_view; /** * This class provides a low-cost wrapper for situations where you either need * an Eigen Vector or RowVector or a std::vector of them and you want to be * agnostic between those two options. This is similar to scalar_seq_view but * instead of being a sequence-like view over a scalar or seq of scalars, it's * a sequence-like view over a Vector or seq of Vectors. 
Notably this version * only allows std::vectors as the outer container type, since we would have * difficulty figuring out which contained type was the container otherwise. * * @tparam T the type of the underlying Vector */ template class vector_seq_view> { public: explicit vector_seq_view(const T& m) : m_(m) {} static constexpr auto size() { return 1; } inline const auto& operator[](size_t /* i */) const noexcept { return m_; } template * = nullptr> inline const auto& val(size_t /* i */) const noexcept { return m_; } template * = nullptr> inline auto val(size_t /* i */) const noexcept { return m_.val(); } private: const ref_type_t m_; }; namespace internal { template using is_matrix_or_std_vector = math::disjunction, is_std_vector>; } /** * This class provides a low-cost wrapper for situations where you either need * an Eigen Vector or RowVector or a std::vector of them and you want to be * agnostic between those two options. This is similar to scalar_seq_view but * instead of being a sequence-like view over a scalar or seq of scalars, it's * a sequence-like view over a Vector or seq of Vectors. Notably this version * only allows std::vectors as the outer container type, since we would have * difficulty figuring out which contained type was the container otherwise. 
* * @tparam S the type inside of the std::vector */ template class vector_seq_view< T, require_std_vector_vt> { public: explicit vector_seq_view(const T& v) noexcept : v_(v) {} inline auto size() const noexcept { return v_.size(); } inline decltype(auto) operator[](size_t i) const { return v_[i]; } template * = nullptr> inline decltype(auto) val(size_t i) const { return v_[i]; } template * = nullptr> inline auto val(size_t i) const { return value_of(v_[i]); } private: const T& v_; }; } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/to_row_vector.hpp0000644000176200001440000000247714645137106024135 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_TO_ROW_VECTOR_HPP #define STAN_MATH_PRIM_FUN_TO_ROW_VECTOR_HPP #include #include #include namespace stan { namespace math { // row_vector to_row_vector(matrix) // row_vector to_row_vector(vector) // row_vector to_row_vector(row_vector) template * = nullptr> inline Eigen::Matrix, 1, Eigen::Dynamic> to_row_vector( const EigMat& matrix) { using T = value_type_t; Eigen::Matrix res(matrix.size()); Eigen::Map< Eigen::Matrix> res_map(res.data(), matrix.rows(), matrix.cols()); res_map = matrix; return res; } // row_vector to_row_vector(real[]) template inline Eigen::Matrix to_row_vector( const std::vector& vec) { return Eigen::Matrix::Map(vec.data(), vec.size()); } // row_vector to_row_vector(int[]) inline Eigen::Matrix to_row_vector( const std::vector& vec) { int C = vec.size(); Eigen::Matrix result(C); for (int i = 0; i < C; i++) { result(i) = vec[i]; } return result; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/initialize_fill.hpp0000644000176200001440000000312414645137106024377 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INITIALIZE_FILL_HPP #define STAN_MATH_PRIM_FUN_INITIALIZE_FILL_HPP #include #include #include namespace stan { namespace math { /** * Fill the specified container with the specified value. * * The specified matrix is filled by element. 
* * @tparam EigMat Type inheriting from `EigenBase` * @tparam S Type of value. * * @param x Container. * @param y Value. */ template * = nullptr, require_stan_scalar_t* = nullptr> inline void initialize_fill(EigMat& x, const S& y) { x.fill(y); } /** * Fill the specified container with the specified value. * * This base case simply assigns the value to the container. * * @tparam T Type of reference container. * @tparam S Type of value. * @param x Container. * @param y Value. */ template < typename T, typename S, require_t&, std::decay_t>>* = nullptr> inline void initialize_fill(T& x, S&& y) { x = std::forward(y); } /** * Fill the specified container with the specified value. * * Each container in the specified standard vector is filled * recursively by calling fill. * * @tparam Vec A standard vector * @tparam S type of value * @param[in] x Container. * @param[in, out] y Value. */ template * = nullptr> inline void initialize_fill(Vec& x, S&& y) { for (auto& x_val : x) { initialize_fill(x_val, y); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cov_exp_quad.hpp0000644000176200001440000000406414645137106023711 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COV_EXP_QUAD_HPP #define STAN_MATH_PRIM_FUN_COV_EXP_QUAD_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * @deprecated use gp_exp_quad_cov */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_exp_quad(const std::vector& x, const T_sigma& sigma, const T_l& length_scale) { return gp_exp_quad_cov(x, sigma, length_scale); } /** * @deprecated use gp_exp_quad_cov */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_exp_quad(const std::vector& x, const T_sigma& sigma, const std::vector& length_scale) { return gp_exp_quad_cov(x, sigma, length_scale); } /** * @deprecated use gp_exp_quad_cov */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_exp_quad(const 
std::vector& x1, const std::vector& x2, const T_sigma& sigma, const T_l& length_scale) { return gp_exp_quad_cov(x1, x2, sigma, length_scale); } /** * @deprecated use gp_exp_quad_cov */ template inline typename Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cov_exp_quad(const std::vector& x1, const std::vector& x2, const T_sigma& sigma, const std::vector& length_scale) { return gp_exp_quad_cov(x1, x2, sigma, length_scale); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/binomial_coefficient_log.hpp0000644000176200001440000001302114645137106026216 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_BINOMIAL_COEFFICIENT_LOG_HPP #define STAN_MATH_PRIM_FUN_BINOMIAL_COEFFICIENT_LOG_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the log of the binomial coefficient for the specified * arguments. * * The binomial coefficient, \f${n \choose k}\f$, read "n choose k", is * defined for \f$0 \leq k \leq n\f$ by * * \f${n \choose k} = \frac{n!}{k! (n-k)!}\f$. * * This function uses Gamma functions to define the log * and generalize the arguments to continuous n and k. * * \f$ \log {n \choose k} * = \log \ \Gamma(n+1) - \log \Gamma(k+1) - \log \Gamma(n-k+1)\f$. 
* * \f[ \mbox{binomial\_coefficient\_log}(x, y) = \begin{cases} \textrm{error} & \mbox{if } y > x + 1 \textrm{ or } y < -1 \textrm{ or } x < -1\\ \ln\Gamma(x+1) & \mbox{if } -1 < y < x + 1 \\ \quad -\ln\Gamma(y+1)& \\ \quad -\ln\Gamma(x-y+1)& \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{binomial\_coefficient\_log}(x, y)}{\partial x} = \begin{cases} \textrm{error} & \mbox{if } y > x + 1 \textrm{ or } y < -1 \textrm{ or } x < -1\\ \Psi(x+1) & \mbox{if } 0\leq y \leq x \\ \quad -\Psi(x-y+1)& \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] \f[ \frac{\partial\, \mbox{binomial\_coefficient\_log}(x, y)}{\partial y} = \begin{cases} \textrm{error} & \mbox{if } y > x + 1 \textrm{ or } y < -1 \textrm{ or } x < -1\\ -\Psi(y+1) & \mbox{if } 0\leq y \leq x \\ \quad +\Psi(x-y+1)& \\[6pt] \textrm{NaN} & \mbox{if } x = \textrm{NaN or } y = \textrm{NaN} \end{cases} \f] * * This function is numerically more stable than naive evaluation via lgamma. * * @tparam T_n type of the first argument * @tparam T_k type of the second argument * * @param n total number of objects. * @param k number of objects chosen. * @return log (n choose k). 
*/ template * = nullptr> inline return_type_t binomial_coefficient_log(const T_n n, const T_k k) { using T_partials_return = partials_return_t; if (is_any_nan(n, k)) { return NOT_A_NUMBER; } // Choosing the more stable of the symmetric branches if (n > -1 && k > value_of_rec(n) / 2.0 + 1e-8) { return binomial_coefficient_log(n, n - k); } const T_partials_return n_dbl = value_of(n); const T_partials_return k_dbl = value_of(k); const T_partials_return n_plus_1 = n_dbl + 1; const T_partials_return n_plus_1_mk = n_plus_1 - k_dbl; static const char* function = "binomial_coefficient_log"; check_greater_or_equal(function, "first argument", n, -1); check_greater_or_equal(function, "second argument", k, -1); check_greater_or_equal(function, "(first argument - second argument + 1)", n_plus_1_mk, 0.0); auto ops_partials = make_partials_propagator(n, k); T_partials_return value; if (k_dbl == 0) { value = 0; } else if (n_plus_1 < lgamma_stirling_diff_useful) { value = lgamma(n_plus_1) - lgamma(k_dbl + 1) - lgamma(n_plus_1_mk); } else { value = -lbeta(n_plus_1_mk, k_dbl + 1) - log1p(n_dbl); } if (!is_constant_all::value) { // Branching on all the edge cases. // In direct computation many of those would be NaN // But one-sided limits from within the domain exist, all of the below // follows from lim x->0 from above digamma(x) == -Inf // // Note that we have k < n / 2 (see the first branch in this function) // se we can ignore the n == k - 1 edge case. 
T_partials_return digamma_n_plus_1_mk = digamma(n_plus_1_mk); if (!is_constant_all::value) { if (n_dbl == -1.0) { if (k_dbl == 0) { partials<0>(ops_partials)[0] = 0; } else { partials<0>(ops_partials)[0] = NEGATIVE_INFTY; } } else { partials<0>(ops_partials)[0] = (digamma(n_plus_1) - digamma_n_plus_1_mk); } } if (!is_constant_all::value) { if (k_dbl == 0 && n_dbl == -1.0) { partials<1>(ops_partials)[0] = NEGATIVE_INFTY; } else if (k_dbl == -1) { partials<1>(ops_partials)[0] = INFTY; } else { partials<1>(ops_partials)[0] = (digamma_n_plus_1_mk - digamma(k_dbl + 1)); } } } return ops_partials.build(value); } /** * Enables the vectorized application of the binomial coefficient log function, * when the first and/or second arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @param a First input * @param b Second input * @return Binomial coefficient log function applied to the two inputs. */ template * = nullptr> inline auto binomial_coefficient_log(const T1& a, const T2& b) { return apply_scalar_binary(a, b, [&](const auto& c, const auto& d) { return binomial_coefficient_log(c, d); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ones_int_array.hpp0000644000176200001440000000106214645137106024243 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ONES_INT_ARRAY_HPP #define STAN_MATH_PRIM_FUN_ONES_INT_ARRAY_HPP #include #include namespace stan { namespace math { /** * Return an integer array of ones. * * @param K size of the array * @return An integer array of size K with all elements initialized to 1. * @throw std::domain_error if K is negative. 
*/ inline std::vector ones_int_array(int K) { check_nonnegative("ones_int_array", "size", K); return std::vector(K, 1); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_left_ldlt.hpp0000644000176200001440000000265414645137106024711 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_LDLT_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_LDLT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Returns the solution of the system Ax=b given an LDLT_factor of A * * @tparam T type of matrix in the LDLT_factor * @tparam EigMat type of the right hand side * * @param A LDLT_factor * @param b Right hand side matrix or vector. * @return x = A^-1 b, solution of the linear system. * @throws std::domain_error if rows of b don't match the size of A. */ template * = nullptr, require_all_not_st_var* = nullptr, require_any_not_t>, is_fvar>>* = nullptr> inline Eigen::Matrix, Eigen::Dynamic, EigMat::ColsAtCompileTime> mdivide_left_ldlt(LDLT_factor& A, const EigMat& b) { check_multiplicable("mdivide_left_ldlt", "A", A.matrix(), "b", b); if (A.matrix().cols() == 0) { return {0, b.cols()}; } return A.ldlt().solve( Eigen::Matrix, EigMat::RowsAtCompileTime, EigMat::ColsAtCompileTime>(b)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/diagonal.hpp0000644000176200001440000000111214645137106023001 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DIAGONAL_HPP #define STAN_MATH_PRIM_FUN_DIAGONAL_HPP #include #include namespace stan { namespace math { /** * Return a column vector of the diagonal elements of the * specified matrix. The matrix is not required to be square. * * @tparam T type of the matrix * @param m Specified matrix. * @return Diagonal of the matrix. 
*/ template * = nullptr> inline auto diagonal(const T& m) { return m.diagonal(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sort_desc.hpp0000644000176200001440000000251614645137106023221 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SORT_DESC_HPP #define STAN_MATH_PRIM_FUN_SORT_DESC_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the specified standard vector in descending order. * * @tparam T Type of elements contained in vector. * @param xs Vector to order. * @return Vector in descending order. * @throw std::domain_error If any of the values are NaN. */ template inline std::vector sort_desc(std::vector xs) { check_not_nan("sort_asc", "container argument", xs); std::sort(xs.begin(), xs.end(), std::greater()); return xs; } /** * Return the specified vector in descending order. * * @tparam EigVec type of the vector * * @param xs Vector to order. * @return Vector in descending order. * @throw std::domain_error If any of the values are NaN. */ template * = nullptr> inline plain_type_t sort_desc(EigVec&& xs) { plain_type_t x = std::forward(xs); check_not_nan("sort_asc", "container argument", x); std::sort(x.data(), x.data() + x.size(), std::greater>()); return x; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/all.hpp0000644000176200001440000000437214645137106022006 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ALL_HPP #define STAN_MATH_PRIM_FUN_ALL_HPP #include #include #include namespace stan { namespace math { /** * Return true if all values in the input are true. * * Overload for a single integral input * * @tparam T Any type convertible to `bool` * @param x integral input * @return The input unchanged */ template >* = nullptr> constexpr inline bool all(T x) { return x; } /** * Return true if all values in the input are true. 
* * Overload for Eigen types * * @tparam ContainerT A type derived from `Eigen::EigenBase` that has an * `integral` scalar type * @param x Eigen object of boolean inputs * @return Boolean indicating whether all elements are true */ template * = nullptr> inline bool all(const ContainerT& x) { return x.all(); } // Forward-declaration for correct resolution of all(std::vector) template inline bool all(const std::tuple& x); /** * Return true if all values in the input are true. * * Overload for a std::vector/nested inputs. The Eigen::Map/apply_vector_unary * approach cannot be used as std::vector types do not have a .data() * member and are not always stored contiguously. * * @tparam InnerT Type within std::vector * @param x Nested container of boolean inputs * @return Boolean indicating whether all elements are true */ template inline bool all(const std::vector& x) { return std::all_of(x.begin(), x.end(), [](const auto& i) { return all(i); }); } /** * Return true if all values in the input are true. * * Overload for a tuple input. * * @tparam Types of items within tuple * @param x Tuple of boolean scalar-type elements * @return Boolean indicating whether all elements are true */ template inline bool all(const std::tuple& x) { bool all_true = true; math::for_each( [&all_true](const auto& i) { all_true = all_true && all(i); return; }, x); return all_true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/Phi.hpp0000644000176200001440000000354214645137106021754 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PHI_HPP #define STAN_MATH_PRIM_FUN_PHI_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * The unit normal cumulative distribution function. 
* * The return value for a specified input is the probability that * a random unit normal variate is less than or equal to the * specified value, defined by * * \f$\Phi(x) = \int_{-\infty}^x \mbox{\sf Norm}(x|0, 1) \ dx\f$ * * This function can be used to implement the inverse link function * for probit regression. * * Phi will underflow to 0 below -37.5 and overflow to 1 above 8 * * @param x Argument. * @return Probability random sample is less than or equal to argument. */ inline double Phi(double x) { check_not_nan("Phi", "x", x); if (x < -37.5) { return 0; } else if (x < -5.0) { return 0.5 * erfc(-INV_SQRT_TWO * x); } else if (x > 8.25) { return 1; } else { return 0.5 * (1.0 + erf(INV_SQRT_TWO * x)); } } /** * Structure to wrap Phi() so it can be vectorized. * * @tparam T type of argument * @param x argument * @return Unit normal CDF of x. */ struct Phi_fun { template static inline auto fun(const T& x) { return Phi(x); } }; /** * Vectorized version of Phi(). * * @tparam T type of container * @param x container * @return Unit normal CDF of each value in x. */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto Phi(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/zeros_vector.hpp0000644000176200001440000000105014645137106023750 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ZEROS_VECTOR_HPP #define STAN_MATH_PRIM_FUN_ZEROS_VECTOR_HPP #include #include namespace stan { namespace math { /** * Return a vector of zeros. * * @param K size of the vector * @return A vector of size K with all elements initialized to 0. * @throw std::domain_error if K is negative. 
*/ inline auto zeros_vector(int K) { check_nonnegative("zeros_vector", "size", K); return Eigen::VectorXd::Zero(K); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/diag_pre_multiply.hpp0000644000176200001440000000171514645137106024745 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_DIAG_PRE_MULTIPLY_HPP #define STAN_MATH_PRIM_FUN_DIAG_PRE_MULTIPLY_HPP #include #include namespace stan { namespace math { /** * Return the product of the diagonal matrix formed from the vector * or row_vector and a matrix. * * @tparam T1 type of the vector/row_vector * @tparam T2 type of the matrix * @param m1 input vector/row_vector * @param m2 input matrix * * @return product of the diagonal matrix formed from the * vector or row_vector and a matrix. */ template * = nullptr, require_eigen_t* = nullptr, require_all_not_st_var* = nullptr> auto diag_pre_multiply(const T1& m1, const T2& m2) { check_size_match("diag_pre_multiply", "m1.size()", m1.size(), "m2.rows()", m2.rows()); return m1.asDiagonal() * m2; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cholesky_factor_constrain.hpp0000644000176200001440000001321314645137106026467 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_CHOLESKY_FACTOR_CONSTRAIN_HPP #define STAN_MATH_PRIM_FUN_CHOLESKY_FACTOR_CONSTRAIN_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return the Cholesky factor of the specified size read from the * specified vector. A total of (N choose 2) + N + (M - N) * N * elements are required to read an M by N Cholesky factor. 
* * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x Vector of unconstrained values * @param M number of rows * @param N number of columns * @return Cholesky factor */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cholesky_factor_constrain(const T& x, int M, int N) { using std::exp; using T_scalar = value_type_t; check_greater_or_equal("cholesky_factor_constrain", "num rows (must be greater or equal to num cols)", M, N); check_size_match("cholesky_factor_constrain", "constrain size", x.size(), "((N * (N + 1)) / 2 + (M - N) * N)", ((N * (N + 1)) / 2 + (M - N) * N)); Eigen::Matrix y(M, N); int pos = 0; const auto& x_ref = to_ref(x); for (int m = 0; m < N; ++m) { y.row(m).head(m) = x_ref.segment(pos, m); pos += m; y.coeffRef(m, m) = exp(x_ref.coeff(pos++)); y.row(m).tail(N - m - 1).setZero(); } for (int m = N; m < M; ++m) { y.row(m) = x_ref.segment(pos, N); pos += N; } return y; } /** * Return the Cholesky factor of the specified size read from the * specified vector and increment the specified log probability * reference with the log absolute Jacobian determinant adjustment of the * transform. A total of (N choose 2) + N + N * (M - N) free parameters are * required to read an M by N Cholesky factor. 
* * @tparam T type of the vector (must be derived from \c Eigen::MatrixBase and * have one compile-time dimension equal to 1) * @param x Vector of unconstrained values * @param M number of rows * @param N number of columns * @param lp Log probability that is incremented with the log absolute Jacobian * determinant * @return Cholesky factor */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> cholesky_factor_constrain(const T& x, int M, int N, return_type_t& lp) { check_size_match("cholesky_factor_constrain", "x.size()", x.size(), "((N * (N + 1)) / 2 + (M - N) * N)", ((N * (N + 1)) / 2 + (M - N) * N)); int pos = 0; const auto& x_ref = to_ref(x); for (int n = 0; n < N; ++n) { pos += n; lp += x_ref.coeff(pos++); } return cholesky_factor_constrain(x_ref, M, N); } /** * Return the Cholesky factor of the specified size read from the specified * vector. A total of (N choose 2) + N + N * (M - N) free parameters are * required to read an M by N Cholesky factor. If the `Jacobian` parameter is * `true`, the log density accumulator is incremented with the log absolute * Jacobian determinant of the transform. All of the transforms are specified * with their Jacobians in the *Stan Reference Manual* chapter Constraint * Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A type inheriting from `Eigen::DenseBase` or a `var_value` with * inner type inheriting from `Eigen::DenseBase` with compile time dynamic rows * and 1 column * @param x Vector of unconstrained values * @param M number of rows * @param N number of columns * @param[in,out] lp log density accumulator * @return Cholesky factor */ template * = nullptr> inline auto cholesky_factor_constrain(const T& x, int M, int N, return_type_t& lp) { if (Jacobian) { return cholesky_factor_constrain(x, M, N, lp); } else { return cholesky_factor_constrain(x, M, N); } } /** * Return the Cholesky factor of the specified size read from the specified * vector. A total of (N choose 2) + N + N * (M - N) free parameters are * required to read an M by N Cholesky factor. If the `Jacobian` parameter is * `true`, the log density accumulator is incremented with the log absolute * Jacobian determinant of the transform. All of the transforms are specified * with their Jacobians in the *Stan Reference Manual* chapter Constraint * Transforms. 
* * @tparam Jacobian if `true`, increment log density accumulator with log * absolute Jacobian determinant of constraining transform * @tparam T A standard vector with inner type inheriting from * `Eigen::DenseBase` or a `var_value` with inner type inheriting from * `Eigen::DenseBase` with compile time dynamic rows and 1 column * @param x Vector of unconstrained values * @param M number of rows * @param N number of columns * @param[in,out] lp log density accumulator * @return Cholesky factor */ template * = nullptr> inline auto cholesky_factor_constrain(const T& x, int M, int N, return_type_t& lp) { return apply_vector_unary::apply(x, [&lp, M, N](auto&& v) { return cholesky_factor_constrain(v, M, N, lp); }); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/zeros_array.hpp0000644000176200001440000000103514645137106023567 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_ZEROS_ARRAY_HPP #define STAN_MATH_PRIM_FUN_ZEROS_ARRAY_HPP #include #include namespace stan { namespace math { /** * Return an array of zeros. * * @param K size of the array * @return an array of size K with all elements initialized to 0. * @throw std::domain_error if K is negative. */ inline std::vector zeros_array(int K) { check_nonnegative("zeros_array", "size", K); return std::vector(K, 0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/sort_indices_asc.hpp0000644000176200001440000000117114645137106024543 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SORT_INDICES_ASC_HPP #define STAN_MATH_PRIM_FUN_SORT_INDICES_ASC_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return a sorted copy of the argument container in ascending order. 
* * @tparam C type of container * @param xs Container to sort * @return sorted version of container */ template std::vector sort_indices_asc(const C& xs) { return sort_indices(xs); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/inc_beta.hpp0000644000176200001440000000341414645137106022776 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_INC_BETA_HPP #define STAN_MATH_PRIM_FUN_INC_BETA_HPP #include #include #include #include #include namespace stan { namespace math { /** * The normalized incomplete beta function of a, b, with outcome x. * * Used to compute the cumulative density function for the beta * distribution. * * @param a Shape parameter a >= 0; a and b can't both be 0 * @param b Shape parameter b >= 0 * @param x Random variate. 0 <= x <= 1 * @throws if constraints are violated or if any argument is NaN * @return The normalized incomplete beta function. */ inline double inc_beta(double a, double b, double x) { check_not_nan("inc_beta", "a", a); check_not_nan("inc_beta", "b", b); check_not_nan("inc_beta", "x", x); return boost::math::ibeta(a, b, x, boost_policy_t<>()); } /** * Enables the vectorized application of the inc_beta function, when * any arguments are containers. * * @tparam T1 type of first input * @tparam T2 type of second input * @tparam T3 type of third input * @param a First input * @param b Second input * @param c Third input * @return Incomplete Beta function applied to the three inputs. 
*/ template * = nullptr, require_all_not_var_matrix_t* = nullptr> inline auto inc_beta(const T1& a, const T2& b, const T3& c) { return apply_scalar_ternary([](const auto& d, const auto& e, const auto& f) { return inc_beta(d, e, f); }, a, b, c); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/plus.hpp0000644000176200001440000000063714645137106022221 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_PLUS_HPP #define STAN_MATH_PRIM_FUN_PLUS_HPP #include namespace stan { namespace math { /** * Returns the unary plus of the input. * * @tparam T Type of input. * @param x input. * @return result of unary plus of the input. */ template inline T plus(T&& x) { return std::forward(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/mdivide_left_spd.hpp0000644000176200001440000000334214645137106024533 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_SPD_HPP #define STAN_MATH_PRIM_FUN_MDIVIDE_LEFT_SPD_HPP #include #include #include #include namespace stan { namespace math { /** * Returns the solution of the system Ax=b where A is symmetric positive * definite. * * @tparam EigMat1 type of the first matrix * @tparam EigMat2 type of the right-hand side matrix or vector * * @param A Matrix. * @param b Right hand side matrix or vector. * @return x = A^-1 b, solution of the linear system. * @throws std::domain_error if A is not square or the rows of b don't * match the size of A. 
*/ template * = nullptr, require_all_not_vt_var* = nullptr> inline Eigen::Matrix, EigMat1::RowsAtCompileTime, EigMat2::ColsAtCompileTime> mdivide_left_spd(const EigMat1& A, const EigMat2& b) { static const char* function = "mdivide_left_spd"; check_multiplicable(function, "A", A, "b", b); const auto& A_ref = to_ref(A); check_symmetric(function, "A", A_ref); check_not_nan(function, "A", A_ref); if (A.size() == 0) { return {0, b.cols()}; } auto llt = Eigen::Matrix, EigMat1::RowsAtCompileTime, EigMat1::ColsAtCompileTime>( A_ref) .llt(); check_pos_definite(function, "A", llt); return llt.solve( Eigen::Matrix, EigMat2::RowsAtCompileTime, EigMat2::ColsAtCompileTime>(b)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/symmetrize_from_lower_tri.hpp0000644000176200001440000000141514645137106026552 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_SYMMETRIZE_FROM_LOWER_TRI_HPP #define STAN_MATH_PRIM_FUN_SYMMETRIZE_FROM_LOWER_TRI_HPP #include #include namespace stan { namespace math { /** * Return a symmetric matrix using elements from the lower triangular part of * the input matrix. * * @tparam T type of elements in the matrix * @param m Matrix. * @throw std:invalid_argument if the matrix is not square. */ template * = nullptr> inline Eigen::Matrix, Eigen::Dynamic, Eigen::Dynamic> symmetrize_from_lower_tri(const T& m) { check_square("symmetrize_from_lower_tri", "m", m); return m.template selfadjointView(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/ub_free.hpp0000644000176200001440000001041314645137106022636 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_UB_FREE_HPP #define STAN_MATH_PRIM_FUN_UB_FREE_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return the free scalar that corresponds to the specified * upper-bounded value with respect to the specified upper bound. * *

The transform is the reverse of the * ub_constrain(T, double) transform, * *

\f$f^{-1}(y) = \log -(y - U)\f$ * *

where \f$U\f$ is the upper bound. * * @tparam T type of scalar * @tparam U type of upper bound * @param y constrained scalar with specified upper bound * @param ub upper bound * @return unconstrained scalar with respect to upper bound * @throw std::invalid_argument if constrained scalar is greater * than the upper bound. */ template * = nullptr, require_stan_scalar_t* = nullptr> inline auto ub_free(T&& y, U&& ub) { if (value_of_rec(ub) == INFTY) { return identity_free(y, ub); } else { auto&& y_ref = to_ref(std::forward(y)); auto&& ub_ref = to_ref(std::forward(ub)); check_less_or_equal("ub_free", "Upper bounded variable", value_of(y_ref), value_of(ub_ref)); return eval(log(subtract(std::forward(ub_ref), std::forward(y_ref)))); } } /** * Return the free matrix that corresponds to the specified * upper-bounded matrix with respect to the specified upper bound. * * The transform is the reverse of the `ub_constrain` transform. * * @tparam T type of matrix * @tparam U type of upper bound * @param y constrained matrix with specified upper bound * @param ub upper bound * @return unconstrained matrix with respect to upper bound * @throw std::invalid_argument if any element of constrained variable * is greater than the upper bound. */ template * = nullptr> inline auto ub_free(T&& y, U&& ub) { check_matching_dims("ub_free", "y", y, "ub", ub); auto&& y_ref = to_ref(std::forward(y)); auto&& ub_ref = to_ref(std::forward(ub)); promote_scalar_t, T> ret(y.rows(), y.cols()); for (Eigen::Index j = 0; j < y.cols(); ++j) { for (Eigen::Index i = 0; i < y.rows(); ++i) { ret.coeffRef(i, j) = ub_free(y_ref.coeff(i, j), ub_ref.coeff(i, j)); } } return ret; } /** * Return the free variable that corresponds to the specified * upper-bounded variable with respect to the specified upper bound. * * The transform is the reverse of the `ub_constrain` transform. 
* * @tparam T type of constrained variable * @tparam U type of upper bound * @param y constrained variable with specified upper bound * @param ub upper bound * @return unconstrained variable with respect to upper bound * @throw std::invalid_argument if any element of constrained variable * is greater than the upper bound. */ template * = nullptr> inline auto ub_free(const std::vector y, const U& ub) { auto&& ub_ref = to_ref(ub); std::vector ret(y.size()); for (Eigen::Index i = 0; i < y.size(); ++i) { ret[i] = ub_free(y[i], ub_ref); } return ret; } /** * Return the free variable that corresponds to the specified * upper-bounded variable with respect to the specified upper bound. * * The transform is the reverse of the `ub_constrain` transform. * * @tparam T type of constrained variable * @tparam U type of upper bound * @param y constrained variable with specified upper bound * @param ub upper bound * @return unconstrained variable with respect to upper bound * @throw std::invalid_argument if any element of constrained variable * is greater than the upper bound. */ template inline auto ub_free(const std::vector y, const std::vector& ub) { check_matching_dims("ub_free", "y", y, "ub", ub); std::vector ret(y.size()); for (Eigen::Index i = 0; i < y.size(); ++i) { ret[i] = ub_free(y[i], ub[i]); } return ret; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/cos.hpp0000644000176200001440000000412214645137106022013 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_COS_HPP #define STAN_MATH_PRIM_FUN_COS_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Structure to wrap `cos()` so it can be vectorized. * * @tparam T type of variable * @param x angle in radians * @return Cosine of x. 
*/ struct cos_fun { template static inline auto fun(const T& x) { using std::cos; return cos(x); } }; /** * Returns the elementwise `cos()` of the input, * which may be a scalar or any Stan container of numeric scalars. * * @tparam Container type of container * @param x angles in radians * @return Cosine of each value in x. */ template * = nullptr, require_not_var_matrix_t* = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< Container>* = nullptr> inline auto cos(const Container& x) { return apply_scalar_unary::apply(x); } /** * Version of `cos()` that accepts std::vectors, Eigen Matrix/Array objects * or expressions, and containers of these. * * @tparam Container Type of x * @param x Container * @return Cosine of each value in x. */ template * = nullptr> inline auto cos(const Container& x) { return apply_vector_unary::apply( x, [&](const auto& v) { return v.array().cos(); }); } namespace internal { /** * Return the cosine of the complex argument. * * @tparam T value type of argument * @param[in] z argument * @return cosine of the argument */ template inline std::complex complex_cos(const std::complex& z) { return cosh(i_times(z)); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/log_determinant_spd.hpp0000644000176200001440000000177514645137106025263 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_LOG_DETERMINANT_SPD_HPP #define STAN_MATH_PRIM_FUN_LOG_DETERMINANT_SPD_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Returns the log absolute determinant of the specified square matrix. 
* * @tparam T type of the matrix * * @param m specified matrix * @return log absolute determinant of the matrix * @throw std::domain_error if matrix is not square and symmetric */ template * = nullptr, require_not_vt_var* = nullptr> inline value_type_t log_determinant_spd(const EigMat& m) { const auto& m_ref = to_ref(m); check_symmetric("log_determinant_spd", "m", m_ref); if (m.size() == 0) { return 0; } return sum(log(m_ref.ldlt().vectorD().array())); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun/autocovariance.hpp0000644000176200001440000000770714645137106024246 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_AUTOCOVARIANCE_HPP #define STAN_MATH_PRIM_FUN_AUTOCOVARIANCE_HPP #include #include #include #include namespace stan { namespace math { /** * Write autocovariance estimates for every lag for the specified * input sequence into the specified result using the specified * FFT engine. The return vector be resized to the same length as * the input sequence with lags given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

An FFT engine can be created for reuse for type double with: * *

 *     Eigen::FFT fft;
 * 
* * @tparam T Scalar type. * @param y Input sequence. * @param acov Autocovariance. * @param fft FFT engine instance. */ template void autocovariance(const std::vector& y, std::vector& acov, Eigen::FFT& fft) { autocorrelation(y, acov, fft); T var = variance(y) * (y.size() - 1) / y.size(); for (size_t i = 0; i < y.size(); i++) { acov[i] *= var; } } /** * Write autocovariance estimates for every lag for the specified * input sequence into the specified result using the specified * FFT engine. The return vector be resized to the same length as * the input sequence with lags given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

An FFT engine can be created for reuse for type double with: * *

 *     Eigen::FFT fft;
 * 
* * @tparam T scalar type * @tparam DerivedA type of the first matrix * @tparam DerivedB type of the second matrix * @param y Input sequence. * @param acov Autocovariance. * @param fft FFT engine instance. */ template void autocovariance(const Eigen::MatrixBase& y, Eigen::MatrixBase& acov, Eigen::FFT& fft) { autocorrelation(y, acov, fft); acov = acov.array() * (y.array() - y.mean()).square().sum() / y.size(); } /** * Write autocovariance estimates for every lag for the specified * input sequence into the specified result. The return vector be * resized to the same length as the input sequence with lags * given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

This method is just a light wrapper around the three-argument * autocovariance function. * * @tparam T Scalar type. * @param y Input sequence. * @param acov Autocovariances. */ template void autocovariance(const std::vector& y, std::vector& acov) { Eigen::FFT fft; size_t N = y.size(); acov.resize(N); const Eigen::Map > y_map(&y[0], N); Eigen::Map > acov_map(&acov[0], N); autocovariance(y_map, acov_map, fft); } /** * Write autocovariance estimates for every lag for the specified * input sequence into the specified result. The return vector be * resized to the same length as the input sequence with lags * given by array index. * *

The implementation involves a fast Fourier transform, * followed by a normalization, followed by an inverse transform. * *

This method is just a light wrapper around the three-argument * autocovariance function * * @tparam T scalar type * @tparam DerivedA type of the first matrix * @tparam DerivedB type of the second matrix * @param y Input sequence. * @param acov Autocovariances. */ template void autocovariance(const Eigen::MatrixBase& y, Eigen::MatrixBase& acov) { Eigen::FFT fft; autocovariance(y, acov, fft); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/functor.hpp0000644000176200001440000000313614645137106022123 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUNCTOR_HPP #define STAN_MATH_PRIM_FUNCTOR_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/prim/eigen_plugins.h0000644000176200001440000001650414645137106022736 0ustar liggesusers/** * Reimplements is_fvar without requiring external math headers * * decltype((void)(T::d_)) is a pre C++17 replacement for * std::void_t * * TODO(Andrew): Replace with std::void_t after move to C++17 */ template struct is_fvar : std::false_type { }; template struct is_fvar : std::true_type { }; //TODO(Andrew): Replace std::is_const<>::value with std::is_const_v<> after move to C++17 template using double_return_t = std::conditional_t>::value, const double, double>; template using reverse_return_t = std::conditional_t>::value, const double&, double&>; template using vari_return_t = std::conditional_t>::value, const decltype(T::vi_)&, decltype(T::vi_)&>; template using forward_return_t = std::conditional_t>::value, const decltype(T::val_)&, decltype(T::val_)&>; /** * Structure to return a view to the values in a var, vari*, and fvar. * To identify the correct member to call for a given input, templates * check a combination of whether the input is a pointer (i.e. 
vari*) * and/or whether the input has member ".d_" (i.e. fvar). * * There are two methods for returning doubles unchanged. One which takes a reference * to a double and returns the same reference, used when 'chaining' methods * (i.e. A.adj().val()). The other for passing and returning by value, used directly * with matrices of doubles (i.e. A.val(), where A is of type MatrixXd). * * For definitions of EIGEN_EMPTY_STRUCT_CTOR, EIGEN_DEVICE_FUNC, and * EIGEN_STRONG_INLINE; see: https://eigen.tuxfamily.org/dox/XprHelper_8h_source.html */ struct val_Op{ EIGEN_EMPTY_STRUCT_CTOR(val_Op); //Returns value from a vari* template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::enable_if_t::value, const double&> operator()(T &v) const { return v->val_; } //Returns value from a var template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::enable_if_t<(!std::is_pointer::value && !is_fvar::value && !std::is_arithmetic::value), const double&> operator()(T &v) const { return v.vi_->val_; } //Returns value from an fvar template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::enable_if_t::value, forward_return_t> operator()(T &v) const { return v.val_; } //Returns double unchanged from input (by value) template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::enable_if_t::value, double_return_t> operator()(T v) const { return v; } //Returns double unchanged from input (by reference) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const double& operator()(const double& v) const { return v; } //Returns double unchanged from input (by reference) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double& operator()(double& v) const { return v; } }; /** * Coefficient-wise function applying val_Op struct to a matrix of const var * or vari* and returning a view to the const matrix of doubles containing * the values */ inline const CwiseUnaryOp val() const { return CwiseUnaryOp(derived()); } /** * Coefficient-wise function applying val_Op struct to a matrix of var * or vari* and returning a view to the matrix of doubles containing * 
the values */ inline CwiseUnaryOp val_op() { return CwiseUnaryOp(derived()); } /** * Coefficient-wise function applying val_Op struct to a matrix of var * or vari* and returning a view to the values */ inline CwiseUnaryView val() { return CwiseUnaryView(derived()); } /** * Structure to return tangent from an fvar. */ struct d_Op { EIGEN_EMPTY_STRUCT_CTOR(d_Op); //Returns tangent from an fvar template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE forward_return_t operator()(T &v) const { return v.d_; } }; /** * Coefficient-wise function applying d_Op struct to a matrix of const fvar * and returning a const matrix of type T containing the tangents */ inline const CwiseUnaryOp d() const { return CwiseUnaryOp(derived()); } /** * Coefficient-wise function applying d_Op struct to a matrix of fvar * and returning a view to a matrix of type T of the tangents that can * be modified */ inline CwiseUnaryView d() { return CwiseUnaryView(derived()); } /** * Structure to return adjoints from var and vari*. Deduces whether the variables * are pointers (i.e. vari*) to determine whether to return the adjoint or * first point to the underlying vari* (in the case of var). */ struct adj_Op { EIGEN_EMPTY_STRUCT_CTOR(adj_Op); //Returns adjoint from a vari* template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::enable_if_t::value, reverse_return_t> operator()(T &v) const { return v->adj_; } //Returns adjoint from a var template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::enable_if_t::value, reverse_return_t> operator()(T &v) const { return v.vi_->adj_; } }; /** * Coefficient-wise function applying adj_Op struct to a matrix of const var * and returning a const matrix of type T containing the values */ inline const CwiseUnaryOp adj() const { return CwiseUnaryOp(derived()); } /** * Coefficient-wise function applying adj_Op struct to a matrix of var * and returning a view to a matrix of doubles of the adjoints that can * be modified. This is meant to be used on the rhs of expressions. 
*/ inline CwiseUnaryOp adj_op() { return CwiseUnaryOp(derived()); } /** * Coefficient-wise function applying adj_Op struct to a matrix of var * and returning a view to a matrix of doubles of the adjoints that can * be modified */ inline CwiseUnaryView adj() { return CwiseUnaryView(derived()); } /** * Structure to return vari* from a var. */ struct vi_Op { EIGEN_EMPTY_STRUCT_CTOR(vi_Op); //Returns vari* from a var template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE vari_return_t operator()(T &v) const { return v.vi_; } }; /** * Coefficient-wise function applying vi_Op struct to a matrix of const var * and returning a const matrix of vari* */ inline const CwiseUnaryOp vi() const { return CwiseUnaryOp(derived()); } /** * Coefficient-wise function applying vi_Op struct to a matrix of var * and returning a view to a matrix of vari* that can be modified */ inline CwiseUnaryView vi() { return CwiseUnaryView(derived()); } #define EIGEN_STAN_MATRIXBASE_PLUGIN #define EIGEN_STAN_ARRAYBASE_PLUGIN StanHeaders/inst/include/stan/math/prim/err/0000755000176200001440000000000014645161272020520 5ustar liggesusersStanHeaders/inst/include/stan/math/prim/err/is_matching_size.hpp0000644000176200001440000000173414645137106024554 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_MATCHING_SIZE_HPP #define STAN_MATH_PRIM_ERR_IS_MATCHING_SIZE_HPP #include #include namespace stan { namespace math { /** * Return true if two structures are the same size. * This is a top-level sizing function for std::vector * where a 2x3 vector returns 2, and a total sizing function for * Eigen::Matrix where a 2x3 matrix will return 6. 
* @tparam T_y1 Type of the first variable, requires class method * .size() * @tparam T_y2 Type of the second variable, requires class method * .size() * @param y1 First variable * @param y2 Second variable * @return true if the variable sizes match */ template inline bool is_matching_size(const T_y1& y1, const T_y2& y2) { return is_size_match(y1.size(), y2.size()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_positive_ordered.hpp0000644000176200001440000000530314645137106025734 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_POSITIVE_ORDERED_HPP #define STAN_MATH_PRIM_ERR_CHECK_POSITIVE_ORDERED_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified the vector contains negative values or * is not sorted into strictly increasing order. * @tparam Vec A type derived from `Eigen::EigenBase` with 1 compile time row or * column * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Vector to test * @throw `std::domain_error` if the vector contains non-positive values, if the * values are not ordered, if there are duplicated values, or if any element is * `NaN` */ template * = nullptr, require_not_std_vector_t* = nullptr> void check_positive_ordered(const char* function, const char* name, const Vec& y) { if (y.size() == 0) { return; } auto&& y_ref = to_ref(value_of_rec(y)); if (y_ref.coeff(0) < 0) { [&]() STAN_COLD_PATH { std::ostringstream msg; msg << "is not a valid positive_ordered vector." 
<< " The element at " << stan::error_index::value << " is "; std::string msg_str(msg.str()); throw_domain_error(function, name, value_of_rec(to_ref(y).coeff(0)), msg_str.c_str(), ", but should be postive."); }(); } check_ordered(function, name, y_ref); } /** * Throw an exception if any of the vectors in a standard vector contains * negative values or is not sorted into strictly increasing order. * @tparam StdVec A standard vector type with an `value_type` inheriting from * `Eigen::EigenBase` with 1 compile time row or column * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Vector to test * @throw `std::domain_error` if the vector contains non-positive values, if the * values are not ordered, if there are duplicated values, or if any element is * `NaN` */ template * = nullptr> void check_positive_ordered(const char* function, const char* name, const StdVec& y) { for (size_t i = 0; i < y.size(); ++i) { check_positive_ordered(function, internal::make_iter_name(name, i).c_str(), y[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_unit_vector.hpp0000644000176200001440000000560614645137106024735 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_UNIT_VECTOR_HPP #define STAN_MATH_PRIM_ERR_CHECK_UNIT_VECTOR_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified vector does not have unit Euclidiean * length. A valid unit vector is one where the square of the elements summed is * equal to 1. This function tests that the sum is within the tolerance * specified by `CONSTRAINT_TOLERANCE`. This function only accepts Eigen * vectors, statically typed vectors, not general matrices with 1 column. 
* @tparam Vec A type derived from `Eigen::EigenBase` with either dynamic rows * or columns but not both * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param theta Vector to test * @throw `std::invalid_argument` if `theta` is a 0-vector * @throw `std::domain_error` if the vector is not a unit vector or if any * element is `NaN` */ template * = nullptr, require_not_std_vector_t* = nullptr> void check_unit_vector(const char* function, const char* name, const Vec& theta) { check_nonzero_size(function, name, theta); using std::fabs; scalar_type_t ssq = value_of_rec(theta).squaredNorm(); if (!(fabs(1.0 - ssq) <= CONSTRAINT_TOLERANCE)) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << "is not a valid unit vector." << " The sum of the squares of the elements should be 1, but is "; std::string msg_str(msg.str()); throw_domain_error(function, name, ssq, msg_str.c_str()); }(); } } /** * Throw an exception if the each element in a standard vector does not have * unit Euclidiean length. 
* @tparam StdVec A standard vector with inner type inheriting from * `Eigen::EigenBase` * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param theta Vector to test * @throw `std::invalid_argument` if `theta` * is a 0-vector * @throw `std::domain_error` if the vector is not a unit vector or if any * element is `NaN` */ template * = nullptr> void check_unit_vector(const char* function, const char* name, const StdVec& theta) { for (size_t i = 0; i < theta.size(); ++i) { check_unit_vector(function, internal::make_iter_name(name, i).c_str(), theta[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_positive.hpp0000644000176200001440000000174014645137106023567 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_POSITIVE_HPP #define STAN_MATH_PRIM_ERR_IS_POSITIVE_HPP #include #include #include namespace stan { namespace math { /** * Return true if y is positive. * This function is vectorized and will check each element of * y. * @tparam T_y Type of y * @param y Variable to check * @return true if vector contains only positive elements */ template inline bool is_positive(const T_y& y) { for (size_t n = 0; n < stan::math::size(y); ++n) { if (!(stan::get(y, n) > 0)) { return false; } } return true; } /** * Return true if size is positive. 
* @param size Size value to check * @return true if size is not zero or negative */ inline bool is_positive(int size) { return size > 0; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/domain_error_vec.hpp0000644000176200001440000000147714645137106024556 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_DOMAIN_ERROR_VEC_HPP #define STAN_MATH_PRIM_ERR_DOMAIN_ERROR_VEC_HPP #include namespace stan { namespace math { /** * @deprecated use throw_domain_error_vec */ template inline void domain_error_vec(const char* function, const char* name, const T& y, size_t i, const char* msg1, const char* msg2) { throw_domain_error_vec(function, name, y, i, msg1, msg2); } /** * @deprecated use throw_domain_error_vec */ template inline void domain_error_vec(const char* function, const char* name, const T& y, size_t i, const char* msg1) { throw_domain_error_vec(function, name, y, i, msg1, ""); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_corr_matrix.hpp0000644000176200001440000000330314645137106024253 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_CORR_MATRIX_HPP #define STAN_MATH_PRIM_ERR_IS_CORR_MATRIX_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return true if the matrix is square and not 0x0, * if the matrix is symmetric, diagonals are near 1, positive definite, * and no elements are NaN * A valid correlation matrix is symmetric, has a unit diagonal * (all 1 values), and has all values between -1 and 1 (inclusive). 
* @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param y Matrix to test * @return true if the matrix is square and not 0x0, * if the matrix is symmetric, diagonals are near 1, positive definite, * and no elements are NaN */ template * = nullptr> inline bool is_corr_matrix(const EigMat& y) { using std::fabs; const auto& y_ref = to_ref(y); if (!is_size_match(y_ref.rows(), y_ref.cols())) { return false; } if (!is_positive(y_ref.rows())) { return false; } if (!is_pos_definite(y_ref)) { return false; } if (is_symmetric(y_ref)) { for (Eigen::Index k = 0; k < y_ref.rows(); ++k) { if (!(fabs(y_ref(k, k) - 1.0) <= CONSTRAINT_TOLERANCE)) { return false; } } } return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_positive_finite.hpp0000644000176200001440000000171014645137106025564 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_POSITIVE_FINITE_HPP #define STAN_MATH_PRIM_ERR_CHECK_POSITIVE_FINITE_HPP #include #include namespace stan { namespace math { /** * Check if y is positive and finite. * This function is vectorized and will check each element of * y. * @tparam T_y Type of y * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @throw domain_error if any element of y is not positive or * if any element of y is NaN. 
*/ template inline void check_positive_finite(const char* function, const char* name, const T_y& y) { elementwise_check([](double x) { return x > 0 && std::isfinite(x); }, function, name, y, "positive finite"); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_symmetric.hpp0000644000176200001440000000245514645137106023745 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_SYMMETRIC_HPP #define STAN_MATH_PRIM_ERR_IS_SYMMETRIC_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return true if the matrix is square, and no element * not on the main diagonal is NaN. * @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param y Matrix to test * @return true if the matrix is square, and no * element not on the main diagonal is NaN */ template * = nullptr> inline bool is_symmetric(const EigMat& y) { const auto& y_ref = to_ref(y); if (!is_square(y_ref)) { return false; } Eigen::Index k = y_ref.rows(); if (k == 1) { return true; } for (Eigen::Index m = 0; m < k; ++m) { for (Eigen::Index n = m + 1; n < k; ++n) { if (!(fabs(value_of(y_ref(m, n)) - value_of(y_ref(n, m))) <= CONSTRAINT_TOLERANCE)) { return false; } } } return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_nonnegative.hpp0000644000176200001440000000174714645137106024713 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_NONNEGATIVE_HPP #define STAN_MATH_PRIM_ERR_CHECK_NONNEGATIVE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Check if y is non-negative. * This function is vectorized and will check each element of y. * @tparam T_y Type of y * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @throw domain_error if y is negative or * if any element of y is NaN. 
*/ template inline void check_nonnegative(const char* function, const char* name, const T_y& y) { elementwise_check([](double x) { return x >= 0; }, function, name, y, "nonnegative"); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/hmm_check.hpp0000644000176200001440000000340514645137106023150 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_HMM_CHECK_HPP #define STAN_MATH_PRIM_ERR_HMM_CHECK_HPP #include #include namespace stan { namespace math { /** * Check arguments for hidden Markov model functions with a discrete * latent state (lpdf, rng for latent states, and marginal probabilities * for latent sates). * * @tparam T_omega type of the log likelihood matrix * @tparam T_Gamma type of the transition matrix * @tparam T_rho type of the initial guess vector * @param[in] log_omegas log matrix of observational densities. * @param[in] Gamma transition density between hidden states. * @param[in] rho initial state * @param[in] function the name of the function using the arguments. * @throw `std::invalid_argument` if Gamma is not square * or if the size of rho is not the number of rows of log_omegas. * @throw `std::domain_error` if rho is not a simplex or the rows * of Gamma are not a simplex. 
*/ template * = nullptr, require_eigen_col_vector_t* = nullptr> inline void hmm_check(const T_omega& log_omegas, const T_Gamma& Gamma, const T_rho& rho, const char* function) { int n_states = log_omegas.rows(); check_consistent_size(function, "rho", rho, n_states); check_square(function, "Gamma", Gamma); check_nonzero_size(function, "Gamma", Gamma); check_multiplicable(function, "Gamma", Gamma, "log_omegas", log_omegas); check_simplex(function, "rho", rho); const auto& Gamma_ref = to_ref(Gamma); for (int i = 0; i < Gamma.rows(); ++i) { check_simplex(function, "Gamma[i, ]", Gamma_ref.row(i)); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_greater_or_equal.hpp0000644000176200001440000004217414645137106025715 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_GREATER_OR_EQUAL_HPP #define STAN_MATH_PRIM_ERR_CHECK_GREATER_OR_EQUAL_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if `y` is not greater or equal than `low`. This function * is vectorized and will check each element of `y` against each element of * `low`. * @tparam T_y A scalar type * @tparam T_low A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { if (unlikely(!(y >= low))) { [](auto y, auto low, auto function, auto name, auto... 
idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be greater than or equal to " + std::to_string(value_of_rec(low))) .c_str()); }(y, low, function, name, idxs...); } } /** * Throw an exception if `y` is not greater or equal than each element of `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam T_y A scalar type * @tparam T_low A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template < typename T_y, typename T_low, require_stan_scalar_t* = nullptr, require_vector_t* = nullptr, require_not_std_vector_vt* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& low_arr = value_of_rec(as_array_or_scalar(to_ref(low))); for (Eigen::Index i = 0; i < low_arr.size(); ++i) { if (unlikely(!(y >= low_arr.coeff(i)))) { [](auto y, auto&& low_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be greater than or equal to " + std::to_string(low_arr.coeff(i))) .c_str()); }(y, low_arr, name, function, i, idxs...); } } } /** * Throw an exception if `y` is not greater or equal than each element of `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. 
* @tparam T_y A scalar type * @tparam T_low Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_dense_dynamic_t* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& low_arr = value_of_rec(to_ref(low)); for (Eigen::Index j = 0; j < low_arr.cols(); ++j) { for (Eigen::Index i = 0; i < low_arr.rows(); ++i) { if (unlikely(!(y >= low_arr.coeff(i, j)))) { [](auto y, auto&& low_arr, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error(function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be greater than or equal to " + std::to_string(low_arr.coeff(i, j))) .c_str()); }(y, low_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not greater or equal than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. 
* @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar * @tparam T_low A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_not_std_vector_vt* = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(as_array_or_scalar(to_ref(y))); for (Eigen::Index i = 0; i < y_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) >= low))) { [](auto&& y_arr, auto low, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be greater than or equal to " + std::to_string(value_of_rec(low))) .c_str()); }(y_arr, low, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not greater or equal than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. 
* @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_low A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); for (Eigen::Index j = 0; j < y_arr.cols(); ++j) { for (Eigen::Index i = 0; i < y_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) >= low))) { [](auto&& y_arr, auto low, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be greater than or equal to " + std::to_string(value_of_rec(low))) .c_str()); }(y_arr, low, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not greater or equal than the * associated element in `low`. This function is vectorized and will check each * element of `y` against each element of `low`. 
* @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar * @tparam T_low A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_all_not_std_vector_vt* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(as_array_or_scalar(to_ref(y))); auto&& low_arr = value_of_rec(as_array_or_scalar(to_ref(low))); for (Eigen::Index i = 0; i < low_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) >= low_arr.coeff(i)))) { [](auto&& y_arr, auto&& low_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec(function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be greater than or equal to " + std::to_string(low_arr.coeff(i))) .c_str()); }(y_arr, low_arr, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not greater or equal than the * associated element in `low`. This function is vectorized and will check each * element of `y` against each element of `low`. 
* @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_low Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); auto&& low_arr = value_of_rec(to_ref(low)); for (Eigen::Index j = 0; j < low_arr.cols(); ++j) { for (Eigen::Index i = 0; i < low_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) >= low_arr.coeff(i, j)))) { [](auto&& y_arr, auto&& low_arr, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be greater than or equal to " + std::to_string(low_arr.coeff(i, j))) .c_str()); }(y_arr, low_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not greater or equal than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. 
* @tparam T_y A standard vector type with a `value_type` of a standard vector * or type inheriting from `Eigen::DenseBase` * @tparam T_low A standard vector type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_not_std_vector_t* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { for (size_t i = 0; i < y.size(); ++i) { check_greater_or_equal(function, name, y[i], low, idxs..., i); } } /** * Throw an exception if each element of `y` is not greater or equal than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam T_y A standard vector * @tparam T_low A standard vector type with a `value_type` of a standard * @tparam Idxs A parameter pack of Integral types * vector or type inheriting from `Eigen::DenseBase` * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is NaN */ template * = nullptr, require_std_vector_vt* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... 
idxs) { for (size_t i = 0; i < low.size(); ++i) { check_greater_or_equal(function, name, y, low[i], idxs..., i); } } /** * Throw an exception if each element of `y` is not greater or equal than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam T_y A standard vector type * @tparam T_low A standard vector type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_all_std_vector_t* = nullptr, typename... Idxs> inline void check_greater_or_equal(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { for (size_t i = 0; i < y.size(); ++i) { check_greater_or_equal(function, name, y[i], low[i], idxs..., i); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/validate_positive_index.hpp0000644000176200001440000000201114645137106026124 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_VALIDATE_POSITIVE_INDEX_HPP #define STAN_MATH_PRIM_ERR_VALIDATE_POSITIVE_INDEX_HPP #include #include #include #include namespace stan { namespace math { /** * Check that simplex is at least size 1 * * @param var_name Name of simplex variable * @param expr Expression in which variable is declared * @param val Size of simplex * @throw std::invalid_argument if simplex size is less than 1 */ inline void validate_positive_index(const char* var_name, const char* expr, int val) { if (val < 1) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << "Found dimension size less than one in simplex declaration" << "; variable=" << var_name << "; dimension size expression=" << expr << "; 
expression value=" << val; std::string msg_str(msg.str()); throw std::invalid_argument(msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_pos_definite.hpp0000644000176200001440000000547614645137106024407 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_POS_DEFINITE_HPP #define STAN_MATH_PRIM_ERR_IS_POS_DEFINITE_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return true if the matrix is square or if the matrix has * non-zero size, or if the matrix is symmetric, or if it is positive * definite, or if no element is NaN. * @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param y Matrix to test * @return true if the matrix is square or if the matrix has non-0 * size, or if the matrix is symmetric, or if it is positive definite, or if * no element is NaN */ template * = nullptr> inline bool is_pos_definite(const EigMat& y) { const auto& y_ref = to_ref(y); if (!is_symmetric(y_ref)) { return false; } if (!is_positive(y_ref.rows())) { return false; } if (y_ref.rows() == 1 && !(y_ref(0, 0) > CONSTRAINT_TOLERANCE)) { return false; } Eigen::LDLT cholesky = value_of_rec(y_ref).ldlt(); if (cholesky.info() != Eigen::Success || !cholesky.isPositive() || (cholesky.vectorD().array() <= 0.0).any()) { return false; } return is_not_nan(y_ref); } /** * Return true if the matrix is positive definite. * @tparam Derived Derived type of the Eigen::LDLT transform, requires * class method .info() and .isPositive() * and .vectorD().array() * @param cholesky Eigen::LDLT to test, whose progenitor * must not have any NaN elements * @return true if the matrix is positive definite */ template inline bool is_pos_definite(const Eigen::LDLT& cholesky) { return cholesky.info() == Eigen::Success && cholesky.isPositive() && (cholesky.vectorD().array() > 0.0).all(); } /** * Return true if diagonal of the L matrix is positive. 
* @tparam Derived Derived type of the Eigen::LLT transform, requires * class method .info() and * .matrixLLT().diagonal().array() * @param cholesky Eigen::LLT to test, whose progenitor * must not have any NaN elements * @return true if diagonal of the L matrix is positive */ template inline bool is_pos_definite(const Eigen::LLT& cholesky) { return cholesky.info() == Eigen::Success && (cholesky.matrixLLT().diagonal().array() > 0.0).all(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_symmetric.hpp0000644000176200001440000000434214645137106024404 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_SYMMETRIC_HPP #define STAN_MATH_PRIM_ERR_CHECK_SYMMETRIC_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified matrix is symmetric. * The error message is either 0 or 1 indexed, specified by * stan::error_index::value. * @tparam EigMat Type of matrix * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Matrix to test * @throw std::invalid_argument if the matrix is not square. * @throw std::domain_error if any element not on the * main diagonal is NaN */ template * = nullptr> inline void check_symmetric(const char* function, const char* name, const EigMat& y) { check_square(function, name, y); using std::fabs; Eigen::Index k = y.rows(); if (k <= 1) { return; } const auto& y_ref = to_ref(y); for (Eigen::Index m = 0; m < k; ++m) { for (Eigen::Index n = m + 1; n < k; ++n) { if (!(fabs(value_of(y_ref(m, n)) - value_of(y_ref(n, m))) <= CONSTRAINT_TOLERANCE)) { [&]() STAN_COLD_PATH { std::ostringstream msg1; msg1 << "is not symmetric. 
" << name << "[" << stan::error_index::value + m << "," << stan::error_index::value + n << "] = "; std::string msg1_str(msg1.str()); std::ostringstream msg2; msg2 << ", but " << name << "[" << stan::error_index::value + n << "," << stan::error_index::value + m << "] = " << y_ref(n, m); std::string msg2_str(msg2.str()); throw_domain_error(function, name, y_ref(m, n), msg1_str.c_str(), msg2_str.c_str()); }(); } } } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_row_index.hpp0000644000176200001440000000245014645137106024364 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_ROW_INDEX_HPP #define STAN_MATH_PRIM_ERR_CHECK_ROW_INDEX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified index is a valid row of the matrix * This check is 1-indexed by default. This behavior can be changed * by setting stan::error_index::value. * @tparam T Matrix type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y matrix to test * @param i row index to check * @throw std::out_of_range if the index is out of range. */ template > inline void check_row_index(const char* function, const char* name, const T_y& y, size_t i) { STAN_NO_RANGE_CHECKS_RETURN; if (!(i >= stan::error_index::value && i < static_cast(y.rows()) + stan::error_index::value)) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << " for rows of " << name; std::string msg_str(msg.str()); out_of_range(function, y.rows(), i, msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_nonzero_size.hpp0000644000176200001440000000107514645137106024452 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_NONZERO_SIZE_HPP #define STAN_MATH_PRIM_ERR_IS_NONZERO_SIZE_HPP #include namespace stan { namespace math { /** * Returns true if the specified matrix/vector is size nonzero. 
* @tparam T_y Type of container, requires class method .size() * @param y Container to test -- matrix/vector * @return true if container has size zero */ template inline bool is_nonzero_size(const T_y& y) { return y.size() > 0; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_lower_triangular.hpp0000644000176200001440000000173614645137106025312 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_LOWER_TRIANGULAR_HPP #define STAN_MATH_PRIM_ERR_IS_LOWER_TRIANGULAR_HPP #include #include #include namespace stan { namespace math { /** * Return true is matrix is lower triangular. * A matrix x is not lower triangular if there is a non-zero entry * x[m, n] with m < n. This function only inspect the upper and * triangular portion of the matrix, not including the diagonal. * @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param y Matrix to test * @return true is matrix is lower triangular */ template * = nullptr> inline bool is_lower_triangular(const EigMat& y) { return y.unaryExpr([](auto&& x) { return is_not_nan(x) ? x : 1.0; }) .transpose() .isUpperTriangular(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/invalid_argument_vec.hpp0000644000176200001440000000503014645137106025413 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_INVALID_ARGUMENT_VEC_HPP #define STAN_MATH_PRIM_ERR_INVALID_ARGUMENT_VEC_HPP #include #include #include #include #include namespace stan { namespace math { /** * Throw an invalid argument exception with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * invalid arguments. This will allow us to change the behavior for all * functions at once. (We've already changed behavior multiple times up * to Stan v2.5.0.) * The message is: * ": [] " * where error_index is the value of stan::error_index::value * which indicates whether the message should be 0 or 1 indexed. 
* @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param i Index * @param msg1 Message to print before the variable * @param msg2 Message to print after the variable * @throw std::invalid_argument */ template inline void invalid_argument_vec(const char* function, const char* name, const T& y, size_t i, const char* msg1, const char* msg2) { std::ostringstream vec_name_stream; vec_name_stream << name << "[" << stan::error_index::value + i << "]"; std::string vec_name(vec_name_stream.str()); invalid_argument(function, vec_name.c_str(), stan::get(y, i), msg1, msg2); } /** * Throw an invalid argument exception with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * invalid arguments. This will allow us to change the behavior for all * functions at once. (We've already changed behavior multiple times up * to Stan v2.5.0.) * The message is: * ": [] " * where error_index is the value of stan::error_index::value * which indicates whether the message should be 0 or 1 indexed. * @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param i Index * @param msg Message to print before the variable * @throw std::invalid_argument */ template inline void invalid_argument_vec(const char* function, const char* name, const T& y, size_t i, const char* msg) { invalid_argument_vec(function, name, y, i, msg, ""); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_square.hpp0000644000176200001440000000123014645137106023217 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_SQUARE_HPP #define STAN_MATH_PRIM_ERR_IS_SQUARE_HPP #include #include #include namespace stan { namespace math { /** * Return true if the matrix is square. This check allows 0x0 * matrices. 
* @tparam EigMat A type derived from `EigenBase` * @param y Matrix to test * @return true if matrix is square */ template * = nullptr> inline bool is_square(const EigMat& y) { return is_size_match(y.rows(), y.cols()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_scal_finite.hpp0000644000176200001440000000152014645137106024201 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_SCAL_FINITE_HPP #define STAN_MATH_PRIM_ERR_IS_SCAL_FINITE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return true if y is finite. * This function is vectorized and will check each element of * y. * @tparam T_y Type of y * @param y Variable to check * @throw true if y is not infinity, -infinity, or NaN */ template inline bool is_scal_finite(const T_y& y) { for (size_t n = 0; n < stan::math::size(y); ++n) { if (!std::isfinite(value_of_rec(stan::get(y, n)))) { return false; } } return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_size_match.hpp0000644000176200001440000000464614645137106024525 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_SIZE_MATCH_HPP #define STAN_MATH_PRIM_ERR_CHECK_SIZE_MATCH_HPP #include #include #include #include namespace stan { namespace math { /** * Check if the provided sizes match. 
* @tparam T_size1 Type of size 1 * @tparam T_size2 Type of size 2 * @param function Function name (for error messages) * @param name_i Variable name 1 (for error messages) * @param i Variable size 1 * @param name_j Variable name 2 (for error messages) * @param j Variable size 2 * @throw std::invalid_argument if the sizes do not match */ template inline void check_size_match(const char* function, const char* name_i, T_size1 i, const char* name_j, T_size2 j) { if (i != static_cast(j)) { [&]() STAN_COLD_PATH { std::ostringstream msg; msg << ") and " << name_j << " (" << j << ") must match in size"; std::string msg_str(msg.str()); invalid_argument(function, name_i, i, "(", msg_str.c_str()); }(); } } /** * Check if the provided sizes match. * @tparam T_size1 Type of size 1 * @tparam T_size2 Type of size 2 * @param function Function name (for error messages) * @param expr_i Expression for variable name 1 (for error messages) * @param name_i Variable name 1 (for error messages) * @param i Variable size 1 * @param expr_j Expression for variable name 2 (for error messages) * @param name_j Variable name 2 (for error messages) * @param j Variable size 2 * @throw std::invalid_argument if the sizes do not match */ template inline void check_size_match(const char* function, const char* expr_i, const char* name_i, T_size1 i, const char* expr_j, const char* name_j, T_size2 j) { if (i != static_cast(j)) { [&]() STAN_COLD_PATH { std::ostringstream updated_name; updated_name << expr_i << name_i; std::string updated_name_str(updated_name.str()); std::ostringstream msg; msg << ") and " << expr_j << name_j << " (" << j << ") must match in size"; std::string msg_str(msg.str()); invalid_argument(function, updated_name_str.c_str(), i, "(", msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_size_match.hpp0000644000176200001440000000113114645137106024045 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_SIZE_MATCH_HPP 
#define STAN_MATH_PRIM_ERR_IS_SIZE_MATCH_HPP #include namespace stan { namespace math { /** * Return true if the provided sizes match. * @tparam T_size1 Type of size 1 * @tparam T_size2 Type of size 2 * @param i Size of variable 1 * @param j Size of variable 2 * @return true if provided dimensions match */ template inline bool is_size_match(T_size1 i, T_size2 j) { return i == static_cast(j); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_less_or_equal.hpp0000644000176200001440000004252014645137106025225 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_LESS_OR_EQUAL_HPP #define STAN_MATH_PRIM_ERR_CHECK_LESS_OR_EQUAL_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if `y` is not less than `high`. This function is * vectorized and will check each element of `y` against each element of `high`. * @tparam T_y A scalar type * @tparam T_high A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { if (unlikely(!(y <= high))) { [](auto y, auto high, auto function, auto name, auto... idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be less than or equal to " + std::to_string(value_of_rec(high))) .c_str()); }(y, high, function, name, idxs...); } } /** * Throw an exception if `y` is not less than each element of `high`. 
This * function is vectorized and will check each element of `y` against each * element of `high`. * @tparam T_y A scalar type * @tparam T_high A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar. * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template < typename T_y, typename T_high, require_stan_scalar_t* = nullptr, require_vector_t* = nullptr, require_not_std_vector_vt* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& high_arr = value_of_rec(as_array_or_scalar(to_ref(high))); for (Eigen::Index i = 0; i < high_arr.size(); ++i) { if (unlikely(!(y <= high_arr.coeff(i)))) { [](auto y, auto&& high_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be less than or equal to " + std::to_string(high_arr.coeff(i))) .c_str()); }(y, high_arr, name, function, i, idxs...); } } } /** * Throw an exception if `y` is not less than each element of `high`. This * function is vectorized and will check each element of `y` against each * element of `high`. 
* @tparam T_y A scalar type * @tparam T_high Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, require_dense_dynamic_t* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& high_arr = value_of_rec(to_ref(high)); for (Eigen::Index j = 0; j < high_arr.cols(); ++j) { for (Eigen::Index i = 0; i < high_arr.rows(); ++i) { if (unlikely(!(y <= high_arr.coeff(i, j)))) { [](auto y, auto&& high_arr, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error(function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be less than or equal to " + std::to_string(high_arr.coeff(i, j))) .c_str()); }(y, high_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not less than `high`. This * function is vectorized and will check each element of `y` against each * element of `high`. * @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar. 
* @tparam T_high A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, require_not_std_vector_vt* = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(as_array_or_scalar(to_ref(y))); for (Eigen::Index i = 0; i < y_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) <= high))) { [](auto&& y_arr, auto high, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec(function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be less than or equal to " + std::to_string(value_of_rec(high))) .c_str()); }(y_arr, high, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not less than `high`. This * function is vectorized and will check each element of `y` against each * element of `high`. 
* @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_high A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); for (Eigen::Index j = 0; j < y_arr.cols(); ++j) { for (Eigen::Index i = 0; i < y_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) <= high))) { [](auto&& y_arr, auto high, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be less than or equal to " + std::to_string(value_of_rec(high))) .c_str()); }(y_arr, high, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not less than the associated * element of `high`. This function is vectorized and will check each element of * `y` against each element of `high`. * @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar. * @tparam T_high A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar. 
* @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, require_all_not_std_vector_vt* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(as_array_or_scalar(to_ref(y))); auto&& high_arr = value_of_rec(as_array_or_scalar(to_ref(high))); for (Eigen::Index i = 0; i < y_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) <= high_arr.coeff(i)))) { [](auto&& y_arr, auto&& high_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec(function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be less than or equal to " + std::to_string(high_arr.coeff(i))) .c_str()); }(y_arr, high_arr, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not less than the associated * element of `high`. This function is vectorized and will check each element of * `y` against each element of `high`. 
* @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_high Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); auto&& high_arr = value_of_rec(to_ref(high)); for (Eigen::Index j = 0; j < y_arr.cols(); ++j) { for (Eigen::Index i = 0; i < y_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) <= high_arr.coeff(i, j)))) { [](auto&& y_arr, auto&& high_arr, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be less than or equal to " + std::to_string(high_arr.coeff(i, j))) .c_str()); }(y_arr, high_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not less than `high`. This * function is vectorized and will check each element of `y` against each * element of `high`. 
* @tparam T_y A standard vector type with a `value_type` of a standard vector * or type inheriting from `Eigen::DenseBase` * @tparam T_high A scalar or the same `value_type` of `T_y` * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, require_not_std_vector_t* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { for (size_t i = 0; i < y.size(); ++i) { check_less_or_equal(function, name, y[i], high, idxs..., i); } } /** * Throw an exception if `y` is not less than each element of `high`. This * function is vectorized and will check each element of `y` against each * element of `high`. * @tparam T_y A scalar type or the same `value_type` of `T_high` * @tparam T_high A standard vector type with a `value_type` of a standard * vector or type inheriting from `Eigen::DenseBase` * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, require_std_vector_vt* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... 
idxs) { for (size_t i = 0; i < high.size(); ++i) { check_less_or_equal(function, name, y, high[i], idxs..., i); } } /** * Throw an exception if each element of `y` is not less than each associated * element of `high`. This function is vectorized and will check each element of * `y` against each element of `high`. * @tparam T_y A standard vector type whose `value_type` is a scalar, type * inheriting from `Eigen::EigenBase`, or another standard vector * @tparam T_high A standard vector type whose `value_type` is a scalar, type * inheriting from `Eigen::EigenBase`, or another standard vector * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not less than high or if any element of y * or high is `NaN` */ template * = nullptr, require_all_std_vector_t* = nullptr, typename... Idxs> inline void check_less_or_equal(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { for (size_t i = 0; i < y.size(); ++i) { check_less_or_equal(function, name, y[i], high[i], idxs..., i); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_square.hpp0000644000176200001440000000174114645137106023670 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_SQUARE_HPP #define STAN_MATH_PRIM_ERR_CHECK_SQUARE_HPP #include #include #include #include namespace stan { namespace math { /** * Check if the specified matrix is square. This check allows 0x0 matrices. 
* @tparam T Type of matrix * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Matrix to test * @throw std::invalid_argument if the matrix is not square */ template , is_prim_or_rev_kernel_expression>* = nullptr> inline void check_square(const char* function, const char* name, const T_y& y) { check_size_match(function, "Expecting a square matrix; rows of ", name, y.rows(), "columns of ", name, y.cols()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_finite.hpp0000644000176200001440000000204714645137106023646 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_FINITE_HPP #define STAN_MATH_PRIM_ERR_CHECK_FINITE_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Return true if all values in `y` are finite. `y` can be a *scalar, `std::vector` or Eigen type. * * @tparam T_y type of `y` * * @param function name of function (for error messages) * @param name variable name (for error messages) * @param y scalar or container to test * @return true if all values are finite **/ template inline void check_finite(const char* function, const char* name, const T_y& y) { elementwise_check([](double x) { return std::isfinite(x); }, function, name, y, "finite"); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_2F1_converges.hpp0000644000176200001440000000453714645137106025001 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_2F1_CONVERGES_HPP #define STAN_MATH_PRIM_ERR_CHECK_2F1_CONVERGES_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Check if the hypergeometric function (2F1) called with * supplied arguments will converge, assuming arguments are * finite values. 
* @tparam T_a1 Type of a1 * @tparam T_a2 Type of a2 * @tparam T_b1 Type of b1 * @tparam T_z Type of z * @param function Name of function ultimately relying on 2F1 (for error * messages) * @param a1 Variable to check * @param a2 Variable to check * @param b1 Variable to check * @param z Variable to check * @throw domain_error if 2F1(a1, a2, b1, z) * does not meet convergence conditions, or if any coefficient is NaN. */ template inline void check_2F1_converges(const char* function, const T_a1& a1, const T_a2& a2, const T_b1& b1, const T_z& z) { using std::fabs; using std::floor; check_not_nan("check_3F2_converges", "a1", a1); check_not_nan("check_3F2_converges", "a2", a2); check_not_nan("check_3F2_converges", "b1", b1); check_not_nan("check_3F2_converges", "z", z); int num_terms = 0; bool is_polynomial = false; if (is_nonpositive_integer(a1) && fabs(a1) >= num_terms) { is_polynomial = true; num_terms = floor(fabs(value_of_rec(a1))); } if (is_nonpositive_integer(a2) && fabs(a2) >= num_terms) { is_polynomial = true; num_terms = floor(fabs(value_of_rec(a2))); } bool is_undefined = is_nonpositive_integer(b1) && fabs(b1) <= num_terms; if (!is_undefined && (is_polynomial || fabs(z) < 1 || (fabs(z) == 1 && b1 > a1 + a2))) { return; } std::stringstream msg; msg << "called from function '" << function << "', " << "hypergeometric function 2F1 does not meet convergence " << "conditions with given arguments. " << "a1: " << a1 << ", a2: " << a2 << ", " << "b1: " << b1 << ", z: " << z; throw std::domain_error(msg.str()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/invalid_argument.hpp0000644000176200001440000000363014645137106024562 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_INVALID_ARGUMENT_HPP #define STAN_MATH_PRIM_ERR_INVALID_ARGUMENT_HPP #include #include #include #include namespace stan { namespace math { /** * Throw an invalid_argument exception with a consistently formatted message. 
* This is an abstraction for all Stan functions to use when throwing * invalid argument. This will allow us to change the behavior for all * functions at once. * The message is: ": " * @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param msg1 Message to print before the variable * @param msg2 Message to print after the variable * @throw std::invalid_argument */ template inline void invalid_argument(const char* function, const char* name, const T& y, const char* msg1, const char* msg2) { std::ostringstream message; message << function << ": " << name << " " << msg1 << y << msg2; throw std::invalid_argument(message.str()); } /** * Throw an invalid_argument exception with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * invalid argument. This will allow us to change the behavior for all * functions at once. (We've already changed behavior multiple times up * to Stan v2.5.0.) * The message is: ": " * @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param msg1 Message to print before the variable * @throw std::invalid_argument */ template inline void invalid_argument(const char* function, const char* name, const T& y, const char* msg1) { invalid_argument(function, name, y, msg1, ""); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/throw_domain_error_vec.hpp0000644000176200001440000000457314645137106026001 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_THROW_DOMAIN_ERROR_VEC_HPP #define STAN_MATH_PRIM_ERR_THROW_DOMAIN_ERROR_VEC_HPP #include #include #include #include #include namespace stan { namespace math { /** * Throw a domain error with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * domain errors. 
This will allow us to change the behavior for all * functions at once. * The message is: ": [] " * where error_index is the value of stan::error_index::value * which indicates whether the message should be 0 or 1 indexed. * @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param i Index * @param msg1 Message to print before the variable * @param msg2 Message to print after the variable * @throw std::domain_error Always. */ template inline void throw_domain_error_vec(const char* function, const char* name, const T& y, size_t i, const char* msg1, const char* msg2) { std::ostringstream vec_name_stream; vec_name_stream << name << "[" << stan::error_index::value + i << "]"; std::string vec_name(vec_name_stream.str()); throw_domain_error(function, vec_name.c_str(), stan::get(y, i), msg1, msg2); } /** * Throw a domain error with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * domain errors. This will allow us to change the behavior for all * functions at once. * The message is: ": [] " * where error_index is the value of stan::error_index::value * which indicates whether the message should be 0 or 1 indexed. 
* @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param i Index * @param msg Message to print before the variable * @throw std::domain_error Always */ template inline void throw_domain_error_vec(const char* function, const char* name, const T& y, size_t i, const char* msg) { throw_domain_error_vec(function, name, y, i, msg, ""); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_column_index.hpp0000644000176200001440000000277114645137106025060 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_COLUMN_INDEX_HPP #define STAN_MATH_PRIM_ERR_CHECK_COLUMN_INDEX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified index is a valid column of the matrix. * By default this is a 1-indexed check (as opposed to * 0-indexed). Behavior can be changed by setting * stan::error_index::value. This function will * throw an std::out_of_range exception if * the index is out of bounds. 
* @tparam T_y Type of matrix * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y matrix to test * @param i column index to check * @throw std::out_of_range if index is an invalid column */ template , is_prim_or_rev_kernel_expression>* = nullptr> inline void check_column_index(const char* function, const char* name, const T_y& y, size_t i) { if (!(i >= stan::error_index::value && i < static_cast(y.cols()) + stan::error_index::value)) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << " for columns of " << name; std::string msg_str(msg.str()); out_of_range(function, y.cols(), i, msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/out_of_range.hpp0000644000176200001440000000264514645137106023706 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_OUT_OF_RANGE_HPP #define STAN_MATH_PRIM_ERR_OUT_OF_RANGE_HPP #include #include #include #include namespace stan { namespace math { /** * Throw an out_of_range exception with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * out of range. This will allow us to change the behavior for all * functions at once. * The message is: * ": index out of range; expecting index to be between " * "1 and " * @param function Name of the function * @param max Max * @param index Index * @param msg1 Message to print. Default is "". * @param msg2 Message to print. Default is "". * @throw std::out_of_range with message. */ inline void out_of_range(const char* function, int max, int index, const char* msg1 = "", const char* msg2 = "") { std::ostringstream message; message << function << ": accessing element out of range. 
" << "index " << index << " out of range; "; if (max == 0) { message << "container is empty and cannot be indexed" << msg1 << msg2; } else { message << "expecting index to be between " << stan::error_index::value << " and " << stan::error_index::value - 1 + max << msg1 << msg2; } throw std::out_of_range(message.str()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_consistent_size.hpp0000644000176200001440000000317114645137106025612 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_CONSISTENT_SIZE_HPP #define STAN_MATH_PRIM_ERR_CHECK_CONSISTENT_SIZE_HPP #include #include #include #include #include namespace stan { namespace math { /** * Check if `x` is consistent with size `expected_size`. `x` is consistent with * size `expected_size` if `x` is a vector of size `expected_size`, or a non * vector. * @tparam T type of value * @param function function name (for error messages) * @param name variable name (for error messages) * @param x variable to check for consistent size * @param expected_size expected size if `x` is a vector * @throw `invalid_argument` if the size is inconsistent */ template inline void check_consistent_size(const char* function, const char* name, const T& x, size_t expected_size) { if (!(!is_vector::value || (is_vector::value && expected_size == stan::math::size(x)))) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << ", expecting dimension = " << expected_size << "; a function was called with arguments of different " << "scalar, array, vector, or matrix types, and they were not " << "consistently sized; all arguments must be scalars or " << "multidimensional values of the same shape."; std::string msg_str(msg.str()); invalid_argument(function, name, stan::math::size(x), "has dimension = ", msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_less.hpp0000644000176200001440000004063614645137106023344 0ustar 
liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_LESS_HPP #define STAN_MATH_PRIM_ERR_CHECK_LESS_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if `y` is not strictly less than `high`. This function is * vectorized and will check each element of `y` against each element of `high`. * @tparam T_y A scalar type * @tparam T_high A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { if (!(y < high)) { [](auto y, auto high, auto function, auto name, auto... idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be less than " + std::to_string(value_of_rec(high))) .c_str()); }(y, high, function, name, idxs...); } } /** * Throw an exception if `y` is not strictly less than each element of `high`. * This function is vectorized and will check each element of `y` against each * element of `high`. * @tparam T_y A scalar type * @tparam T_high A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar. 
* @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template < typename T_y, typename T_high, require_stan_scalar_t* = nullptr, require_vector_t* = nullptr, require_not_std_vector_vt* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& high_arr = as_array_or_scalar(value_of_rec(to_ref(high))); for (Eigen::Index i = 0; i < high_arr.size(); ++i) { if (unlikely(!(y < high_arr.coeff(i)))) { [](auto y, auto&& high_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be less than " + std::to_string(high_arr.coeff(i))) .c_str()); }(y, high_arr, name, function, i, idxs...); } } } /** * Throw an exception if `y` is not strictly less than each element of `high`. * This function is vectorized and will check each element of `y` against each * element of `high`. 
* @tparam T_y A scalar type * @tparam T_high Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, require_dense_dynamic_t* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& high_arr = value_of_rec(to_ref(high)); for (Eigen::Index j = 0; j < high_arr.cols(); ++j) { for (Eigen::Index i = 0; i < high_arr.rows(); ++i) { if (unlikely(!(y < high_arr.coeff(i, j)))) { [](auto y, auto&& high_arr, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error(function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be less than " + std::to_string(high_arr.coeff(i, j))) .c_str()); }(y, high_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not strictly less than `high`. * This function is vectorized and will check each element of `y` against each * element of `high`. * @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar. 
* @tparam T_high A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, require_not_std_vector_vt* = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(as_array_or_scalar(to_ref(y))); for (Eigen::Index i = 0; i < y_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) < high))) { [](auto&& y_arr, auto high, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be less than " + std::to_string(value_of_rec(high))) .c_str()); }(y_arr, high, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not strictly less than `high`. * This function is vectorized and will check each element of `y` against each * element of `high`. 
* @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_high A scalar type * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); for (Eigen::Index j = 0; j < y_arr.cols(); ++j) { for (Eigen::Index i = 0; i < y_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) < high))) { [](auto&& y_arr, auto high, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be less than " + std::to_string(value_of_rec(high))) .c_str()); }(y_arr, high, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not strictly less than each * element of `high`. This function is vectorized and will check each element of * `y` against each element of `high`. * @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar. * @tparam T_high A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar. 
* @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, require_all_not_std_vector_vt* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); auto&& high_arr = value_of_rec(to_ref(high)); for (Eigen::Index i = 0; i < y_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) < high_arr.coeff(i)))) { [](auto&& y_arr, auto&& high_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be less than " + std::to_string(high_arr.coeff(i))) .c_str()); }(y_arr, high_arr, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not strictly less than each * element of `high`. This function is vectorized and will check each element of * `y` against each element of `high`. 
* @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_high Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); auto&& high_arr = value_of_rec(to_ref(high)); for (Eigen::Index j = 0; j < y_arr.cols(); ++j) { for (Eigen::Index i = 0; i < y_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) < high_arr.coeff(i, j)))) { [](auto&& y_arr, auto&& high_arr, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be less than " + std::to_string(high_arr.coeff(i, j))) .c_str()); }(y_arr, high_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not strictly less than `high`. * This function is vectorized and will check each element of `y` against each * element of `high`. 
* @tparam T_y A standard vector type with a `value_type` of a standard vector * or type inheriting from `Eigen::DenseBase` * @tparam T_high A scalar type or the same type as the inner type of `T_high` * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, require_not_std_vector_t* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { for (size_t i = 0; i < y.size(); ++i) { check_less(function, name, y[i], high, idxs..., i); } } /** * Throw an exception if `y` is not strictly less than each element of `high`. * This function is vectorized and will check each element of `y` against each * element of `high`. * @tparam T_y A scalar type or the same type as the inner type of `T_high` * @tparam T_high A standard vector type with a `value_type` of a standard * vector or type inheriting from `Eigen::DenseBase` * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, require_std_vector_vt* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... 
idxs) { for (size_t i = 0; i < high.size(); ++i) { check_less(function, name, y, high[i], idxs..., i); } } /** * Throw an exception if each element of `y` is not strictly less than each * associated element of `high`. This function is vectorized and will check each * element of `y` against each element of `high`. * @tparam T_y A standard vector type whose `value_type` is a scalar, type * inheriting from `Eigen::DenseBase`, or another standard vector * @tparam T_high A standard vector type whose `value_type` is a scalar, type * inheriting from `Eigen::DenseBase`, or another standard vector * @tparam Idxs A parameter pack of Integral types * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param high Upper bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `domain_error` if y is not less than high or if any element of y or * high is `NaN` */ template * = nullptr, require_all_std_vector_t* = nullptr, typename... Idxs> inline void check_less(const char* function, const char* name, const T_y& y, const T_high& high, Idxs... idxs) { for (size_t i = 0; i < y.size(); ++i) { check_less(function, name, y[i], high[i], idxs..., i); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_vector.hpp0000644000176200001440000000261614645137106023674 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_VECTOR_HPP #define STAN_MATH_PRIM_ERR_CHECK_VECTOR_HPP #include #include #include #include #include #include #ifdef STAN_OPENCL #include #endif namespace stan { namespace math { /** * Check the input is either a row vector or column vector or * a matrix with a single row or column. * * @tparam Mat Input type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param x Input * @throw std::invalid_argument if x is not a row or column * vector. 
*/ template , is_prim_or_rev_kernel_expression>* = nullptr> inline void check_vector(const char* function, const char* name, const Mat& x) { if (!(x.rows() == 1 || x.cols() == 1)) { STAN_NO_RANGE_CHECKS_RETURN; [&]() STAN_COLD_PATH { std::ostringstream msg; msg << ") has " << x.rows() << " rows and " << x.cols() << " columns but it should be a vector so it should " << "either have 1 row or 1 column"; std::string msg_str(msg.str()); invalid_argument(function, name, 0.0, "(", msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_unit_vector.hpp0000644000176200001440000000253714645137106024273 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_UNIT_VECTOR_HPP #define STAN_MATH_PRIM_ERR_IS_UNIT_VECTOR_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return true if the vector is not a unit vector or if any * element is NaN. * A valid unit vector is one where the square elements * summed is equal to 1. This function tests that the sum * is within the tolerance specified by CONSTRAINT_TOLERANCE. * This function only accepts Eigen::Matrix vectors, statically * typed vectors, not general matrices with 1 column. 
* @tparam EigMat A type derived from `EigenBase` with 1 compile time row or * column * @param theta Eigen vector to test * @return true if the vector is not a unit * vector or if any element is NaN */ template * = nullptr> inline bool is_unit_vector(const EigVec& theta) { using std::fabs; if (is_nonzero_size(theta)) { value_type_t seq = theta.squaredNorm(); return fabs(1.0 - seq) <= CONSTRAINT_TOLERANCE; } return false; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_cholesky_factor.hpp0000644000176200001440000000630514645137106025550 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_CHOLESKY_FACTOR_HPP #define STAN_MATH_PRIM_ERR_CHECK_CHOLESKY_FACTOR_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified matrix is not a valid Cholesky factor. A * Cholesky factor is a lower triangular matrix whose diagonal elements are all * positive. Note that Cholesky factors need not be square, but require at * least as many rows M as columns N (i.e., `M >= N`). 
* @tparam Mat Type inheriting from `MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 or a `var_value` with the var's * inner type inheriting from `Eigen::MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Matrix to test * @throw `std::domain_error` if y is not a valid Cholesky factor, if number of * rows is less than the number of columns, if there are 0 columns, or if any * element in matrix is `NaN` */ template * = nullptr> inline void check_cholesky_factor(const char* function, const char* name, const Mat& y) { check_less_or_equal(function, "columns and rows of Cholesky factor", y.cols(), y.rows()); check_positive(function, "columns of Cholesky factor", y.cols()); auto&& y_ref = to_ref(value_of_rec(y)); check_lower_triangular(function, name, y_ref); check_positive(function, name, y_ref.diagonal()); } /** * Throw an exception if the specified matrix is not a valid Cholesky factor. A * Cholesky factor is a lower triangular matrix whose diagonal elements are all * positive. Note that Cholesky factors need not be square, but require at * least as many rows M as columns N (i.e., `M >= N`). 
* @tparam StdVec A standard vector with inner type either inheriting from * `MatrixBase` with neither rows or columns defined at compile time to be equal * to 1 or a `var_value` with the var's inner type inheriting from * `Eigen::MatrixBase` with neither rows or columns defined at compile time to * be equal to 1 * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Standard vector of matrices to test * @throw `std::domain_error` if y is not a valid Cholesky factor, if number of * rows is less than the number of columns, if there are 0 columns, or if any * element in matrix is `NaN` */ template * = nullptr> void check_cholesky_factor(const char* function, const char* name, const StdVec& y) { for (size_t i = 0; i < y.size(); ++i) { check_cholesky_factor(function, internal::make_iter_name(name, i).c_str(), y[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_cholesky_factor_corr.hpp0000644000176200001440000000713214645137106026574 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_CHOLESKY_FACTOR_CORR_HPP #define STAN_MATH_PRIM_ERR_CHECK_CHOLESKY_FACTOR_CORR_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified matrix is not a valid Cholesky factor of * a correlation matrix. A Cholesky factor is a lower triangular matrix whose * diagonal elements are all positive and each row has unit Euclidean length. * Note that Cholesky factors need not be square, but require at least as many * rows M as columns N (i.e., `M >= N`). Tolerance is specified by * `math::CONSTRAINT_TOLERANCE`. Tolerance is specified by * `math::CONSTRAINT_TOLERANCE`. 
* @tparam Mat Type inheriting from `MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 or a `var_value` with the var's * inner type inheriting from `Eigen::MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Matrix to test * @throw `std::domain_error` if y is not a valid Cholesky factor, if number of * rows is less than the number of columns, if there are 0 columns, or if any * element in matrix is NaN */ template * = nullptr> void check_cholesky_factor_corr(const char* function, const char* name, const Mat& y) { const auto& y_ref = to_ref(value_of_rec(y)); check_square(function, name, y_ref); check_lower_triangular(function, name, y_ref); check_positive(function, name, y_ref.diagonal()); for (Eigen::Index i = 0; i < y_ref.rows(); ++i) { check_unit_vector(function, name, y_ref.row(i)); } } /** * Throw an exception if the specified matrix is not a valid Cholesky factor of * a correlation matrix. A Cholesky factor is a lower triangular matrix whose * diagonal elements are all positive and each row has unit Euclidean length. * Note that Cholesky factors need not be square, but require at least as many * rows M as columns N (i.e., `M >= N`). Tolerance is specified by * `math::CONSTRAINT_TOLERANCE`. Tolerance is specified by * `math::CONSTRAINT_TOLERANCE`. 
* @tparam StdVec A standard vector with inner type either inheriting from * `MatrixBase` with neither rows or columns defined at compile time to be equal * to 1 or a `var_value` with the var's inner type inheriting from * `Eigen::MatrixBase` with neither rows or columns defined at compile time to * be equal to 1 * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Standard vector of matrics to test * @throw `std::domain_error` if y[i] is not a valid Cholesky factor, if number * of rows is less than the number of columns, if there are 0 columns, or if any * element in matrix is NaN */ template * = nullptr> void check_cholesky_factor_corr(const char* function, const char* name, const StdVec& y) { for (size_t i = 0; i < y.size(); ++i) { check_cholesky_factor_corr(function, internal::make_iter_name(name, i).c_str(), y[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_not_nan.hpp0000644000176200001440000000165014645137106023361 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_NOT_NAN_HPP #define STAN_MATH_PRIM_ERR_IS_NOT_NAN_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return true if y is not NaN. * This function is vectorized and will check each element of * y. If no element is NaN, this * function will return true. 
* @tparam T_y Type of y * @param y Variable to check * @return true if no element of y is NaN */ template inline bool is_not_nan(const T_y& y) { for (size_t n = 0; n < stan::math::size(y); ++n) { if (is_nan(value_of_rec(stan::get(y, n)))) { return false; } } return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_ldlt_factor.hpp0000644000176200001440000000147014645137106024222 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_LDLT_FACTOR_HPP #define STAN_MATH_PRIM_ERR_IS_LDLT_FACTOR_HPP #include #include #include namespace stan { namespace math { /** * Return true if the specified LDLT factor is invalid. * An LDLT_factor is invalid if it was constructed from * a matrix that is not positive definite. * * @tparam T Type matrix of LDLT * @param A The LDLT factor to check for validity * @return true if the LDLT factor is valid */ template inline bool is_ldlt_factor(LDLT_factor& A) { return A.ldlt().info() == Eigen::Success && A.ldlt().isPositive() && (A.ldlt().vectorD().array() > 0).all(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/make_iter_name.hpp0000644000176200001440000000211614645137106024170 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_MAKE_ITER_NAME_HPP #define STAN_MATH_PRIM_ERR_MAKE_ITER_NAME_HPP #include #include #include namespace stan { namespace math { namespace internal { inline constexpr auto construct_idx() noexcept { return ""; } template inline auto construct_idx(size_t i, Idxs... idxs) { if (sizeof...(Idxs) > 0) { return std::to_string(i + stan::error_index::value) + ", " + construct_idx(idxs...); } else { return std::to_string(i + stan::error_index::value); } } inline auto make_iter_name(const char* name) { return std::string(name); } /** * Given a name and index, generate a new name with the index attached. * This will produce strings of the form `[]`. * @param name The name of the argument. * @param idxs The index. 
*/ template inline auto make_iter_name(const char* name, Idxs... idxs) { return (std::string(name) + "[" + construct_idx(idxs...) + "]"); } } // namespace internal } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_less_or_equal.hpp0000644000176200001440000000221514645137106024560 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_LESS_OR_EQUAL_HPP #define STAN_MATH_PRIM_ERR_IS_LESS_OR_EQUAL_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return true if y is less or equal to * high. * This function is vectorized and will check each element of * y against each element of high. * @tparam T_y Type of y * @tparam T_high Type of upper bound * @param y Variable to check * @param high Upper bound * @return true if y is less than or equal * to low and if and element of y or high is NaN */ template inline bool is_less_or_equal(const T_y& y, const T_high& high) { scalar_seq_view high_vec(high); const auto& y_ref = to_ref(y); for (size_t n = 0; n < stan::math::size(high); n++) { if (!(stan::get(y_ref, n) <= high_vec[n])) { return false; } } return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/domain_error.hpp0000644000176200001440000000143014645137106023706 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_DOMAIN_ERROR_HPP #define STAN_MATH_PRIM_ERR_DOMAIN_ERROR_HPP #include #include namespace stan { namespace math { /** * @deprecated use throw_domain_error */ template inline void domain_error(const char* function, const char* name, const T& y, const char* msg1, const char* msg2) { throw_domain_error(function, name, y, msg1, msg2); } /** * @deprecated use throw_domain_error */ template inline void domain_error(const char* function, const char* name, const T& y, const char* msg1) { throw_domain_error(function, name, y, msg1); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/err/check_pos_definite.hpp0000644000176200001440000000710714645137106025042 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_POS_DEFINITE_HPP #define STAN_MATH_PRIM_ERR_CHECK_POS_DEFINITE_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified square, symmetric matrix is positive definite. * * This computes an LDLT decomposition to establish positive definiteness, * so it can be expensive. If an LDLT or LLT decomposition is available, * that should be tested instead. * * @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param function function name (for error messages) * @param name variable name (for error messages) * @param y matrix to test * @throw std::invalid_argument if the matrix is not square * or if the matrix has 0 size. * @throw std::domain_error if the matrix is not symmetric, * if it is not positive definite, or if any element is NaN */ template * = nullptr> inline void check_pos_definite(const char* function, const char* name, const EigMat& y) { const auto& y_ref = to_ref(value_of_rec(y)); check_symmetric(function, name, y_ref); check_positive(function, name, "rows", y_ref.rows()); check_not_nan(function, name, y_ref); if (y_ref.rows() == 1 && !(y_ref(0, 0) > CONSTRAINT_TOLERANCE)) { throw_domain_error(function, name, "is not positive definite.", ""); } Eigen::LDLT cholesky = value_of_rec(y_ref).ldlt(); if (cholesky.info() != Eigen::Success || !cholesky.isPositive() || (cholesky.vectorD().array() <= 0.0).any()) { throw_domain_error(function, name, "is not positive definite.", ""); } } /** * Check if the specified LDLT decomposition of a matrix is positive definite. 
* * @tparam Derived type of the Eigen::LDLT decomposition * @param function function name (for error messages) * @param name variable name (for error messages) * @param cholesky Eigen::LDLT to test, whose progenitor * must not have any NaN elements * @throw std::domain_error if the matrix is not positive definite */ template inline void check_pos_definite(const char* function, const char* name, const Eigen::LDLT& cholesky) { if (cholesky.info() != Eigen::Success || !cholesky.isPositive() || !(cholesky.vectorD().array() > 0.0).all()) { throw_domain_error(function, "LDLT decomposition of", " failed", name); } } /** * Check if the specified LLT decomposition was successful. * * @tparam Derived type of the Eigen::LLT decomposition * @param function function name (for error messages) * @param name variable name (for error messages) * @param cholesky Eigen::LLT to test, whose progenitor * must not have any NaN elements * @throw std::domain_error if the decomposition failed or the * diagonal of the L matrix is not positive */ template inline void check_pos_definite(const char* function, const char* name, const Eigen::LLT& cholesky) { if (cholesky.info() != Eigen::Success || !(cholesky.matrixLLT().diagonal().array() > 0.0).all()) { throw_domain_error(function, "Matrix", " is not positive definite", name); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_range.hpp0000644000176200001440000000555614645137106023474 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_RANGE_HPP #define STAN_MATH_PRIM_ERR_CHECK_RANGE_HPP #include #include #include #include namespace stan { namespace math { /** * Check if specified index is within range. * This check is 1-indexed by default. This behavior can be * changed by setting stan::error_index::value. 
* @param function Function name (for error messages) * @param name Variable name (for error messages) * @param max Maximum size of the variable * @param index Index to check * @param nested_level Nested level (for error messages) * @param error_msg Additional error message (for error messages) * @throw std::out_of_range if the index is not in range */ inline void check_range(const char* function, const char* name, int max, int index, int nested_level, const char* error_msg) { STAN_NO_RANGE_CHECKS_RETURN; if (!((index >= stan::error_index::value) && (index < max + stan::error_index::value))) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << "; index position = " << nested_level; std::string msg_str(msg.str()); out_of_range(function, max, index, msg_str.c_str(), error_msg); }(); } } /** * Check if specified index is within range. * This check is 1-indexed by default. This behavior can be * changed by setting stan::error_index::value. * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param max Maximum size of the variable * @param index Index to check * @param error_msg Additional error message (for error messages) * @throw std::out_of_range if the index is not in range */ inline void check_range(const char* function, const char* name, int max, int index, const char* error_msg) { STAN_NO_RANGE_CHECKS_RETURN; if (!((index >= stan::error_index::value) && (index < max + stan::error_index::value))) { [&]() STAN_COLD_PATH { out_of_range(function, max, index, error_msg); }(); } } /** * Check if specified index is within range. * This check is 1-indexed by default. This behavior can be * changed by setting stan::error_index::value. 
* @param function Function name (for error messages) * @param name Variable name (for error messages) * @param max Maximum size of the variable * @param index Index to check * @throw std::out_of_range if the index is not in range */ inline void check_range(const char* function, const char* name, int max, int index) { STAN_NO_RANGE_CHECKS_RETURN; if (!((index >= stan::error_index::value) && (index < max + stan::error_index::value))) { [&]() STAN_COLD_PATH { out_of_range(function, max, index); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_bounded.hpp0000644000176200001440000000603414645137106024010 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_BOUNDED_HPP #define STAN_MATH_PRIM_ERR_CHECK_BOUNDED_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { // implemented using structs because there is no partial specialization // for templated functions // default implementation works for scalar T_y. 
T_low and T_high can // be either scalar or vector // throws if y, low, or high is nan template struct bounded { static void check(const char* function, const char* name, const T_y& y, const T_low& low, const T_high& high) { scalar_seq_view low_vec(low); scalar_seq_view high_vec(high); for (size_t n = 0; n < max_size(low, high); n++) { if (!(low_vec[n] <= y && y <= high_vec[n])) { std::stringstream msg; msg << ", but must be in the interval "; msg << "[" << low_vec[n] << ", " << high_vec[n] << "]"; std::string msg_str(msg.str()); throw_domain_error(function, name, y, "is ", msg_str.c_str()); } } } }; template struct bounded { static void check(const char* function, const char* name, const T_y& y, const T_low& low, const T_high& high) { scalar_seq_view low_vec(low); scalar_seq_view high_vec(high); for (size_t n = 0; n < stan::math::size(y); n++) { if (!(low_vec[n] <= stan::get(y, n) && stan::get(y, n) <= high_vec[n])) { std::stringstream msg; msg << ", but must be in the interval "; msg << "[" << low_vec[n] << ", " << high_vec[n] << "]"; std::string msg_str(msg.str()); throw_domain_error_vec(function, name, y, n, "is ", msg_str.c_str()); } } } }; } // namespace internal /** * Check if the value is between the low and high values, inclusively. * @tparam T_y Type of value * @tparam T_low Type of low value * @tparam T_high Type of high value * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Value to check * @param low Low bound * @param high High bound * @throw std::domain_error otherwise. This also throws * if any of the arguments are NaN. 
*/ template inline void check_bounded(const char* function, const char* name, const T_y& y, const T_low& low, const T_high& high) { if (size_zero(y, low, high)) { return; } internal::bounded::value>::check( function, name, y, low, high); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_simplex.hpp0000644000176200001440000000710614645137106024052 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_SIMPLEX_HPP #define STAN_MATH_PRIM_ERR_CHECK_SIMPLEX_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified vector is not a simplex. To be a simplex, * all values must be greater than or equal to 0 and the values must sum to 1. A * valid simplex is one where the sum of the elements is equal to 1. This * function tests that the sum is within the tolerance specified by * `CONSTRAINT_TOLERANCE`. This function only accepts Eigen vectors, statically * typed vectors, not general matrices with 1 column. 
* @tparam T A type inheriting from `Eigen::EigenBase` * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param theta Vector to test * @throw `std::invalid_argument` if `theta` is a 0-vector * @throw `std::domain_error` if the vector is not a simplex or if any element * is `NaN` */ template * = nullptr> void check_simplex(const char* function, const char* name, const T& theta) { using std::fabs; check_nonzero_size(function, name, theta); auto&& theta_ref = to_ref(value_of_rec(theta)); if (!(fabs(1.0 - theta_ref.sum()) <= CONSTRAINT_TOLERANCE)) { [&]() STAN_COLD_PATH { std::stringstream msg; scalar_type_t sum = theta_ref.sum(); msg << "is not a valid simplex."; msg.precision(10); msg << " sum(" << name << ") = " << sum << ", but should be "; std::string msg_str(msg.str()); throw_domain_error(function, name, 1.0, msg_str.c_str()); }(); } for (Eigen::Index n = 0; n < theta_ref.size(); n++) { if (!(theta_ref.coeff(n) >= 0)) { [&]() STAN_COLD_PATH { std::ostringstream msg; msg << "is not a valid simplex. " << name << "[" << n + stan::error_index::value << "]" << " = "; std::string msg_str(msg.str()); throw_domain_error(function, name, theta_ref.coeff(n), msg_str.c_str(), ", but should be greater than or equal to 0"); }(); } } } /** * Throw an exception if each vector in a standard vector is not a simplex. To * be a simplex, all values must be greater than or equal to 0 and the values * must sum to 1. A valid simplex is one where the sum of the elements is equal * to 1. This function tests that the sum is within the tolerance specified by * `CONSTRAINT_TOLERANCE`. This function only accepts Eigen vectors, statically * typed vectors, not general matrices with 1 column. * @tparam T A standard vector with inner type inheriting from * `Eigen::EigenBase` * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param theta Vector to test. 
* @throw `std::invalid_argument` if `theta` is a 0-vector * @throw `std::domain_error` if the vector is not a simplex or if any element * is `NaN` */ template * = nullptr> void check_simplex(const char* function, const char* name, const T& theta) { for (size_t i = 0; i < theta.size(); ++i) { check_simplex(function, internal::make_iter_name(name, i).c_str(), theta[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_ordered.hpp0000644000176200001440000000730314645137106024014 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_ORDERED_HPP #define STAN_MATH_PRIM_ERR_CHECK_ORDERED_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified vector is not sorted into strictly * increasing order. * @tparam T_y A type inheriting from EigenBase with either 1 compile time row * or column * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Vector to test * @throw `std::domain_error` if the vector elements are not ordered, if there * are duplicated values, or if any element is `NaN` */ template * = nullptr, require_not_std_vector_t* = nullptr> void check_ordered(const char* function, const char* name, const T_y& y) { const auto& y_ref = to_ref(value_of_rec(y)); for (Eigen::Index n = 1; n < y_ref.size(); n++) { if (!(y_ref[n] > y_ref[n - 1])) { [&]() STAN_COLD_PATH { std::ostringstream msg1; msg1 << "is not a valid ordered vector." << " The element at " << stan::error_index::value + n << " is "; std::string msg1_str(msg1.str()); std::ostringstream msg2; msg2 << ", but should be greater than the previous element, " << y_ref[n - 1]; std::string msg2_str(msg2.str()); throw_domain_error(function, name, y_ref[n], msg1_str.c_str(), msg2_str.c_str()); }(); } } } /** * Throw an exception if the specified vector is not sorted into strictly * increasing order. 
* @tparam T_y A standard vector with inner scalar type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y `std::vector` to test * @throw `std::domain_error` if the vector elements are not ordered, if there * are duplicated values, or if any element is `NaN` */ template * = nullptr> void check_ordered(const char* function, const char* name, const T_y& y) { for (size_t n = 1; n < y.size(); n++) { if (!(y[n] > y[n - 1])) { [&]() STAN_COLD_PATH { std::ostringstream msg1; msg1 << "is not a valid ordered vector." << " The element at " << stan::error_index::value + n << " is "; std::string msg1_str(msg1.str()); std::ostringstream msg2; msg2 << ", but should be greater than the previous element, " << y[n - 1]; std::string msg2_str(msg2.str()); throw_domain_error(function, name, y[n], msg1_str.c_str(), msg2_str.c_str()); }(); } } } /** * Throw an exception if each vector in a standard vector is not sorted into * strictly increasing order. 
* @tparam T_y A standard vector with an inner vector type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y `std::vector` to test * @throw `std::domain_error` if the vector elements are not ordered, if there * are duplicated values, or if any element is `NaN` */ template * = nullptr, require_not_vt_stan_scalar* = nullptr> void check_ordered(const char* function, const char* name, const T_y& y) { for (size_t i = 0; i < y.size(); ++i) { check_ordered(function, internal::make_iter_name(name, i).c_str(), y[i]); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_sorted.hpp0000644000176200001440000000517314645137106023673 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_SORTED_HPP #define STAN_MATH_PRIM_ERR_CHECK_SORTED_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified vector is sorted into increasing order (repeated * values are okay). * @tparam EigVec A type derived from `EigenBase` with 1 compile time row or * column. * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Vector to test * @throw std::domain_error if the vector elements are * not sorted, or if any element is NaN. */ template * = nullptr> void check_sorted(const char* function, const char* name, const EigVec& y) { const auto& y_ref = to_ref(y); for (Eigen::Index n = 1; n < y_ref.size(); n++) { if (!(y_ref[n] >= y_ref[n - 1])) { std::ostringstream msg1; msg1 << "is not a valid sorted vector." 
<< " The element at " << stan::error_index::value + n << " is "; std::string msg1_str(msg1.str()); std::ostringstream msg2; msg2 << ", but should be greater than or equal to the previous element, " << y_ref[n - 1]; std::string msg2_str(msg2.str()); throw_domain_error(function, name, y_ref[n], msg1_str.c_str(), msg2_str.c_str()); } } } /** * Check if the specified vector is sorted into increasing order (repeated * values are okay). * @tparam T_y Type of scalar * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y std::vector to test * @throw std::domain_error if the vector elements are * not sorted, or if any element * is NaN. */ template void check_sorted(const char* function, const char* name, const std::vector& y) { for (size_t n = 1; n < y.size(); n++) { if (!(y[n] >= y[n - 1])) { std::ostringstream msg1; msg1 << "is not a valid sorted vector." << " The element at " << stan::error_index::value + n << " is "; std::string msg1_str(msg1.str()); std::ostringstream msg2; msg2 << ", but should be greater than or equal to the previous element, " << y[n - 1]; std::string msg2_str(msg2.str()); throw_domain_error(function, name, y[n], msg1_str.c_str(), msg2_str.c_str()); } } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_cov_matrix.hpp0000644000176200001440000000475714645137106024555 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_COV_MATRIX_HPP #define STAN_MATH_PRIM_ERR_CHECK_COV_MATRIX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified matrix is not a valid covariance matrix. * A valid covariance matrix is a square, symmetric matrix that is positive * definite. 
* @tparam Mat Type inheriting from `MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 or a `var_value` with the var's * inner type inheriting from `Eigen::MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Matrix to test * @throw `std::invalid_argument` if the matrix is not square or if the matrix * is 0x0 * @throw `std::domain_error` if the matrix is not symmetric, if the matrix is * not positive definite, or if any element of the matrix is `NaN` */ template * = nullptr> inline void check_cov_matrix(const char* function, const char* name, const Mat& y) { check_pos_definite(function, name, y); } /** * Throw an exception if the specified matrix is not a valid covariance matrix. * A valid covariance matrix is a square, symmetric matrix that is positive * definite. * @tparam StdVec A standard vector with inner type either inheriting from * `MatrixBase` with neither rows or columns defined at compile time to be equal * to 1 or a `var_value` with the var's inner type inheriting from * `Eigen::MatrixBase` with neither rows or columns defined at compile time to * be equal to 1 * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y standard vector of matrices to test. 
* @throw `std::invalid_argument` if the matrix is not square * or if the matrix is 0x0 * @throw `std::domain_error` if the matrix is not symmetric, if the matrix is * not positive definite, or if any element of the matrix is `NaN` */ template * = nullptr> void check_cov_matrix(const char* function, const char* name, const StdVec& y) { for (auto&& y_i : y) { check_cov_matrix(function, name, y_i); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_matching_sizes.hpp0000644000176200001440000000222014645137106025370 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_MATCHING_SIZES_HPP #define STAN_MATH_PRIM_ERR_CHECK_MATCHING_SIZES_HPP #include #include namespace stan { namespace math { /** * Check if two structures at the same size. * This function only checks the runtime sizes for variables that * implement a size() method. * @tparam T_y1 Type of the first variable * @tparam T_y2 Type of the second variable * @param function Function name (for error messages) * @param name1 First variable name (for error messages) * @param y1 First variable * @param name2 Second variable name (for error messages) * @param y2 Second variable * @throw std::invalid_argument if the sizes do not match */ template inline void check_matching_sizes(const char* function, const char* name1, const T_y1& y1, const char* name2, const T_y2& y2) { check_size_match(function, "size of ", name1, y1.size(), "size of ", name2, y2.size()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/system_error.hpp0000644000176200001440000000353014645137106023766 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_SYSTEM_ERROR_HPP #define STAN_MATH_PRIM_ERR_SYSTEM_ERROR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Throw a system error with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * system errors. 
This will allow us to change the behavior for all * functions at once. * The message is: ": " * @param[in] function Name of the function. * @param[in] name Name of the variable. * @param[in] y Error code. * @param[in] msg1 Message to print before the variable. * @param[in] msg2 Message to print after the variable. * @throw std::system_error Always. */ inline void system_error(const char* function, const char* name, const int& y, const char* msg1, const char* msg2) { std::ostringstream message; // hack to remove -Waddress, -Wnonnull-compare warnings from GCC 6 message << function << ": " << name << " " << msg1 << msg2; throw std::system_error(y, std::generic_category(), message.str()); } /** * Throw a system error with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * system errors. This will allow us to change the behavior for all * functions at once. * The message is: * ": " * @param[in] function Name of the function. * @param[in] name Name of the variable. * @param[in] y Error code. * @param[in] msg1 Message to print before the variable. * @throw std::system_error Always. */ inline void system_error(const char* function, const char* name, const int& y, const char* msg1) { system_error(function, name, y, msg1, ""); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_cholesky_factor_corr.hpp0000644000176200001440000000264514645137106026136 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_CHOLESKY_FACTOR_CORR_HPP #define STAN_MATH_PRIM_ERR_IS_CHOLESKY_FACTOR_CORR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Return true if y is a valid Cholesky factor, if * the number of rows is not less than the number of columns, if there * are no zero columns, and no element in matrix is NaN. * A Cholesky factor is a lower triangular matrix whose diagonal * elements are all positive. This definition does not require a * square matrix. 
* @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param y Matrix to test * @return true if y is a valid Cholesky factor, if * the number of rows is not less than the number of columns, * if there are no 0 columns, and no element in matrix is NaN */ template * = nullptr> inline bool is_cholesky_factor_corr(const EigMat& y) { const auto& y_ref = to_ref(y); if (!is_cholesky_factor(y_ref)) { return false; } for (int i = 0; i < y_ref.rows(); ++i) { if (!is_unit_vector(y_ref.row(i))) { return false; } } return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_corr_matrix.hpp0000644000176200001440000000662314645137106024725 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_CORR_MATRIX_HPP #define STAN_MATH_PRIM_ERR_CHECK_CORR_MATRIX_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if the specified matrix is not a valid correlation matrix. * A valid correlation matrix is symmetric positive definite, has a unit * diagonal (all 1 values), and has all values between -1 and 1 (inclusive). 
* @tparam Mat Type inheriting from `MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 or a `var_value` with the var's * inner type inheriting from `Eigen::MatrixBase` with neither rows or columns * defined at compile time to be equal to 1 * @param function Name of the function this was called from * @param name Name of the variable * @param y Matrix to test * @throw `std::invalid_argument` if the matrix is not square * @throw `std::domain_error` if the matrix is non-symmetric, diagonals not near * 1, not positive definite, or any of the elements are `NaN` */ template * = nullptr> inline void check_corr_matrix(const char* function, const char* name, const Mat& y) { auto&& y_ref = to_ref(value_of_rec(y)); check_square(function, name, y_ref); using std::fabs; if (y_ref.size() == 0) { return; } for (Eigen::Index k = 0; k < y.rows(); ++k) { if (!(fabs(y_ref.coeff(k, k) - 1.0) <= CONSTRAINT_TOLERANCE)) { [&y_ref, name, k, function]() STAN_COLD_PATH { std::ostringstream msg; msg << "is not a valid correlation matrix. " << name << "(" << stan::error_index::value + k << "," << stan::error_index::value + k << ") is "; std::string msg_str(msg.str()); throw_domain_error(function, name, y_ref(k, k), msg_str.c_str(), ", but should be near 1.0"); }(); } } check_pos_definite(function, name, y_ref); } /** * Throw an exception if the specified matrix is not a valid correlation matrix. * A valid correlation matrix is symmetric positive definite, has a unit * diagonal (all 1 values), and has all values between -1 and 1 (inclusive). * @tparam StdVec A standard vector with inner type either inheriting from * `Eigen::MatrixBase` with neither rows or columns defined at compile time to * be equal to 1 or a `var_value` with the var's inner type inheriting from * `Eigen::MatrixBase` with neither rows or columns defined at compile time to * be equal to 1. 
* @param function Name of the function this was called from * @param name Name of the variable * @param y Matrix to test * @throw `std::invalid_argument` if the matrix is not square * @throw `std::domain_error` if the matrix is non-symmetric, diagonals not near * 1, not positive definite, or any of the elements are `NaN` */ template * = nullptr> void check_corr_matrix(const char* function, const char* name, const StdVec& y) { for (auto&& y_i : y) { check_corr_matrix(function, name, y_i); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/validate_non_negative_index.hpp0000644000176200001440000000144014645137106026743 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_VALIDATE_NON_NEGATIVE_INDEX_HPP #define STAN_MATH_PRIM_ERR_VALIDATE_NON_NEGATIVE_INDEX_HPP #include #include #include #include namespace stan { namespace math { inline void validate_non_negative_index(const char* var_name, const char* expr, int val) { if (val < 0) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << "Found negative dimension size in variable declaration" << "; variable=" << var_name << "; dimension size expression=" << expr << "; expression value=" << val; std::string msg_str(msg.str()); throw std::invalid_argument(msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/elementwise_check.hpp0000644000176200001440000003222714645137106024714 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_ELEMENTWISE_ERROR_CHECKER_HPP #define STAN_MATH_PRIM_ERR_ELEMENTWISE_ERROR_CHECKER_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** Apply an error check to a container, signal failure with `false`. * Apply a predicate like is_positive to the double underlying every scalar in a * container, producing true if the predicate holds everywhere and `false` if it * fails anywhere. 
* @tparam F type of predicate */ template class Iser { const F& is_good; public: /** * @param is_good predicate to check, must accept doubles and produce bools */ explicit Iser(const F& is_good) : is_good(is_good) {} /** * Check the scalar. * @tparam T type of scalar * @param x scalar * @return `false` if the scalar fails the error check */ template > bool is(const T& x) { return is_good(value_of_rec(x)); } /** * Check all the scalars inside the container. * @tparam T type of scalar * @param x container * @return `false` if any of the scalars fail the error check */ template , typename = void> bool is(const T& x) { for (size_t i = 0; i < stan::math::size(x); ++i) if (!is(stan::get(x, i))) return false; return true; } }; /** * No-op. */ inline void pipe_in(std::stringstream& ss) {} /** * Pipes given arguments into a stringstream. * * @tparam Arg0 type of the first argument * @tparam Args types of remaining arguments * @param ss stringstream to pipe arguments in * @param arg0 the first argument * @param args remining arguments */ template inline void pipe_in(std::stringstream& ss, Arg0 arg0, const Args... args) { ss << arg0; pipe_in(ss, args...); } /** * Throws domain error with concatenation of arguments for the error message. * @tparam Args types of arguments * @param args arguments */ template void elementwise_throw_domain_error(const Args... args) { std::stringstream ss; pipe_in(ss, args...); throw std::domain_error(ss.str()); } } // namespace internal /** * Check that the predicate holds for the value of `x`. This overload * works on scalars. 
* * @tparam F type of predicate * @tparam T type of `x` * @tparam Indexings types of `indexings` * @param is_good predicate to check, must accept doubles and produce bools * @param function function name (for error messages) * @param name variable name (for error messages) * @param x variable to check, can be a scalar, a container of scalars, a * container of containers of scalars, etc * @param must_be message describing what the value should be * @param indexings any additional indexing to print. Intended for internal use * in `elementwise_check` only. * @throws `std::domain_error` if `is_good` returns `false` for the value * of any element in `x` */ template * = nullptr> inline void elementwise_check(const F& is_good, const char* function, const char* name, const T& x, const char* must_be, const Indexings&... indexings) { if (unlikely(!is_good(value_of_rec(x)))) { [&]() STAN_COLD_PATH { internal::elementwise_throw_domain_error(function, ": ", name, indexings..., " is ", x, ", but must be ", must_be, "!"); }(); } } /** * Check that the predicate holds for all elements of the value of `x`. This * overload works on Eigen types that support linear indexing. * * @tparam F type of predicate * @tparam T type of `x` * @tparam Indexings types of `indexings` * @param is_good predicate to check, must accept doubles and produce bools * @param function function name (for error messages) * @param name variable name (for error messages) * @param x variable to check, can be a scalar, a container of scalars, a * container of containers of scalars, etc * @param must_be message describing what the value should be * @param indexings any additional indexing to print. Intended for internal use * in `elementwise_check` only. 
* @throws `std::domain_error` if `is_good` returns `false` for the value * of any element in `x` */ template * = nullptr, std::enable_if_t<(Eigen::internal::traits::Flags & Eigen::LinearAccessBit) || T::IsVectorAtCompileTime>* = nullptr> inline void elementwise_check(const F& is_good, const char* function, const char* name, const T& x, const char* must_be, const Indexings&... indexings) { for (size_t i = 0; i < x.size(); i++) { auto scal = value_of_rec(x.coeff(i)); if (unlikely(!is_good(scal))) { [&]() STAN_COLD_PATH { if (is_eigen_vector::value) { internal::elementwise_throw_domain_error( function, ": ", name, indexings..., "[", i + error_index::value, "] is ", scal, ", but must be ", must_be, "!"); } else if (Eigen::internal::traits::Flags & Eigen::RowMajorBit) { internal::elementwise_throw_domain_error( function, ": ", name, indexings..., "[", i / x.cols() + error_index::value, ", ", i % x.cols() + error_index::value, "] is ", scal, ", but must be ", must_be, "!"); } else { internal::elementwise_throw_domain_error( function, ": ", name, indexings..., "[", i % x.rows() + error_index::value, ", ", i / x.rows() + error_index::value, "] is ", scal, ", but must be ", must_be, "!"); } }(); } } } /** * Check that the predicate holds for all elements of the value of `x`. This * overload works on col-major Eigen types that do not support linear indexing. * * @tparam F type of predicate * @tparam T type of `x` * @tparam Indexings types of `indexings` * @param is_good predicate to check, must accept doubles and produce bools * @param function function name (for error messages) * @param name variable name (for error messages) * @param x variable to check, can be a scalar, a container of scalars, a * container of containers of scalars, etc * @param must_be message describing what the value should be * @param indexings any additional indexing to print. Intended for internal use * in `elementwise_check` only. 
* @throws `std::domain_error` if `is_good` returns `false` for the value * of any element in `x` */ template * = nullptr, std::enable_if_t::Flags & Eigen::LinearAccessBit) && !T::IsVectorAtCompileTime && !(Eigen::internal::traits::Flags & Eigen::RowMajorBit)>* = nullptr> inline void elementwise_check(const F& is_good, const char* function, const char* name, const T& x, const char* must_be, const Indexings&... indexings) { for (size_t i = 0; i < x.rows(); i++) { for (size_t j = 0; j < x.cols(); j++) { auto scal = value_of_rec(x.coeff(i, j)); if (unlikely(!is_good(scal))) { [&]() STAN_COLD_PATH { internal::elementwise_throw_domain_error( function, ": ", name, indexings..., "[", i + error_index::value, ", ", j + error_index::value, "] is ", scal, ", but must be ", must_be, "!"); }(); } } } } /** * Check that the predicate holds for all the elements of the value of `x`. This * overload works on row-major Eigen types that do not support linear indexing. * * @tparam F type of predicate * @tparam T type of `x` * @tparam Indexings types of `indexings` * @param is_good predicate to check, must accept doubles and produce bools * @param function function name (for error messages) * @param name variable name (for error messages) * @param x variable to check, can be a scalar, a container of scalars, a * container of containers of scalars, etc * @param must_be message describing what the value should be * @param indexings any additional indexing to print. Intended for internal use * in `elementwise_check` only. * @throws `std::domain_error` if `is_good` returns `false` for the value * of any element in `x` */ template * = nullptr, std::enable_if_t< !(Eigen::internal::traits::Flags & Eigen::LinearAccessBit) && !T::IsVectorAtCompileTime && static_cast(Eigen::internal::traits::Flags & Eigen::RowMajorBit)>* = nullptr> inline void elementwise_check(const F& is_good, const char* function, const char* name, const T& x, const char* must_be, const Indexings&... 
indexings) { for (size_t j = 0; j < x.cols(); j++) { for (size_t i = 0; i < x.rows(); i++) { auto scal = value_of_rec(x.coeff(i, j)); if (unlikely(!is_good(scal))) { [&]() STAN_COLD_PATH { internal::elementwise_throw_domain_error( function, ": ", name, indexings..., "[", i + error_index::value, ", ", j + error_index::value, "] is ", scal, ", but must be ", must_be, "!"); }(); } } } } /** * Check that the predicate holds for all elements of the value of `x`. This * overload works on `std::vector` types. * * @tparam F type of predicate * @tparam T type of `x` * @tparam Indexings types of `indexings` * @param is_good predicate to check, must accept doubles and produce bools * @param function function name (for error messages) * @param name variable name (for error messages) * @param x variable to check, can be a scalar, a container of scalars, a * container of containers of scalars, etc * @param must_be message describing what the value should be * @param indexings any additional indexing to print. Intended for internal use * in `elementwise_check` only. * @throws `std::domain_error` if `is_good` returns `false` for the value * of any element in `x` */ template * = nullptr> inline void elementwise_check(const F& is_good, const char* function, const char* name, const T& x, const char* must_be, const Indexings&... indexings) { for (size_t j = 0; j < x.size(); j++) { elementwise_check(is_good, function, name, x[j], must_be, indexings..., "[", j + error_index::value, "]"); } } /** * Check that the predicate holds for all elements of the value of `x`. This * overload works on `var`s containing Eigen types. 
* * @tparam F type of predicate * @tparam T type of `x` * @tparam Indexings types of `indexings` * @param is_good predicate to check, must accept doubles and produce bools * @param function function name (for error messages) * @param name variable name (for error messages) * @param x variable to check, can be a scalar, a container of scalars, a * container of containers of scalars, etc * @param must_be message describing what the value should be * @param indexings any additional indexing to print. Intended for internal use * in `elementwise_check` only. * @throws `std::domain_error` if `is_good` returns `false` for the value * of any element in `x` */ template * = nullptr> inline void elementwise_check(const F& is_good, const char* function, const char* name, const T& x, const char* must_be, const Indexings&... indexings) { elementwise_check(is_good, function, name, x.val(), must_be, indexings...); } /** * Check that the predicate holds for the value of `x`, working elementwise on * containers. If `x` is a scalar, check the double underlying the scalar. If * `x` is a container, check each element inside `x`, recursively. * * @tparam F type of predicate * @tparam T type of `x` * @param is_good predicate to check, must accept doubles and produce bools * @param x variable to check, can be a scalar, a container of scalars, a * container of containers of scalars, etc * @return `false` if any of the scalars fail the error check */ template inline bool elementwise_is(const F& is_good, const T& x) { return internal::Iser{is_good}.is(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_ldlt_factor.hpp0000644000176200001440000000250414645137106024663 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_LDLT_FACTOR_HPP #define STAN_MATH_PRIM_ERR_CHECK_LDLT_FACTOR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Raise domain error if the specified LDLT factor is invalid. 
An * LDLT_factor is invalid if it was constructed from * a matrix that is not positive definite. * * @tparam T Type matrix of LDLT * @param[in] function name of function for error messages * @param[in] name variable name for error messages * @param[in] A the LDLT factor to check for validity * @throws std::domain_error if the LDLT factor is invalid */ template inline void check_ldlt_factor(const char* function, const char* name, LDLT_factor& A) { if (!(A.ldlt().info() == Eigen::Success && A.ldlt().isPositive() && (A.ldlt().vectorD().array() > 0).all())) { std::ostringstream msg; msg << "is not positive definite. last conditional variance is "; std::string msg_str(msg.str()); auto too_small = A.ldlt().vectorD().tail(1)(0); throw_domain_error(function, name, too_small, msg_str.c_str(), "."); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_nonzero_size.hpp0000644000176200001440000000206214645137106025111 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_NONZERO_SIZE_HPP #define STAN_MATH_PRIM_ERR_CHECK_NONZERO_SIZE_HPP #include #include namespace stan { namespace math { /** * Check if the specified matrix/vector is of non-zero size. * Throws a std:invalid_argument otherwise. The message * will indicate that the variable name "has size 0". * @tparam T_y Type of container * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Container to test. 
This will accept matrices and vectors * @throw std::invalid_argument if the specified matrix/vector * has zero size */ template inline void check_nonzero_size(const char* function, const char* name, const T_y& y) { if (y.size() == 0) { [&]() STAN_COLD_PATH { invalid_argument(function, name, 0, "has size ", ", but must have a non-zero size"); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_std_vector_index.hpp0000644000176200001440000000243114645137106025730 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_STD_VECTOR_INDEX_HPP #define STAN_MATH_PRIM_ERR_CHECK_STD_VECTOR_INDEX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified index is valid in std vector * This check is 1-indexed by default. This behavior can be changed * by setting stan::error_index::value. * @tparam T Scalar type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y std::vector to test * @param i Index * @throw std::out_of_range if the index is out of range. */ template inline void check_std_vector_index(const char* function, const char* name, const std::vector& y, int i) { STAN_NO_RANGE_CHECKS_RETURN; if (!(i >= static_cast(stan::error_index::value) && i < static_cast(y.size() + stan::error_index::value))) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << " for " << name; std::string msg_str(msg.str()); out_of_range(function, y.size(), i, msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_mat_finite.hpp0000644000176200001440000000133514645137106024044 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_MAT_FINITE_HPP #define STAN_MATH_PRIM_ERR_IS_MAT_FINITE_HPP #include #include #include namespace stan { namespace math { /** * Return true is the specified matrix is finite. 
* @tparam T Scalar type of the matrix, requires class method * .allFinite() * @tparam EigMat A type derived from `EigenBase` * @param y Matrix to test * @return true if the matrix is finite **/ template * = nullptr> inline bool is_mat_finite(const EigMat& y) { return y.array().isFinite().all(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_ordered.hpp0000644000176200001440000000131714645137106023351 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_ORDERED_HPP #define STAN_MATH_PRIM_ERR_IS_ORDERED_HPP #include #include #include namespace stan { namespace math { /** * Return true if the vector is sorted into strictly * increasing order. * @tparam T_y Type of scalar, requires class method .size() * @param y std::vector to test * @return true if vector is sorted in ascending order */ template inline bool is_ordered(const std::vector& y) { for (size_t n = 1; n < y.size(); ++n) { if (!(y[n] > y[n - 1])) { return false; } } return true; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/throw_domain_error.hpp0000644000176200001440000000370014645137106025133 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_THROW_DOMAIN_ERROR_HPP #define STAN_MATH_PRIM_ERR_THROW_DOMAIN_ERROR_HPP #include #include #include namespace stan { namespace math { /** * Throw a domain error with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * domain errors. This will allow us to change the behavior for all * functions at once. * The message is: ": " * @tparam T Type of variable. * @param[in] function Name of the function. * @param[in] name Name of the variable. * @param[in] y Variable. * @param[in] msg1 Message to print before the variable. * @param[in] msg2 Message to print after the variable. * @throw std::domain_error Always. 
*/ template inline void throw_domain_error(const char* function, const char* name, const T& y, const char* msg1, const char* msg2) { std::ostringstream message; // hack to remove -Waddress, -Wnonnull-compare warnings from GCC 6 const T* y_ptr = &y; message << function << ": " << name << " " << msg1 << (*y_ptr) << msg2; throw std::domain_error(message.str()); } /** * Throw a domain error with a consistently formatted message. * This is an abstraction for all Stan functions to use when throwing * domain errors. This will allow us to change the behavior for all * functions at once. * The message is: * ": " * @tparam T Type of variable. * @param[in] function Name of the function. * @param[in] name Name of the variable. * @param[in] y Variable. * @param[in] msg1 Message to print before the variable. * @throw std::domain_error Always. */ template inline void throw_domain_error(const char* function, const char* name, const T& y, const char* msg1) { throw_domain_error(function, name, y, msg1, ""); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_consistent_sizes_mvt.hpp0000644000176200001440000000506614645137106026670 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_CONSISTENT_SIZES_MVT_HPP #define STAN_MATH_PRIM_ERR_CHECK_CONSISTENT_SIZES_MVT_HPP #include #include #include #include #include #include namespace stan { namespace math { /** Trivial no input case, this function is a no-op. */ inline void check_consistent_sizes_mvt(const char*) { return; } /** * Base case of recursion, this function is a no-op. * @tparam T1 type of first input **/ template inline void check_consistent_sizes_mvt(const char*, const char*, const T1&) { return; } /** * Check that the provided multivariate inputs are of consistent size with each * other. Two multivariate inputs are of consistent size if both are * `std::vector`s of the same size, or if at least one is a not an * `std::vector`. 
* * E.g.: check_consistent_sizes_mvt("some_function", "x1", x1, "x2", x2, etc.). * * @tparam T1 type of first input * @tparam T2 type of second input * @tparam Ts type of other inputs * @param function function name (for error messages) * @param name1 name of variable corresponding to first input * @param x1 first input * @param name2 name of variable corresponding to second input * @param x2 second input * @param names_and_xs more inputs * @throw `invalid_argument` if sizes are inconsistent */ template inline void check_consistent_sizes_mvt(const char* function, const char* name1, const T1& x1, const char* name2, const T2& x2, const Ts&... names_and_xs) { if (!is_std_vector::value && is_std_vector::value) { check_consistent_sizes_mvt(function, name2, x2, name1, x1, names_and_xs...); } else if (!is_std_vector::value) { check_consistent_sizes_mvt(function, name1, x1, names_and_xs...); } else if (stan::math::size(x1) == stan::math::size(x2)) { check_consistent_sizes_mvt(function, name1, x1, names_and_xs...); } else { [&]() STAN_COLD_PATH { size_t size_x1 = stan::math::size(x1); size_t size_x2 = stan::math::size(x2); std::stringstream msg; msg << ", but " << name2 << " has size " << size_x2 << "; and they must be the same size."; std::string msg_str(msg.str()); invalid_argument(function, name1, size_x1, "has size = ", msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_consistent_sizes.hpp0000644000176200001440000000463014645137106025776 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_CONSISTENT_SIZES_HPP #define STAN_MATH_PRIM_ERR_CHECK_CONSISTENT_SIZES_HPP #include #include #include #include #include #include namespace stan { namespace math { /** Trivial no input case, this function is a no-op. */ inline void check_consistent_sizes(const char*) { return; } /** * Base case of recursion, this function is a no-op. 
* @tparam T1 type of first input **/ template inline void check_consistent_sizes(const char*, const char*, const T1&) { return; } /** * Check that the inputs are of consistent size. Two inputs are of consistent * size if they are vectors of the same size, or if at least one is not a * vector. * * E.g.: check_consistent_sizes("some_function", "x1", x1, "x2", x2, etc.). * * @tparam T1 type of first input * @tparam T2 type of second input * @tparam Ts type of other inputs * @param function function name (for error messages) * @param name1 name of variable corresponding to first input * @param x1 first input * @param name2 name of variable corresponding to second input * @param x2 second input * @param names_and_xs more inputs * @throw `invalid_argument` if sizes are inconsistent */ template inline void check_consistent_sizes(const char* function, const char* name1, const T1& x1, const char* name2, const T2& x2, const Ts&... names_and_xs) { if (!is_vector::value && is_vector::value) { check_consistent_sizes(function, name2, x2, name1, x1, names_and_xs...); } else if (!is_vector::value) { check_consistent_sizes(function, name1, x1, names_and_xs...); } else if (stan::math::size(x1) == stan::math::size(x2)) { check_consistent_sizes(function, name1, x1, names_and_xs...); } else { [&]() STAN_COLD_PATH { size_t size_x1 = stan::math::size(x1); size_t size_x2 = stan::math::size(x2); std::stringstream msg; msg << ", but " << name2 << " has size " << size_x2 << "; and they must be the same size."; std::string msg_str(msg.str()); invalid_argument(function, name1, size_x1, "has size = ", msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_matching_dims.hpp0000644000176200001440000001423714645137106025202 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_MATCHING_DIMS_HPP #define STAN_MATH_PRIM_ERR_CHECK_MATCHING_DIMS_HPP #include #include #include #include #include #include namespace stan { namespace math { 
/** * Check if the two containers have the same dimensions. * @tparam T1 type of the first container * @tparam T2 type of the second container * @param function name of function (for error messages) * @param name1 variable name for the first container (for error messages) * @param y1 first container to test * @param name2 variable name for the second container (for error messages) * @param y2 second container to test * @throw std::invalid_argument if the dimensions of the * containers do not match */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< T1, T2>* = nullptr> inline void check_matching_dims(const char* function, const char* name1, const T1& y1, const char* name2, const T2& y2) { std::vector y1_d = dims(y1); std::vector y2_d = dims(y2); auto error_throw = [&]() STAN_COLD_PATH { std::ostringstream y1s; if (y1_d.size() > 0) { y1s << y1_d[0]; for (int i = 1; i < y1_d.size(); i++) { y1s << ", " << y1_d[i]; } } std::ostringstream msg; msg << ") and " << name2 << " ("; if (y2_d.size() > 0) { msg << y2_d[0]; for (int i = 1; i < y2_d.size(); i++) { msg << ", " << y2_d[i]; } } msg << ") must match in size"; std::string msg_str(msg.str()); invalid_argument(function, name1, y1s.str(), "(", msg_str.c_str()); }; if (y1_d.size() != y2_d.size()) { error_throw(); } else { for (int i = 0; i < y1_d.size(); i++) { if (y1_d[i] != y2_d[i]) { error_throw(); } } } } /** * Check if two matrices have the same row and column dimensions. * @tparam T1 Either an Eigen type or a `var_value` with underlying Eigen type. * @tparam T2 Either an Eigen type or a `var_value` with underlying Eigen type. 
* @param function name of function (for error messages) * @param name1 variable name for the first container (for error messages) * @param y1 first matrix to test * @param name2 variable name for the second container (for error messages) * @param y2 second matrix to test * @throw std::invalid_argument if the dimensions of the * containers do not match */ template < typename T1, typename T2, require_any_t, is_matrix>, conjunction, is_prim_or_rev_kernel_expression>>* = nullptr, require_any_not_stan_scalar_t* = nullptr> inline void check_matching_dims(const char* function, const char* name1, const T1& y1, const char* name2, const T2& y2) { if (y1.rows() != y2.rows() || y1.cols() != y2.cols()) { [&]() STAN_COLD_PATH { std::ostringstream y1_err; std::ostringstream msg_str; y1_err << "(" << y1.rows() << ", " << y1.cols() << ")"; msg_str << y2.rows() << ", " << y2.cols() << ") must match in size"; invalid_argument(function, name1, y1_err.str(), "(", std::string(msg_str.str()).c_str()); }(); } } /** * Check if two matrices have the same row and column dimensions. * @tparam T1 Either an Eigen type, a `var_value` with underlying Eigen type, or * scalar. * @tparam T2 Either an Eigen type, a `var_value` with underlying Eigen type, or * scalar. 
* @param function name of function (for error messages) * @param name1 variable name for the first container (for error messages) * @param y1 first argument to test * @param name2 variable name for the second container (for error messages) * @param y2 second argument to test * @throw std::invalid_argument if the dimensions of the * containers do not match */ template * = nullptr, require_any_stan_scalar_t* = nullptr> inline void check_matching_dims(const char* function, const char* name1, const T1& y1, const char* name2, const T2& y2) { std::string y1_err(""); std::string msg_str("Tried Checking the dimensions of a matrix vs a scalar"); invalid_argument(function, name1, y1_err, "", msg_str.c_str()); } /** * Check if the two matrices are of the same size. * This function checks the runtime sizes and can also check the static * sizes as well. For example, a 4x1 matrix is not the same as a vector * with 4 elements. * @tparam check_compile Whether to check the static sizes * @tparam Mat1 type of the first matrix * @tparam Mat2 type of the second matrix * @param function name of function (for error messages) * @param name1 variable name for the first matrix (for error messages) * @param y1 first matrix to test * @param name2 variable name for the second matrix (for error messages) * @param y2 second matrix to test * @throw std::invalid_argument if the dimensions of the matrices * do not match */ template > inline void check_matching_dims(const char* function, const char* name1, const Mat1& y1, const char* name2, const Mat2& y2) { if (check_compile && (static_cast(Mat1::RowsAtCompileTime) != static_cast(Mat2::RowsAtCompileTime) || static_cast(Mat1::ColsAtCompileTime) != static_cast(Mat2::ColsAtCompileTime))) { [&]() STAN_COLD_PATH { std::ostringstream msg; msg << "Static rows and cols of " << name1 << " and " << name2 << " must match in size."; std::string msg_str(msg.str()); invalid_argument(function, msg_str.c_str(), "", ""); }(); } check_matching_dims(function, name1, 
y1, name2, y2); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/constraint_tolerance.hpp0000644000176200001440000000113614645137106025451 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CONSTRAINT_TOLERANCE_HPP #define STAN_MATH_PRIM_ERR_CONSTRAINT_TOLERANCE_HPP #ifndef STAN_MATH_CONSTRAINT_TOLERANCE #define STAN_MATH_CONSTRAINT_TOLERANCE 1E-8 #endif #include namespace stan { namespace math { /** * The tolerance for checking arithmetic bounds in rank and in * simplexes. The default value is 1E-8. * This can changed by defining STAN_MATH_CONSTRAINT_TOLERANCE * at compile time. */ const double CONSTRAINT_TOLERANCE = STAN_MATH_CONSTRAINT_TOLERANCE; } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_column_index.hpp0000644000176200001440000000155114645137106024411 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_COLUMN_INDEX_HPP #define STAN_MATH_PRIM_ERR_IS_COLUMN_INDEX_HPP #include #include namespace stan { namespace math { /** * Return true if column index is in bounds. * By default this is a 1-indexed check (as opposed to zero-indexed). * Behavior can be changed by setting stan::error_index::value. * @tparam EigMat A type derived from `EigenBase` * @param y matrix to test * @param i column index to check * @return true if column index is in bounds */ template * = nullptr> inline bool is_column_index(const EigMat& y, size_t i) { return i >= stan::error_index::value && i < static_cast(y.cols()) + stan::error_index::value; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_multiplicable.hpp0000644000176200001440000000276114645137106025221 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_MULTIPLICABLE_HPP #define STAN_MATH_PRIM_ERR_CHECK_MULTIPLICABLE_HPP #include #include #include #include namespace stan { namespace math { /** * Check if the matrices can be multiplied. 
* This checks the runtime sizes to determine whether the two * matrices are multiplicable. This allows Eigen matrices, * vectors, and row vectors to be checked. * @tparam T1 Type of first matrix * @tparam T2 Type of second matrix * @param function Function name (for error messages) * @param name1 Variable name for the first matrix (for error messages) * @param y1 First matrix to test, requires class access to .rows() * and .cols() * @param name2 Variable name for the second matrix (for error messages) * @param y2 Second matrix to test, requires class access to * .rows() and .cols() * @throw std::invalid_argument if the matrices are not * multiplicable or if either matrix is size 0 for either rows or columns */ template inline void check_multiplicable(const char* function, const char* name1, const T1& y1, const char* name2, const T2& y2) { check_size_match(function, "Columns of ", name1, y1.cols(), "Rows of ", name2, y2.rows()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_3F2_converges.hpp0000644000176200001440000000576214645137106025004 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_3F2_CONVERGES_HPP #define STAN_MATH_PRIM_ERR_CHECK_3F2_CONVERGES_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Check if the hypergeometric function (3F2) called with * supplied arguments will converge, assuming arguments are * finite values. 
* @tparam T_a1 Type of a1 * @tparam T_a2 Type of a2 * @tparam T_a3 Type of a3 * @tparam T_b1 Type of b1 * @tparam T_b2 Type of b2 * @tparam T_z Type of z * @param function Name of function ultimately relying on 3F2 (for error & messages) * @param a1 Variable to check * @param a2 Variable to check * @param a3 Variable to check * @param b1 Variable to check * @param b2 Variable to check * @param z Variable to check * @throw domain_error if 3F2(a1, a2, a3, b1, b2, z) * does not meet convergence conditions, or if any coefficient is NaN. */ template inline void check_3F2_converges(const char* function, const T_a1& a1, const T_a2& a2, const T_a3& a3, const T_b1& b1, const T_b2& b2, const T_z& z) { using std::fabs; using std::floor; check_not_nan("check_3F2_converges", "a1", a1); check_not_nan("check_3F2_converges", "a2", a2); check_not_nan("check_3F2_converges", "a3", a3); check_not_nan("check_3F2_converges", "b1", b1); check_not_nan("check_3F2_converges", "b2", b2); check_not_nan("check_3F2_converges", "z", z); int num_terms = 0; bool is_polynomial = false; if (is_nonpositive_integer(a1) && fabs(a1) >= num_terms) { is_polynomial = true; num_terms = floor(fabs(value_of_rec(a1))); } if (is_nonpositive_integer(a2) && fabs(a2) >= num_terms) { is_polynomial = true; num_terms = floor(fabs(value_of_rec(a2))); } if (is_nonpositive_integer(a3) && fabs(a3) >= num_terms) { is_polynomial = true; num_terms = floor(fabs(value_of_rec(a3))); } bool is_undefined = (is_nonpositive_integer(b1) && fabs(b1) <= num_terms) || (is_nonpositive_integer(b2) && fabs(b2) <= num_terms); if (is_polynomial && !is_undefined) { return; } if (fabs(z) < 1.0 && !is_undefined) { return; } if (fabs(z) == 1.0 && !is_undefined && b1 + b2 > a1 + a2 + a3) { return; } std::stringstream msg; msg << "called from function '" << function << "', " << "hypergeometric function 3F2 does not meet convergence " << "conditions with given arguments. 
" << "a1: " << a1 << ", a2: " << a2 << ", a3: " << a3 << ", b1: " << b1 << ", b2: " << b2 << ", z: " << z; throw std::domain_error(msg.str()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_vector_index.hpp0000644000176200001440000000256614645137106025067 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_VECTOR_INDEX_HPP #define STAN_MATH_PRIM_ERR_CHECK_VECTOR_INDEX_HPP #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified index is a valid element of the row or column vector * This check is 1-indexed by default. This behavior can be changed * by setting stan::error_index::value. * @tparam T Vector type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y vector to test * @param i row index to check * @throw std::out_of_range if the index is out of range. */ template < typename T, require_any_t, is_prim_or_rev_kernel_expression>* = nullptr> inline void check_vector_index(const char* function, const char* name, const T& y, size_t i) { STAN_NO_RANGE_CHECKS_RETURN; if (!(i >= stan::error_index::value && i < static_cast(y.size()) + stan::error_index::value)) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << " for size of " << name; std::string msg_str(msg.str()); out_of_range(function, y.rows(), i, msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_lower_triangular.hpp0000644000176200001440000000307314645137106025750 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_LOWER_TRIANGULAR_HPP #define STAN_MATH_PRIM_ERR_CHECK_LOWER_TRIANGULAR_HPP #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified matrix is lower triangular. * A matrix x is not lower triangular if there is a non-zero entry * x[m, n] with m < n. 
This function only inspects the upper * triangular portion of the matrix, not including the diagonal. * @tparam T Type of the matrix * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Matrix to test * @throw std::domain_error if the matrix is not * lower triangular or if any element in the upper triangular * portion is NaN */ template * = nullptr> inline void check_lower_triangular(const char* function, const char* name, const T_y& y) { const auto& y_ref = to_ref(y); for (int n = 1; n < y.cols(); ++n) { for (int m = 0; m < n && m < y.rows(); ++m) { if (y_ref(m, n) != 0) { std::stringstream msg; msg << "is not lower triangular;" << " " << name << "[" << stan::error_index::value + m << "," << stan::error_index::value + n << "]="; std::string msg_str(msg.str()); throw_domain_error(function, name, y_ref(m, n), msg_str.c_str()); } } } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/validate_unit_vector_index.hpp0000644000176200001440000000260614645137106026635 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_VALIDATE_UNIT_VECTOR_INDEX_HPP #define STAN_MATH_PRIM_ERR_VALIDATE_UNIT_VECTOR_INDEX_HPP #include #include #include #include namespace stan { namespace math { /** * Check that unit vector is at least size 2 * * @param var_name Name of unit vector variable * @param expr Expression in which variable is declared * @param val Size of unit vector * @throw std::invalid_argument if simplex size is less than 2 */ inline void validate_unit_vector_index(const char* var_name, const char* expr, int val) { if (val <= 1) { [&]() STAN_COLD_PATH { std::stringstream msg; if (val == 1) { msg << "Found dimension size one in unit vector declaration." << " One-dimensional unit vector is discrete" << " but the target distribution must be continuous." 
<< " variable=" << var_name << "; dimension size expression=" << expr; } else { msg << "Found dimension size less than one in unit vector declaration" << "; variable=" << var_name << "; dimension size expression=" << expr << "; expression value=" << val; } std::string msg_str(msg.str()); throw std::invalid_argument(msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_not_nan.hpp0000644000176200001440000000213414645137106024021 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_NOT_NAN_HPP #define STAN_MATH_PRIM_ERR_CHECK_NOT_NAN_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Check if y is not NaN. * This function is vectorized and will check each element of * y. If any element is NaN, this * function will throw an exception. * @tparam T_y Type of y * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @throw domain_error if any element of y is NaN */ template inline void check_not_nan(const char* function, const char* name, const T_y& y) { elementwise_check([](double x) { return !std::isnan(x); }, function, name, y, "not nan"); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/throw_domain_error_mat.hpp0000644000176200001440000000555314645137106026004 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_THROW_DOMAIN_ERROR_MAT_HPP #define STAN_MATH_PRIM_ERR_THROW_DOMAIN_ERROR_MAT_HPP #include #include #include #include #include namespace stan { namespace math { /** * Throw a domain error with a consistently formatted message for matrices. * This is an abstraction for all Stan functions to use when throwing * domain errors. This will allow us to change the behavior for all * functions at once. 
* The message is: ": [] * " where error_index is the value of stan::error_index::value which * indicates whether the message should be 0 or 1 indexed. * @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param i Row index * @param j Column index * @param msg1 Message to print before the variable * @param msg2 Message to print after the variable * @throw std::domain_error Always. */ template * = nullptr> inline void throw_domain_error_mat(const char* function, const char* name, const T& y, size_t i, size_t j, const char* msg1, const char* msg2) { std::ostringstream vec_name_stream; if (is_col_vector::value) { vec_name_stream << name << "[" << stan::error_index::value + i << "]"; } else if (is_row_vector::value) { vec_name_stream << name << "[" << stan::error_index::value + j << "]"; } else { vec_name_stream << name << "[" << stan::error_index::value + i << ", " << stan::error_index::value + j << "]"; } std::string vec_name(vec_name_stream.str()); throw_domain_error(function, vec_name.c_str(), y.coeff(i, j), msg1, msg2); } /** * Throw a domain error with a consistently formatted message for matrices. * This is an abstraction for all Stan functions to use when throwing * domain errors. This will allow us to change the behavior for all * functions at once. * The message is: ": [] * " where error_index is the value of stan::error_index::value which * indicates whether the message should be 0 or 1 indexed. 
* @tparam T Type of variable * @param function Name of the function * @param name Name of the variable * @param y Variable * @param i Row index * @param j Column index * @param msg Message to print before the variable * @throw std::domain_error Always */ template inline void throw_domain_error_mat(const char* function, const char* name, const T& y, size_t i, size_t j, const char* msg) { throw_domain_error_vec(function, name, y, i, j, msg, ""); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_matching_dims.hpp0000644000176200001440000000355014645137106024534 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_MATCHING_DIMS_HPP #define STAN_MATH_PRIM_ERR_IS_MATCHING_DIMS_HPP #include #include #include namespace stan { namespace math { /** * Return true if the two matrices are of the same size. * This function checks the runtime sizes only. * @tparam EigMat1 A type derived from `EigenBase` * @tparam EigMat2 A type derived from `EigenBase` * @param y1 first matrix to test * @param y2 second matrix to test * @return true if the dimensions of the matrices match */ template * = nullptr> inline bool is_matching_dims(const EigMat1& y1, const EigMat2& y2) { return is_size_match(y1.rows(), y2.rows()) && is_size_match(y1.cols(), y2.cols()); } /** * Return true if the two matrices are of the same size. * This function checks the runtime sizes and can also check the static * sizes as well. For example, a 4x1 matrix is not the same as a vector * with 4 elements. 
* @tparam check_compile Whether to check the static sizes * @tparam EigMat1 A type derived from `EigenBase` * @tparam EigMat2 A type derived from `EigenBase` * @param y1 first matrix to test * @param y2 second matrix to test * @return true if the dimensions of the matrices match */ template * = nullptr> inline bool is_matching_dims(const EigMat1& y1, const EigMat2& y2) { return !(check_compile && (EigMat1::RowsAtCompileTime != EigMat2::RowsAtCompileTime || EigMat1::ColsAtCompileTime != EigMat2::ColsAtCompileTime)) && is_matching_dims(y1, y2); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_positive.hpp0000644000176200001440000000346314645137106024235 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_POSITIVE_HPP #define STAN_MATH_PRIM_ERR_CHECK_POSITIVE_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** * Check if y is positive. * This function is vectorized and will check each element of * y. * @tparam T_y Type of y * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @throw domain_error if y is negative or zero or * if any element of y is NaN */ template inline void check_positive(const char* function, const char* name, const T_y& y) { elementwise_check([](double x) { return x > 0; }, function, name, y, "positive"); } /** * Check if size is positive. * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param expr Expression for the dimension size (for error messages) * @param size Size value to check * @throw std::invalid_argument if size is * zero or negative. 
*/ inline void check_positive(const char* function, const char* name, const char* expr, int size) { if (size <= 0) { [&]() STAN_COLD_PATH { std::stringstream msg; msg << "; dimension size expression = " << expr; std::string msg_str(msg.str()); invalid_argument(function, name, size, "must have a positive size, but is ", msg_str.c_str()); }(); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_flag_sundials.hpp0000644000176200001440000003665114645137106025213 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_FLAG_SUNDIALS_HPP #define STAN_MATH_PRIM_ERR_CHECK_FLAG_SUNDIALS_HPP #include #include #include #include #include namespace stan { namespace math { #define CHECK_CVODES_CALL(call) cvodes_check(call, #call) #define CHECK_IDAS_CALL(call) idas_check(call, #call) #define CHECK_KINSOL_CALL(call) kinsol_check(call, #call) /** * Map cvodes error flag to acutally error msg. The most frequent * errors are put at the top. An alternative would be to use std::map * but in our case the difference would be negligible. Note that we * don't use CVGetReturnFlagName function to retrieve the constant * because sanitizer indicates it contains mem leak. 
* * @param flag * * @return error msg string constant and actuall informative msg */ inline std::array cvodes_flag_msg(int flag) { std::array msg; switch (flag) { case -1: msg = {"CV_TOO_MUCH_WORK", "The solver took mxstep internal steps but could not reach tout"}; break; // NOLINT case -2: msg = {"CV_TOO_MUCH_ACC", "The solver could not satisfy the accuracy demanded by the user " "for some internal step"}; break; // NOLINT case -3: msg = {"CV_ERR_FAILURE", "Error test failures occurred too many times during one internal " "time step or minimum step size was reached"}; break; // NOLINT case -4: msg = {"CV_CONV_FAILURE", "Convergence test failures occurred too many times during one " "internal time step or minimum step size was reached"}; break; // NOLINT case -8: msg = {"CV_RHSFUNC_FAIL", "The right-hand side function failed in an unrecoverable manner"}; break; // NOLINT case -9: msg = {"CV_FIRST_RHSFUNC_ERR", "The right-hand side function failed at the first call"}; break; // NOLINT case -10: msg = {"CV_REPTD_RHSFUNC_ERR", "The right-hand side function had repetead recoverable errors"}; break; // NOLINT case -11: msg = {"CV_UNREC_RHSFUNC_ERR", "The right-hand side function had a recoverable error, but no " "recovery is possible"}; break; // NOLINT case -27: msg = {"CV_TOO_CLOSE", "The output and initial times are too close to each other"}; break; // NOLINT default: switch (flag) { case -5: msg = {"CV_LINIT_FAIL", "The linear solver's initialization function failed"}; break; // NOLINT case -6: msg = {"CV_LSETUP_FAIL", "The linear solver's setup function failed in an " "unrecoverable manner"}; break; // NOLINT case -7: msg = {"CV_LSOLVE_FAIL", "The linear solver's solve function failed in an " "unrecoverable manner"}; break; // NOLINT case -20: msg = {"CV_MEM_FAIL", "A memory allocation failed"}; break; // NOLINT case -21: msg = {"CV_MEM_NULL", "The cvode_mem argument was NULL"}; break; // NOLINT case -22: msg = {"CV_ILL_INPUT", "One of the function inputs is 
illegal"}; break; // NOLINT case -23: msg = {"CV_NO_MALLOC", "The CVODE memory block was not allocated by a call to " "CVodeMalloc"}; break; // NOLINT case -24: msg = {"CV_BAD_K", "The derivative order k is larger than the order used"}; break; // NOLINT case -25: msg = {"CV_BAD_T", "The time t s outside the last step taken"}; break; // NOLINT case -26: msg = {"CV_BAD_DKY", "The output derivative vector is NULL"}; break; // NOLINT case -40: msg = {"CV_BAD_IS", "The sensitivity index is larger than the number of " "sensitivities computed"}; break; // NOLINT case -41: msg = {"CV_NO_SENS", "Forward sensitivity integration was not activated"}; break; // NOLINT case -42: msg = {"CV_SRHSFUNC_FAIL", "The sensitivity right-hand side function failed in an " "unrecoverable manner"}; break; // NOLINT case -43: msg = {"CV_FIRST_SRHSFUNC_ER", "The sensitivity right-hand side function failed at the first " "call"}; break; // NOLINT case -44: msg = {"CV_REPTD_SRHSFUNC_ER", "The sensitivity ight-hand side function had repetead " "recoverable errors"}; break; // NOLINT case -45: msg = {"CV_UNREC_SRHSFUNC_ER", "The sensitivity right-hand side function had a recoverable " "error, but no recovery is possible"}; break; // NOLINT case -101: msg = {"CV_ADJMEM_NULL", "The cvadj_mem argument was NULL"}; break; // NOLINT case -103: msg = {"CV_BAD_TB0", "The final time for the adjoint problem is outside the " "interval over which the forward problem was solved"}; break; // NOLINT case -104: msg = {"CV_BCKMEM_NULL", "The cvodes memory for the backward problem was not created"}; break; // NOLINT case -105: msg = {"CV_REIFWD_FAIL", "Reinitialization of the forward problem failed at the first " "checkpoint"}; break; // NOLINT case -106: msg = { "CV_FWD_FAIL", "An error occured during the integration of the forward problem"}; break; // NOLINT case -107: msg = {"CV_BAD_ITASK", "Wrong task for backward integration"}; break; // NOLINT case -108: msg = {"CV_BAD_TBOUT", "The desired output time is 
outside the interval over which " "the forward problem was solved"}; break; // NOLINT case -109: msg = {"CV_GETY_BADT", "Wrong time in interpolation function"}; break; // NOLINT } } return msg; } /** * Throws a std::runtime_error exception when a Sundial function fails * (i.e. returns a negative flag) * * @param flag Error flag * @param func_name Name of the function that returned the flag * @throw std::runtime_error if the flag is negative */ inline void cvodes_check(int flag, const char* func_name) { if (flag < 0) { std::ostringstream ss; ss << func_name << " failed with error flag " << flag << ": \n" << cvodes_flag_msg(flag).at(1) << "."; if (flag == -1 || flag == -4) { throw std::domain_error(ss.str()); } else { throw std::runtime_error(ss.str()); } } } inline std::array idas_flag_msg(int flag) { std::array msg; switch (flag) { case -1: msg = {"IDA_TOO_MUCH_WORK", "The solver took mxstep internal steps but could not reach tout."}; break; // NOLINT case -2: msg = {"IDA_TOO_MUCH_ACC", "The solver could not satisfy the accuracy demanded by the user " "for some internal step."}; break; // NOLINT case -3: msg = {"IDA_ERR_FAIL", "Error test failures occurred too many times during one internal " "time step or minimum step size was reached."}; break; // NOLINT case -4: msg = {"IDA_CONV_FAIL", "Convergence test failures occurred too many times during one " "internal time step or minimum step size was reached."}; break; // NOLINT case -5: msg = {"IDA_LINIT_FAIL", "The linear solver’s initialization function failed."}; break; // NOLINT case -6: msg = {"IDA_LSETUP_FAIL", "The linear solver’s setup function failed in an unrecoverable " "manner."}; break; // NOLINT case -7: msg = {"IDA_LSOLVE_FAIL", "The linear solver’s solve function failed in an unrecoverable " "manner."}; break; // NOLINT case -8: msg = {"IDA_RES_FAIL", "The user-provided residual function failed in an unrecoverable " "manner."}; break; // NOLINT case -9: msg = {"IDA_REP_RES_FAIL", "The user-provided 
residual function repeatedly returned a " "recoverable error flag, but the solver was unable to recover."}; break; // NOLINT case -10: msg = {"IDA_RTFUNC_FAIL", "The rootfinding function failed in an unrecoverable manner."}; break; // NOLINT case -11: msg = {"IDA_CONSTR_FAIL", "The inequality constraints were violated and the solver was " "unable to recover."}; break; // NOLINT case -12: msg = {"IDA_FIRST_RES_FAIL", "The user-provided residual function failed recoverably on the " "first call."}; break; // NOLINT case -13: msg = {"IDA_LINESEARCH_FAIL", "The line search failed."}; break; // NOLINT default: switch (flag) { case -14: msg = {"IDA_NO_RECOVERY", "The residual function, linear solver setup function, or " "linear solver solve function had a recoverable failure, but " "IDACalcIC could not recover."}; break; // NOLINT case -15: msg = {"IDA_NLS_INIT_FAIL", "The nonlinear solver’s init routine failed."}; break; // NOLINT case -16: msg = {"IDA_NLS_SETUP_FAIL", "The nonlinear solver’s setup routine failed."}; break; // NOLINT case -20: msg = {"IDA_MEM_NULL", "The ida mem argument was NULL."}; break; // NOLINT case -21: msg = {"IDA_MEM_FAIL", "A memory allocation failed."}; break; // NOLINT case -22: msg = {"IDA_ILL_INPUT", "One of the function inputs is illegal."}; break; // NOLINT case -23: msg = {"IDA_NO_MALLOC", "The idas memory was not allocated by a call to IDAInit."}; break; // NOLINT case -24: msg = {"IDA_BAD_EWT", "Zero value of some error weight component."}; break; // NOLINT case -25: msg = {"IDA_BAD_K", "The k-th derivative is not available."}; break; // NOLINT case -26: msg = {"IDA_BAD_T", "The time t is outside the last step taken."}; break; // NOLINT case -27: msg = { "IDA_BAD_DKY", "The vector argument where derivative should be stored is NULL."}; break; // NOLINT case -30: msg = {"IDA_NO_QUAD", "Quadratures were not initialized."}; break; // NOLINT case -31: msg = {"IDA_QRHS_FAIL", "The user-provided right-hand side function for quadratures " 
"failed in an unrecoverable manner."}; break; // NOLINT case -32: msg = {"IDA_FIRST_QRHS_ERR", "The user-provided right-hand side function for quadratures " "failed -in an unrecoverable manner on the first call."}; break; // NOLINT case -33: msg = {"IDA_REP_QRHS_ERR", "The user-provided right-hand side repeatedly returned a re- " "coverable error flag, but the solver was unable to recover."}; break; // NOLINT case -40: msg = {"IDA_NO_SENS", "Sensitivities were not initialized."}; break; // NOLINT case -41: msg = {"IDA_SRES_FAIL", "The user-provided sensitivity residual function failed in an " "unrecoverable manner."}; break; // NOLINT case -42: msg = {"IDA_REP_SRES_ERR", "The user-provided sensitivity residual function repeatedly " "re- turned a recoverable error flag, but the solver was " "unable to recover."}; break; // NOLINT case -43: msg = {"IDA_BAD_IS", "The sensitivity identifier is not valid."}; break; // NOLINT case -50: msg = {"IDA_NO_QUADSENS", "Sensitivity-dependent quadratures were not initialized."}; break; // NOLINT case -51: msg = {"IDA_QSRHS_FAIL", "The user-provided sensitivity-dependent quadrature right- " "hand side function failed in an unrecoverable manner."}; break; // NOLINT case -52: msg = {"IDA_FIRST_QSRHS_ERR", "The user-provided sensitivity-dependent quadrature right- " "hand side function failed in an unrecoverable manner on the " "first call."}; break; // NOLINT case -53: msg = {"IDA_REP_QSRHS_ERR", "The user-provided sensitivity-dependent quadrature right- " "hand side repeatedly returned a recoverable error flag, but " "the solver was unable to recover."}; break; // NOLINT } } return msg; } inline void idas_check(int flag, const char* func_name) { if (flag < 0) { std::ostringstream ss; ss << func_name << " failed with error flag " << flag << ": \n" << idas_flag_msg(flag).at(1); if (flag == -1 || flag == -4) { throw std::domain_error(ss.str()); } else { throw std::runtime_error(ss.str()); } } } /** * Throws an exception message when 
the functions in KINSOL * fails. "KINGetReturnFlagName()" from SUNDIALS has a mem leak bug so * until it's fixed we cannot use it to extract flag error string. * * @param flag Error flag * @param func_name calling function name * @throw std::runtime_error if the flag is negative. */ inline void kinsol_check(int flag, const char* func_name) { if (flag < 0) { std::ostringstream ss; ss << "algebra_solver failed with error flag " << flag << "."; throw std::runtime_error(ss.str()); } } /** * Throws an exception message when the KINSol() call fails. * When the exception is caused * by a tuning parameter the user controls, gives a specific * error. * * @param flag Error flag * @param func_name calling function name * @param max_num_steps max number of nonlinear iterations. * @throw std::runtime_error if the flag is negative. * @throw std::domain_error if the flag indicates max * number of steps is exceeded. */ inline void kinsol_check(int flag, const char* func_name, long int max_num_steps) { // NOLINT(runtime/int) std::ostringstream ss; if (flag == -6) { domain_error("algebra_solver", "maximum number of iterations", max_num_steps, "(", ") was exceeded in the solve."); } else if (flag == -11) { ss << "The linear solver’s setup function failed " << "in an unrecoverable manner."; throw std::runtime_error(ss.str()); } else if (flag < 0) { ss << "algebra_solver failed with error flag " << flag << "."; throw std::runtime_error(ss.str()); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_greater.hpp0000644000176200001440000004113614645137106024023 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_GREATER_HPP #define STAN_MATH_PRIM_ERR_CHECK_GREATER_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Throw an exception if `y` is not strictly greater than `low`. 
This function * is vectorized and will check each element of `y` against each element of * `low`. * @tparam Idxs A parameter pack of Integral types * @tparam T_y A scalar type type * @tparam T_low A scalar type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { if (unlikely(!(y > low))) { [](auto y, auto low, auto function, auto name, auto... idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be greater than " + std::to_string(value_of_rec(low))) .c_str()); }(y, low, function, name, idxs...); } } /** * Throw an exception if `y` is not strictly greater than each element of `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam Idxs A parameter pack of Integral types * @tparam T_y A scalar type * @tparam T_low A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar. * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template < typename T_y, typename T_low, require_stan_scalar_t* = nullptr, require_vector_t* = nullptr, require_not_std_vector_vt* = nullptr, typename... 
Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& low_arr = value_of_rec(as_array_or_scalar(to_ref(low))); for (Eigen::Index i = 0; i < low_arr.size(); ++i) { if (unlikely(!(y > low_arr.coeff(i)))) { [](auto y, auto&& low_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error( function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be greater than " + std::to_string(low_arr.coeff(i))) .c_str()); }(y, low_arr, name, function, i, idxs...); } } } /** * Throw an exception if `y` is not strictly greater than each element of `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam Idxs A parameter pack of Integral types * @tparam T_y A scalar type * @tparam T_low Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_dense_dynamic_t* = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& low_arr = value_of_rec(to_ref(low)); for (Eigen::Index j = 0; j < low_arr.cols(); ++j) { for (Eigen::Index i = 0; i < low_arr.rows(); ++i) { if (unlikely(!(y > low_arr.coeff(i, j)))) { [](auto y, auto&& low_arr, auto name, auto function, auto i, auto j, auto... 
idxs) STAN_COLD_PATH { throw_domain_error(function, internal::make_iter_name(name, idxs...).c_str(), y, "is ", (", but must be greater than " + std::to_string(low_arr.coeff(i, j))) .c_str()); }(y, low_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not strictly greater than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam Idxs A parameter pack of Integral types * @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar * @tparam T_low A scalar type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_not_std_vector_vt* = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(as_array_or_scalar(to_ref(y))); for (Eigen::Index i = 0; i < y_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) > low))) { [](auto&& y_arr, auto low, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be greater than " + std::to_string(value_of_rec(low))) .c_str()); }(y_arr, low, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not strictly greater than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. 
* @tparam Idxs A parameter pack of Integral types * @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_low A scalar type * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_stan_scalar_t* = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); for (Eigen::Index j = 0; j < y_arr.cols(); ++j) { for (Eigen::Index i = 0; i < y_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) > low))) { [](auto&& y_arr, auto low, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be greater than " + std::to_string(value_of_rec(low))) .c_str()); }(y_arr, low, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not strictly greater than the * associated element in `low`. This function is vectorized and will check each * element of `y` against each element of `low`. * @tparam Idxs A parameter pack of Integral types * @tparam T_y A standard vector or type inheriting from `Eigen::DenseBase` with * compile time rows or columns equal to one and `value_type` equal to a stan * scalar. * @tparam T_low A standard vector or type inheriting from `Eigen::DenseBase` * with compile time rows or columns equal to one and `value_type` equal to a * stan scalar. 
* @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_all_not_std_vector_vt* = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(as_array_or_scalar(to_ref(y))); auto&& low_arr = value_of_rec(as_array_or_scalar(to_ref(low))); for (Eigen::Index i = 0; i < low_arr.size(); ++i) { if (unlikely(!(y_arr.coeff(i) > low_arr.coeff(i)))) { [](auto&& y_arr, auto&& low_arr, auto name, auto function, auto i, auto... idxs) STAN_COLD_PATH { throw_domain_error_vec( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, "is ", (", but must be greater than " + std::to_string(low_arr.coeff(i))) .c_str()); }(y_arr, low_arr, name, function, i, idxs...); } } } /** * Throw an exception if each element of `y` is not strictly greater than the * associated element in `low`. This function is vectorized and will check each * element of `y` against each element of `low`. 
* @tparam Idxs A parameter pack of Integral types * @tparam T_y Type inheriting from `Eigen::DenseBase` or a `var_value` with the * var's inner type inheriting from `Eigen::DenseBase` where the compile time * number of rows or columns is not equal to one * @tparam T_low Type inheriting from `Eigen::DenseBase` or a `var_value` with * the var's inner type inheriting from `Eigen::DenseBase` where the compile * time number of rows or columns is not equal to one * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { auto&& y_arr = value_of_rec(to_ref(y)); auto&& low_arr = value_of_rec(to_ref(low)); for (Eigen::Index j = 0; j < low_arr.cols(); ++j) { for (Eigen::Index i = 0; i < low_arr.rows(); ++i) { if (unlikely(!(y_arr.coeff(i, j) > low_arr.coeff(i, j)))) { [](auto&& y_arr, auto&& low_arr, auto name, auto function, auto i, auto j, auto... idxs) STAN_COLD_PATH { throw_domain_error_mat( function, internal::make_iter_name(name, idxs...).c_str(), y_arr, i, j, "is ", (", but must be greater than " + std::to_string(low_arr.coeff(i, j))) .c_str()); }(y_arr, low_arr, name, function, i, j, idxs...); } } } } /** * Throw an exception if each element of `y` is not strictly greater than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. 
* @tparam Idxs A parameter pack of Integral types * @tparam T_y A standard vector type with a `value_type` of a standard vector * or type inheriting from `Eigen::DenseBase` * @tparam T_low A scalar type or the same type as the underlying type in `T_y` * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_not_std_vector_t* = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... idxs) { for (size_t i = 0; i < y.size(); ++i) { check_greater(function, name, y[i], low, idxs..., i); } } /** * Throw an exception if each element of `y` is not strictly greater than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam Idxs A parameter pack of Integral types * @tparam T_y A scalar type or the same type as the inner type of `T_low` * @tparam T_low A standard vector type with a `value_type` of a standard vector * or type inheriting from `Eigen::DenseBase` * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_std_vector_vt* = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... 
idxs) { for (size_t i = 0; i < low.size(); ++i) { check_greater(function, name, y, low[i], idxs..., i); } } /** * Throw an exception if each element of `y` is not strictly greater than `low`. * This function is vectorized and will check each element of `y` against each * element of `low`. * @tparam Idxs A parameter pack of Integral types * @tparam T_y A standard vector type whose `value_type` is a scalar, type * inheriting from `Eigen::EigenBase`, or another standard vector * @tparam T_low A standard vector type whose `value_type` is a scalar, type * inheriting from `Eigen::EigenBase`, or another standard vector * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Variable to check * @param low Lower bound * @param idxs Pack of integral types to construct lazily construct the error * message indices * @throw `std::domain_error` if y is not greater or equal to low or if any * element of y or low is `NaN` */ template * = nullptr, require_all_std_vector_t* = nullptr, typename... Idxs> inline void check_greater(const char* function, const char* name, const T_y& y, const T_low& low, Idxs... 
idxs) { for (size_t i = 0; i < y.size(); ++i) { check_greater(function, name, y[i], low[i], idxs..., i); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/check_pos_semidefinite.hpp0000644000176200001440000000517014645137106025716 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_CHECK_POS_SEMIDEFINITE_HPP #define STAN_MATH_PRIM_ERR_CHECK_POS_SEMIDEFINITE_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * Check if the specified matrix is positive definite * @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param function Function name (for error messages) * @param name Variable name (for error messages) * @param y Matrix to test * @throw std::invalid_argument if the matrix is not square * or if the matrix has 0 size. * @throw std::domain_error if the matrix is not symmetric, * or if it is not positive semi-definite, * or if any element of the matrix is NaN. */ template * = nullptr> inline void check_pos_semidefinite(const char* function, const char* name, const EigMat& y) { const auto& y_ref = to_ref(y); check_symmetric(function, name, y_ref); check_positive(function, name, "rows", y_ref.rows()); check_not_nan(function, name, y_ref); if (y_ref.rows() == 1 && !(y_ref(0, 0) >= 0.0)) { throw_domain_error(function, name, "is not positive semi-definite.", ""); } using Eigen::Dynamic; using Eigen::LDLT; using Eigen::Matrix; LDLT > cholesky = value_of_rec(y_ref).ldlt(); if (cholesky.info() != Eigen::Success || (cholesky.vectorD().array() < 0.0).any()) { throw_domain_error(function, name, "is not positive semi-definite.", ""); } } /** * Check if the specified matrix is positive semidefinite * * @tparam Derived Derived type of the Eigen::LDLT transform. 
* @param function Function name (for error messages) * @param name Variable name (for error messages) * @param cholesky Eigen::LDLT to test * @throw std::domain_error if the matrix is not positive * semi-definite. */ template inline void check_pos_semidefinite(const char* function, const char* name, const Eigen::LDLT& cholesky) { if (cholesky.info() != Eigen::Success || (cholesky.vectorD().array() < 0.0).any()) { throw_domain_error(function, name, "is not positive semi-definite.", ""); } } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/err/is_cholesky_factor.hpp0000644000176200001440000000275214645137106025110 0ustar liggesusers#ifndef STAN_MATH_PRIM_ERR_IS_CHOLESKY_FACTOR_HPP #define STAN_MATH_PRIM_ERR_IS_CHOLESKY_FACTOR_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * Return true if y is a valid Cholesky factor, if * number of rows is not less than the number of columns, if there * are no 0 columns, and no element in matrix is NaN. * A Cholesky factor is a lower triangular matrix whose diagonal * elements are all positive. Note that Cholesky factors need not * be square, but require at least as many rows M as columns N * (i.e., M >= N). 
* @tparam EigMat A type derived from `EigenBase` with dynamic rows and columns * @param y Matrix to test * @return true if y is a valid Cholesky factor, if * number of rows is not less than the number of columns, * if there are no 0 columns, and no element in matrix is NaN */ template * = nullptr> inline bool is_cholesky_factor(const EigMat& y) { const auto& y_ref = to_ref(y); return is_less_or_equal(y_ref.cols(), y_ref.rows()) && is_positive(y_ref.cols()) && is_lower_triangular(y_ref) && is_positive(y_ref.diagonal()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/core.hpp0000644000176200001440000000105614645137106021372 0ustar liggesusers#ifndef STAN_MATH_PRIM_CORE_HPP #define STAN_MATH_PRIM_CORE_HPP #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/prim/prob.hpp0000644000176200001440000004642414645137106021414 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_HPP #define STAN_MATH_PRIM_PROB_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/prim/prob/0000755000176200001440000000000014645161272020672 5ustar liggesusersStanHeaders/inst/include/stan/math/prim/prob/lognormal_rng.hpp0000644000176200001440000000437514645137106024253 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_RNG_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a lognormal random variate for the given location and scale * using the specified random number generator. * * mu and sigma can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) positive scale parameter(s) * @param rng random number generator * @return (Sequence of) Normal random variate(s) * @throw std::domain_error if mu is infinite or sigma is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type lognormal_rng( const T_loc& mu, const T_scale& sigma, RNG& rng) { using boost::variate_generator; using boost::random::lognormal_distribution; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "lognormal_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma); T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(mu, sigma); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > lognorm_rng( rng, lognormal_distribution<>(mu_vec[n], sigma_vec[n])); output[n] = lognorm_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_log.hpp0000644000176200001440000000176414645137106023526 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_LOG_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use gumbel_lpdf */ template return_type_t gumbel_log(const T_y& y, const T_loc& mu, const T_scale& beta) { return gumbel_lpdf(y, mu, beta); } /** \ingroup prob_dists * @deprecated use gumbel_lpdf */ template inline return_type_t gumbel_log(const T_y& y, const T_loc& mu, const T_scale& beta) { return gumbel_lpdf(y, 
mu, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_log.hpp0000644000176200001440000000214314645137106024564 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_LOG_HPP #define STAN_MATH_PRIM_PROB_SKEW_NORMAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use skew_normal_lpdf */ template return_type_t skew_normal_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { return skew_normal_lpdf(y, mu, sigma, alpha); } /** \ingroup prob_dists * @deprecated use skew_normal_lpdf */ template inline return_type_t skew_normal_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { return skew_normal_lpdf(y, mu, sigma, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_cdf_log.hpp0000644000176200001440000000113214645137106025402 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use exponential_lcdf */ template return_type_t exponential_cdf_log(const T_y& y, const T_inv_scale& beta) { return exponential_lcdf(y, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/dirichlet_log.hpp0000644000176200001440000000316014645137106024212 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DIRICHLET_LOG_HPP #define STAN_MATH_PRIM_PROB_DIRICHLET_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the Dirichlet density for the given theta and * a vector of prior sample sizes, alpha. * Each element of alpha must be greater than 0. * Each element of theta must be greater than or 0. * Theta sums to 1. * * @deprecated use dirichlet_lpdf * * @param theta A scalar vector. 
* @param alpha Prior sample sizes. * @return The log of the Dirichlet density. * @throw std::domain_error if any element of alpha is less than * or equal to 0. * @throw std::domain_error if any element of theta is less than 0. * @throw std::domain_error if the sum of theta is not 1. * @tparam T_prob Type of scalar. * @tparam T_prior_size Type of prior sample sizes. */ template return_type_t dirichlet_log(const T_prob& theta, const T_prior_size& alpha) { return dirichlet_lpdf(theta, alpha); } /** \ingroup multivar_dists * @deprecated use dirichlet_lpdf */ template return_type_t dirichlet_log(const T_prob& theta, const T_prior_size& alpha) { return dirichlet_lpdf(theta, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/hypergeometric_log.hpp0000644000176200001440000000167314645137106025300 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_HYPERGEOMETRIC_LOG_HPP #define STAN_MATH_PRIM_PROB_HYPERGEOMETRIC_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use hypergeometric_lpmf */ template double hypergeometric_log(const T_n& n, const T_N& N, const T_a& a, const T_b& b) { return hypergeometric_lpmf(n, N, a, b); } /** \ingroup prob_dists * @deprecated use hypergeometric_lpmf */ template inline double hypergeometric_log(const T_n& n, const T_N& N, const T_a& a, const T_b& b) { return hypergeometric_lpmf(n, N, a, b); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/logistic_cdf_log.hpp0000644000176200001440000000124114645137106024672 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGISTIC_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use logistic_lcdf */ template return_type_t logistic_cdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return logistic_lcdf(y, mu, sigma); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/logistic_ccdf_log.hpp0000644000176200001440000000125114645137106025036 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGISTIC_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use logistic_lccdf */ template return_type_t logistic_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return logistic_lccdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_log.hpp0000644000176200001440000000165114645137106025612 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use poisson_binomial_lpmf */ template return_type_t poisson_binomial_log(const T_y& y, const T_theta& theta) { return poisson_binomial_lpmf(y, theta); } /** \ingroup prob_dists * @deprecated use poisson_binomial_lpmf */ template inline return_type_t poisson_binomial_log(const T_y& y, const T_theta& theta) { return poisson_binomial_lpmf(y, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_lcdf.hpp0000644000176200001440000001030214645137106024324 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_LCDF_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t inv_gamma_lcdf(const T_y& y, const T_shape& alpha, const T_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; using std::exp; using std::log; using std::pow; static const char* function = "inv_gamma_lcdf"; 
check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Scale Parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", beta_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, alpha, beta)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(y, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(negative_infinity()); } } VectorBuilder::value, T_partials_return, T_shape> gamma_vec(math::size(alpha)); VectorBuilder::value, T_partials_return, T_shape> digamma_vec(math::size(alpha)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(alpha); i++) { const T_partials_return alpha_dbl = alpha_vec.val(i); gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return Pn = gamma_q(alpha_dbl, beta_dbl * y_inv_dbl); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += beta_dbl * y_inv_dbl * y_inv_dbl * exp(-beta_dbl * y_inv_dbl) * pow(beta_dbl * y_inv_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += 
grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_inv_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -y_inv_dbl * exp(-beta_dbl * y_inv_dbl) * pow(beta_dbl * y_inv_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_log.hpp0000644000176200001440000000203414645137106024244 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_LOG_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use von_mises_lpdf */ template return_type_t von_mises_log(T_y const& y, T_loc const& mu, T_scale const& kappa) { return von_mises_lpdf(y, mu, kappa); } /** \ingroup prob_dists * @deprecated use von_mises_lpdf */ template inline return_type_t von_mises_log(T_y const& y, T_loc const& mu, T_scale const& kappa) { return von_mises_lpdf(y, mu, kappa); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_lcdf.hpp0000644000176200001440000000777414645137106025165 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LCDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t pareto_type_2_lcdf( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_lambda_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using std::pow; static const char* function = "pareto_type_2_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", lambda, 
"Shape parameter", alpha); if (size_zero(y, mu, lambda, alpha)) { return 0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_lambda_ref lambda_ref = lambda; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", lambda_val); check_positive_finite(function, "Shape parameter", alpha_val); check_greater_or_equal(function, "Random variable", y_val, mu_val); const auto& temp = to_ref_if::value>( 1 + (y_val - mu_val) / lambda_val); const auto& p1_pow_alpha = to_ref_if::value>( pow(temp, alpha_val)); T_partials_return P = sum(log1m(1 / p1_pow_alpha)); auto ops_partials = make_partials_propagator(y_ref, mu_ref, lambda_ref, alpha_ref); if (!is_constant_all::value) { const auto& inv_p1_pow_alpha_minus_one = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( inv(p1_pow_alpha - 1)); if (!is_constant_all::value) { auto grad_1_2 = to_ref_if<(!is_constant_all::value + !is_constant_all::value + !is_constant_all::value) >= 2>(alpha_val * inv_p1_pow_alpha_minus_one / (lambda_val - mu_val + y_val)); if (!is_constant_all::value) { partials<1>(ops_partials) = -grad_1_2; } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = (mu_val - y_val) * grad_1_2 / lambda_val; } if (!is_constant_all::value) { partials<0>(ops_partials) = std::move(grad_1_2); } } if (!is_constant_all::value) { partials<3>(ops_partials) = log(temp) * inv_p1_pow_alpha_minus_one; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/logistic_log.hpp0000644000176200001440000000202014645137106024052 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_PROB_LOGISTIC_LOG_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use logistic_lpdf */ template return_type_t logistic_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return logistic_lpdf(y, mu, sigma); } /** \ingroup prob_dists * @deprecated use logistic_lpdf */ template inline return_type_t logistic_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return logistic_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_log_glm_log.hpp0000644000176200001440000000237014645137106025437 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LOG_GLM_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_LOG_GLM_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use poisson_logit_glm_lpmf */ template return_type_t poisson_log_glm_log(const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta) { return poisson_log_glm_lpmf(y, x, alpha, beta); } /** \ingroup multivar_dists * @deprecated use poisson_logit_glm_lpmf */ template inline return_type_t poisson_log_glm_log( const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta) { return poisson_log_glm_lpmf(y, x, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_lccdf.hpp0000644000176200001440000000617514645137106024211 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the Weibull log complementary cumulative distribution function * for the given location and scale. Given containers of matching sizes, * returns the log sum of probabilities. 
* * @tparam T_y type of real parameter * @tparam T_shape type of shape parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param alpha shape parameter * @param sigma scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is negative, alpha sigma is nonpositive */ template * = nullptr> return_type_t weibull_lccdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using std::pow; static const char* function = "weibull_lccdf"; T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Shape parameter", alpha_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, alpha, sigma)) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); const auto& pow_n = to_ref_if::value>( pow(y_val / sigma_val, alpha_val)); T_partials_return ccdf_log = -sum(pow_n); if (!is_constant_all::value) { partials<0>(ops_partials) = -alpha_val / y_val * pow_n; } if (!is_constant_all::value) { partials<1>(ops_partials) = -log(y_val / sigma_val) * pow_n; } if (!is_constant_all::value) { partials<2>(ops_partials) = alpha_val / sigma_val * pow_n; } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_rng.hpp0000644000176200001440000000146214645137106024415 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_RNG_HPP #define 
STAN_MATH_PRIM_PROB_STD_NORMAL_RNG_HPP #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a standard Normal random variate using the specified * random number generator. * * @tparam RNG type of random number generator * @param rng random number generator * @return A standard normal random variate */ template inline double std_normal_rng(RNG& rng) { using boost::normal_distribution; using boost::variate_generator; variate_generator> norm_rng( rng, normal_distribution<>(0, 1)); return norm_rng(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_gp_lpdf.hpp0000644000176200001440000000644714645137106024242 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_GP_LPDF_HPP #define STAN_MATH_PRIM_PROB_MULTI_GP_LPDF_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of a multivariate Gaussian Process for the given y, Sigma, and * w. y is a dxN matrix, where each column is a different observation and each * row is a different output dimension. The Gaussian Process is assumed to * have a scaled kernel matrix with a different scale for each output dimension. * This distribution is equivalent to: * for (i in 1:d) row(y, i) ~ multi_normal(0, (1/w[i])*Sigma). * * @tparam T_y type of scalar * @tparam T_covar type of kernel * @tparam T_w type of weight * @param y A dxN matrix * @param Sigma The NxN kernel matrix * @param w A d-dimensional vector of positive inverse scale parameters for each * output. * @return The log of the multivariate GP density. * @throw std::domain_error if Sigma is not square, not symmetric, * or not semi-positive definite. 
*/ template * = nullptr, require_col_vector_t* = nullptr> return_type_t multi_gp_lpdf(const T_y& y, const T_covar& Sigma, const T_w& w) { using T_lp = return_type_t; static const char* function = "multi_gp_lpdf"; check_size_match(function, "Size of random variable (rows y)", y.rows(), "Size of kernel scales (w)", w.size()); check_size_match(function, "Size of random variable", y.cols(), "rows of covariance parameter", Sigma.rows()); check_positive(function, "Kernel rows", Sigma.rows()); const auto& Sigma_ref = to_ref(Sigma); const auto& w_ref = to_ref(w); const auto& y_t_ref = to_ref(y.transpose()); check_finite(function, "Kernel", Sigma_ref); check_symmetric(function, "Kernel", Sigma_ref); check_positive_finite(function, "Kernel scales", w_ref); check_finite(function, "Random variable", y_t_ref); auto ldlt_Sigma = make_ldlt_factor(Sigma_ref); check_ldlt_factor(function, "LDLT_Factor of Sigma", ldlt_Sigma); T_lp lp(0.0); if (y.rows() == 0) { return lp; } if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * y.size(); } if (include_summand::value) { lp -= 0.5 * log_determinant_ldlt(ldlt_Sigma) * y.rows(); } if (include_summand::value) { lp += (0.5 * y.cols()) * sum(log(w_ref)); } if (include_summand::value) { lp -= 0.5 * trace_gen_inv_quad_form_ldlt(w_ref, ldlt_Sigma, y_t_ref); } return lp; } template inline return_type_t multi_gp_lpdf(const T_y& y, const T_covar& Sigma, const T_w& w) { return multi_gp_lpdf(y, Sigma, w); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_log.hpp0000644000176200001440000000322714645137106024205 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_LOG_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of an inverse gamma density for y with the specified * shape and scale parameters. * Shape and scale parameters must be greater than 0. * y must be greater than 0. 
* * @deprecated use inv_gamma_lpdf * * @param y A scalar variable. * @param alpha Shape parameter. * @param beta Scale parameter. * @throw std::domain_error if alpha is not greater than 0. * @throw std::domain_error if beta is not greater than 0. * @throw std::domain_error if y is not greater than 0. * @tparam T_y Type of scalar. * @tparam T_shape Type of shape. * @tparam T_scale Type of scale. */ template return_type_t inv_gamma_log(const T_y& y, const T_shape& alpha, const T_scale& beta) { return inv_gamma_lpdf(y, alpha, beta); } /** \ingroup prob_dists * @deprecated use inv_gamma_lpdf */ template inline return_type_t inv_gamma_log(const T_y& y, const T_shape& alpha, const T_scale& beta) { return inv_gamma_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_lcdf.hpp0000644000176200001440000000705614645137106026262 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the double exponential log cumulative density function. Given * containers of matching sizes, returns the log sum of probabilities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. 
* @param y real parameter * @param mu location parameter * @param sigma scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is nan, mu is infinite, or sigma is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t double_exponential_lcdf( const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "double_exponential_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale Parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return cdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t size_sigma = stan::math::size(sigma); size_t N = max_size(y, mu, sigma); VectorBuilder inv_sigma(size_sigma); for (size_t i = 0; i < size_sigma; i++) { inv_sigma[i] = inv(sigma_vec.val(i)); } for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return scaled_diff = (y_dbl - mu_dbl) * inv_sigma[n]; const T_partials_return rep_deriv = y_dbl < mu_dbl ? 
inv_sigma[n] : inv_sigma[n] * inv(2 * exp(scaled_diff) - 1); if (y_dbl < mu_dbl) { cdf_log += LOG_HALF + scaled_diff; } else { cdf_log += log1m(0.5 * exp(-scaled_diff)); } if (!is_constant_all::value) { partials<0>(ops_partials)[n] += rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= rep_deriv * scaled_diff; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_cdf_log.hpp0000644000176200001440000000077114645137106024556 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use poisson_lcdf */ template return_type_t poisson_cdf_log(const T_n& n, const T_rate& lambda) { return poisson_lcdf(n, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gaussian_dlm_obs_rng.hpp0000644000176200001440000001360014645137106025561 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAUSSIAN_DLM_OBS_RNG_HPP #define STAN_MATH_PRIM_PROB_GAUSSIAN_DLM_OBS_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { namespace internal { /** \ingroup multivar_dists * Return a multivariate normal random variate with the given location * and covariance using the specified random number generator. * * No error checking or templating, takes the LDLT directly to avoid * recomputation. Can sample from semidefinite covariance matrices. 
* * @tparam RNG type of pseudo-random number generator * @param mu location parameter * @param S_ldlt Eigen::LDLT of covariance matrix, semidefinite is okay * @param rng random number generator * */ template inline Eigen::VectorXd multi_normal_semidefinite_rng( const Eigen::VectorXd &mu, const Eigen::LDLT &S_ldlt, RNG &rng) { using boost::normal_distribution; using boost::variate_generator; variate_generator> std_normal_rng( rng, normal_distribution<>(0, 1)); Eigen::VectorXd stddev = S_ldlt.vectorD().array().sqrt().matrix(); size_t M = S_ldlt.vectorD().size(); Eigen::VectorXd z(M); for (int i = 0; i < M; i++) { z(i) = stddev(i) * std_normal_rng(); } Eigen::VectorXd Y = mu + (S_ldlt.transpositionsP().transpose() * (S_ldlt.matrixL() * z)); // The inner paranthesis matter, transpositionsP() gives a // permutation matrix from pivoting and matrixL() gives a lower // triangular matrix. The types cannot be directly multiplied. return Y; } } // namespace internal /** \ingroup multivar_dists * Simulate random draw from Gaussian dynamic linear model (GDLM). * This distribution is equivalent to, for \f$t = 1:T\f$, * \f{eqnarray*}{ * y_t & \sim N(F' \theta_t, V) \\ * \theta_t & \sim N(G \theta_{t-1}, W) \\ * \theta_0 & \sim N(m_0, C_0) * \f} * * @tparam RNG Type of pseudo-random number generator. * @param F A n x r matrix. The design matrix. * @param G A n x n matrix. The transition matrix. * @param V A r x r matrix. The observation covariance matrix. * @param W A n x n matrix. The state covariance matrix. * @param m0 A n x 1 matrix. The mean vector of the distribution * of the initial state. * @param C0 A n x n matrix. The covariance matrix of the distribution * of the initial state. * @param T a positive integer, how many timesteps to simulate. * @param rng Pseudo-random number generator. * @return A r x T matrix of simulated observations. Rows are * variables, columns are observations. First column is the state * after the first transition. 
Last column is the state after the last * transition. Initial state not returned. * @throw std::domain_error if a matrix is not symmetric or not * positive semi-definite. Or throw std::invalid_argument if a size is * wrong or any input is NaN or non-finite, or if T is not * positive. Require C0 in particular to be strictly positive * definite. V and W can be semidefinite. * */ template inline Eigen::MatrixXd gaussian_dlm_obs_rng(const Eigen::MatrixXd &F, const Eigen::MatrixXd &G, const Eigen::MatrixXd &V, const Eigen::MatrixXd &W, const Eigen::VectorXd &m0, const Eigen::MatrixXd &C0, const int T, RNG &rng) { static const char *function = "gaussian_dlm_obs_rng"; int r = F.cols(); // number of variables int n = G.rows(); // number of states check_size_match(function, "rows of F", F.rows(), "rows of G", n); check_finite(function, "F", F); check_square(function, "G", G); check_finite(function, "G", G); check_size_match(function, "rows of V", V.rows(), "cols of F", r); check_finite(function, "V", V); check_positive(function, "V rows", V.rows()); check_symmetric(function, "V", V); check_size_match(function, "rows of W", W.rows(), "rows of G", n); check_finite(function, "W", W); check_positive(function, "W rows", W.rows()); check_symmetric(function, "W", W); check_size_match(function, "rows of W", W.rows(), "rows of G", n); check_size_match(function, "size of m0", m0.size(), "rows of G", n); check_finite(function, "m0", m0); check_size_match(function, "rows of C0", C0.rows(), "rows of G", n); check_finite(function, "C0", C0); check_positive(function, "C0 rows", C0.rows()); check_symmetric(function, "C0", C0); check_positive(function, "T", T); Eigen::LDLT V_ldlt = V.ldlt(); check_pos_semidefinite(function, "V", V_ldlt); Eigen::LDLT W_ldlt = W.ldlt(); check_pos_semidefinite(function, "W", W_ldlt); Eigen::LDLT C0_ldlt = C0.ldlt(); check_pos_semidefinite(function, "C0", C0_ldlt); Eigen::MatrixXd y(r, T); Eigen::VectorXd theta_t = internal::multi_normal_semidefinite_rng(m0, 
C0_ldlt, rng); for (int t = 0; t < T; ++t) { theta_t = internal::multi_normal_semidefinite_rng( Eigen::VectorXd(G * theta_t), W_ldlt, rng); y.col(t) = internal::multi_normal_semidefinite_rng( Eigen::VectorXd(F.transpose() * theta_t), V_ldlt, rng); } return y; } template inline Eigen::Matrix gaussian_dlm_obs_rng(const Eigen::MatrixXd &F, const Eigen::MatrixXd &G, const Eigen::VectorXd &V, const Eigen::MatrixXd &W, const Eigen::VectorXd &m0, const Eigen::MatrixXd &C0, const int T, RNG &rng) { return gaussian_dlm_obs_rng(F, G, Eigen::MatrixXd(V.asDiagonal()), W, m0, C0, T, rng); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_lccdf.hpp0000644000176200001440000001057214645137106025063 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_SKEW_NORMAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t skew_normal_lccdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; static const char* function = "skew_normal_lccdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Shape paramter", alpha); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) alpha_val = 
to_ref(as_value_column_array_or_scalar(alpha_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", sigma_val); check_finite(function, "Shape parameter", alpha_val); if (size_zero(y, mu, sigma, alpha)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, alpha_ref); const auto& diff = to_ref((y_val - mu_val) / sigma_val); const auto& scaled_diff = to_ref_if::value>(diff / SQRT_TWO); const auto& erfc_m_scaled_diff = erfc(-scaled_diff); const auto& owens_t_diff_alpha = owens_t(diff, alpha_val); const auto& ccdf_log_ = to_ref_if::value>( 1.0 - 0.5 * erfc_m_scaled_diff + 2 * owens_t_diff_alpha); T_partials_return ccdf_log = sum(log(ccdf_log_)); if (!is_constant_all::value) { const auto& diff_square = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(square(diff)); if (!is_constant_all::value) { const auto& erf_alpha_scaled_diff = erf(alpha_val * scaled_diff); const auto& exp_m_scaled_diff_square = exp(-0.5 * diff_square); auto rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>( (erf_alpha_scaled_diff + 1) * INV_SQRT_TWO_PI / (ccdf_log_ * sigma_val) * exp_m_scaled_diff_square); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = rep_deriv * diff; } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(rep_deriv); } } if (!is_constant_all::value) { const auto& alpha_square = square(alpha_val); edge<3>(ops_partials).partials_ = 2.0 * exp(-0.5 * diff_square * (1.0 + alpha_square)) / ((1 + alpha_square) * TWO_PI * ccdf_log_); } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_proportion_lcdf.hpp0000644000176200001440000001314114645137106025600 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_PROPORTION_LCDF_HPP 
#define STAN_MATH_PRIM_PROB_BETA_PROPORTION_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the beta log cumulative distribution function * for specified probability, location, and precision parameters: * beta_proportion_lcdf(y | mu, kappa) = beta_lcdf(y | mu * kappa, (1 - * mu) * kappa). Any arguments other than scalars must be containers of * the same size. With non-scalar arguments, the return is the sum of * the log cdfs with scalars broadcast as necessary. * * @tparam T_y type of y * @tparam T_loc type of location parameter * @tparam T_prec type of precision parameter * @param y (Sequence of) scalar(s) between zero and one * @param mu (Sequence of) location parameter(s) * @param kappa (Sequence of) precision parameter(s) * @return log probability or sum of log of probabilities * @throw std::domain_error if mu is outside of (0, 1) * @throw std::domain_error if kappa is nonpositive * @throw std::domain_error if y is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template return_type_t beta_proportion_lcdf(const T_y& y, const T_loc& mu, const T_prec& kappa) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_kappa_ref = ref_type_t; static const char* function = "beta_proportion_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Precision parameter", kappa); if (size_zero(y, mu, kappa)) { return 0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_kappa_ref kappa_ref = kappa; check_positive(function, "Location parameter", value_of(mu_ref)); check_less(function, "Location parameter", value_of(mu_ref), 1.0); check_positive_finite(function, "Precision parameter", value_of(kappa_ref)); check_bounded(function, "Random 
variable", value_of(y_ref), 0.0, 1.0); T_partials_return cdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, kappa_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view kappa_vec(kappa_ref); size_t size_kappa = stan::math::size(kappa); size_t size_mu_kappa = max_size(mu, kappa); size_t N = max_size(y, mu, kappa); VectorBuilder::value, T_partials_return, T_loc, T_prec> digamma_mukappa(size_mu_kappa); VectorBuilder::value, T_partials_return, T_loc, T_prec> digamma_kappa_mukappa(size_mu_kappa); VectorBuilder::value, T_partials_return, T_prec> digamma_kappa(size_kappa); if (!is_constant_all::value) { for (size_t i = 0; i < size_mu_kappa; i++) { const T_partials_return kappa_dbl = kappa_vec.val(i); const T_partials_return mukappa_dbl = mu_vec.val(i) * kappa_dbl; digamma_mukappa[i] = digamma(mukappa_dbl); digamma_kappa_mukappa[i] = digamma(kappa_dbl - mukappa_dbl); } for (size_t i = 0; i < size_kappa; i++) { digamma_kappa[i] = digamma(kappa_vec.val(i)); } } for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return kappa_dbl = kappa_vec.val(n); const T_partials_return mukappa_dbl = mu_dbl * kappa_dbl; const T_partials_return kappa_mukappa_dbl = kappa_dbl - mukappa_dbl; const T_partials_return betafunc_dbl = beta(mukappa_dbl, kappa_mukappa_dbl); const T_partials_return Pn = inc_beta(mukappa_dbl, kappa_mukappa_dbl, y_dbl); cdf_log += log(Pn); const T_partials_return inv_Pn = is_constant_all::value ? 
0 : inv(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += pow(1 - y_dbl, kappa_mukappa_dbl - 1) * pow(y_dbl, mukappa_dbl - 1) * inv_Pn / betafunc_dbl; } T_partials_return g1 = 0; T_partials_return g2 = 0; if (!is_constant_all::value) { grad_reg_inc_beta(g1, g2, mukappa_dbl, kappa_mukappa_dbl, y_dbl, digamma_mukappa[n], digamma_kappa_mukappa[n], digamma_kappa[n], betafunc_dbl); } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += kappa_dbl * (g1 - g2) * inv_Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += (g1 * mu_dbl + g2 * (1 - mu_dbl)) * inv_Pn; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_proportion_lpdf.hpp0000644000176200001440000001245214645137106025621 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_PROPORTION_LPDF_HPP #define STAN_MATH_PRIM_PROB_BETA_PROPORTION_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the beta density for specified y, location, and * precision: beta_proportion_lpdf(y | mu, kappa) = beta_lpdf(y | mu * * kappa, (1 - mu) * kappa). Any arguments other than scalars must be * containers of the same size. With non-scalar arguments, the return * is the sum of the log pdfs with scalars broadcast as necessary. * *

The result log probability is defined to be the sum of * the log probabilities for each observation/mu/kappa triple. * * Prior location, mu, must be contained in (0, 1). Prior precision * must be positive. * * @tparam T_y type of scalar outcome * @tparam T_loc type of prior location * @tparam T_prec type of prior precision * * @param y (Sequence of) scalar(s) between zero and one * @param mu (Sequence of) location parameter(s) * @param kappa (Sequence of) precision parameter(s) * @return The log of the product of densities. */ template * = nullptr> return_type_t beta_proportion_lpdf(const T_y& y, const T_loc& mu, const T_prec& kappa) { using T_partials_return = partials_return_t; using std::log; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_kappa_ref = ref_type_if_t::value, T_prec>; static const char* function = "beta_proportion_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Precision parameter", kappa); if (size_zero(y, mu, kappa)) { return 0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_kappa_ref kappa_ref = kappa; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) kappa_val = to_ref(as_value_column_array_or_scalar(kappa_ref)); check_positive(function, "Location parameter", mu_val); check_less(function, "Location parameter", mu_val, 1.0); check_positive_finite(function, "Precision parameter", kappa_val); check_bounded(function, "Random variable", value_of(y_val), 0, 1); if (!include_summand::value) { return 0; } const auto& log_y = to_ref_if::value>(log(y_val)); const auto& log1m_y = to_ref_if::value>(log1m(y_val)); const auto& mukappa = to_ref(mu_val * kappa_val); size_t N = max_size(y, mu, kappa); T_partials_return logp(0); if (include_summand::value) { logp += sum(lgamma(kappa_val)) * N / math::size(kappa); } if (include_summand::value) { logp -= 
sum(lgamma(mukappa) + lgamma(kappa_val - mukappa)) * N / max_size(mu, kappa_val); } logp += sum((mukappa - 1) * log_y + (kappa_val - mukappa - 1) * log1m_y); auto ops_partials = make_partials_propagator(y_ref, mu_ref, kappa_ref); if (!is_constant_all::value) { edge<0>(ops_partials).partials_ = (mukappa - 1) / y_val + (kappa_val - mukappa - 1) / (y_val - 1); } if (!is_constant_all::value) { auto digamma_mukappa = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(digamma(mukappa)); auto digamma_kappa_mukappa = to_ref_if<( !is_constant_all::value && !is_constant_all::value)>( digamma(kappa_val - mukappa)); if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = kappa_val * (digamma_kappa_mukappa - digamma_mukappa + log_y - log1m_y); } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = digamma(kappa_val) + mu_val * (log_y - digamma_mukappa) + (1 - mu_val) * (log1m_y - digamma_kappa_mukappa); } } return ops_partials.build(logp); } template inline return_type_t beta_proportion_lpdf( const T_y& y, const T_loc& mu, const T_prec& kappa) { return beta_proportion_lpdf(y, mu, kappa); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_lpdf.hpp0000644000176200001440000001225214645137106023324 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_LPDF_HPP #define STAN_MATH_PRIM_PROB_BETA_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the beta density for the specified scalar(s) given the specified * sample stan::math::size(s). y, alpha, or beta can each either be scalar or a * vector. Any vector inputs must be the same length. * *

The result log probability is defined to be the sum of * the log probabilities for each observation/alpha/beta triple. * * Prior sample sizes, alpha and beta, must be greater than 0. * * @tparam T_y type of scalar outcome * @tparam T_scale_succ type of prior scale for successes * @tparam T_scale_fail type of prior scale for failures * @param y (Sequence of) scalar(s). * @param alpha (Sequence of) prior sample stan::math::size(s). * @param beta (Sequence of) prior sample stan::math::size(s). * @return The log of the product of densities. */ template * = nullptr> return_type_t beta_lpdf( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_scale_succ>; using T_beta_ref = ref_type_if_t::value, T_scale_fail>; static const char* function = "beta_lpdf"; check_consistent_sizes(function, "Random variable", y, "First shape parameter", alpha, "Second shape parameter", beta); if (size_zero(y, alpha, beta)) { return 0; } T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_positive_finite(function, "First shape parameter", alpha_val); check_positive_finite(function, "Second shape parameter", beta_val); check_bounded(function, "Random variable", value_of(y_val), 0, 1); if (!include_summand::value) { return 0; } const auto& log_y = to_ref(log(y_val)); const auto& log1m_y = to_ref(log1m(y_val)); size_t N = max_size(y, alpha, beta); T_partials_return logp(0); if (include_summand::value) { logp -= sum(lgamma(alpha_val)) * N / max_size(alpha); } if (include_summand::value) { logp -= sum(lgamma(beta_val)) * N / max_size(beta); } if (include_summand::value) { logp += sum((alpha_val - 
1.0) * log_y) * N / max_size(y, alpha); } if (include_summand::value) { logp += sum((beta_val - 1.0) * log1m_y) * N / max_size(y, beta); } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); if (!is_constant_all::value) { edge<0>(ops_partials).partials_ = (alpha_val - 1) / y_val + (beta_val - 1) / (y_val - 1); } if (include_summand::value) { const auto& alpha_beta = to_ref_if::value>( alpha_val + beta_val); logp += sum(lgamma(alpha_beta)) * N / max_size(alpha, beta); if (!is_constant_all::value) { const auto& digamma_alpha_beta = to_ref_if < !is_constant_all::value && !is_constant_all::value > (digamma(alpha_beta)); if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = log_y + digamma_alpha_beta - digamma(alpha_val); } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = log1m_y + digamma_alpha_beta - digamma(beta_val); } } } return ops_partials.build(logp); } template inline return_type_t beta_lpdf( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { return beta_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_lpdf.hpp0000644000176200001440000001173014645137106023473 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_LPDF_HPP #define STAN_MATH_PRIM_PROB_GAMMA_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a gamma density for y with the specified * shape and inverse scale parameters. * Shape and inverse scale parameters must be greater than 0. * y must be greater than or equal to 0. 
* \f{eqnarray*}{ y &\sim& \mbox{\sf{Gamma}}(\alpha, \beta) \\ \log (p (y \, |\, \alpha, \beta) ) &=& \log \left( \frac{\beta^\alpha}{\Gamma(\alpha)} y^{\alpha - 1} \exp^{- \beta y} \right) \\ &=& \alpha \log(\beta) - \log(\Gamma(\alpha)) + (\alpha - 1) \log(y) - \beta y\\ & & \mathrm{where} \; y > 0 \f} * * @tparam T_y type of scalar * @tparam T_shape type of shape * @tparam T_inv_scale type of inverse scale * @param y A scalar variable. * @param alpha Shape parameter. * @param beta Inverse scale parameter. * @throw std::domain_error if alpha is not greater than 0. * @throw std::domain_error if beta is not greater than 0. * @throw std::domain_error if y is not greater than or equal to 0. */ template * = nullptr> return_type_t gamma_lpdf(const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using T_beta_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "gamma_lpdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Inverse scale parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_positive_finite(function, "Random variable", y_val); check_positive_finite(function, "Shape parameter", alpha_val); check_positive_finite(function, "Inverse scale parameter", beta_val); if (size_zero(y, alpha, beta)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_val); for (size_t n = 0; n < stan::math::size(y); n++) { if (y_vec[n] < 0) { return LOG_ZERO; } } size_t N = max_size(y, alpha, beta); 
T_partials_return logp(0.0); if (include_summand::value) { logp = -sum(lgamma(alpha_val)) * N / math::size(alpha); } const auto& log_y = to_ref_if::value>(log(y_val)); if (include_summand::value) { const auto& log_beta = to_ref_if::value>(log(beta_val)); logp += sum(alpha_val * log_beta) * N / max_size(alpha, beta); if (!is_constant_all::value) { partials<1>(ops_partials) = log_beta + log_y - digamma(alpha_val); } } if (include_summand::value) { logp += sum((alpha_val - 1.0) * log_y) * N / max_size(alpha, y); } if (include_summand::value) { logp -= sum(beta_val * y_val) * N / max_size(beta, y); } if (!is_constant_all::value) { partials<0>(ops_partials) = (alpha_val - 1) / y_val - beta_val; } if (!is_constant_all::value) { partials<2>(ops_partials) = alpha_val / beta_val - y_val; } return ops_partials.build(logp); } template inline return_type_t gamma_lpdf( const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { return gamma_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_lcdf.hpp0000644000176200001440000001003114645137106025364 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LCDF_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the inverse chi square log cumulative distribution function for * the given variate and degrees of freedom. If given containers of * matching sizes, returns the log sum of probabilities. 
* * @tparam T_y type of scalar parameter * @tparam T_dof type of degrees of freedom parameter * @param y scalar parameter * @param nu degrees of freedom parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is negative or nu is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t inv_chi_square_lcdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; static const char* function = "inv_chi_square_lcdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, nu)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); size_t N = max_size(y, nu); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(negative_infinity()); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return nu_dbl = nu_vec.val(i); gamma_vec[i] = tgamma(0.5 * nu_dbl); digamma_vec[i] = digamma(0.5 * nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return nu_dbl = 
nu_vec.val(n); const T_partials_return Pn = gamma_q(0.5 * nu_dbl, 0.5 * y_inv_dbl); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += 0.5 * y_inv_dbl * y_inv_dbl * exp(-0.5 * y_inv_dbl) * pow(0.5 * y_inv_dbl, 0.5 * nu_dbl - 1) / tgamma(0.5 * nu_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += 0.5 * grad_reg_inc_gamma(0.5 * nu_dbl, 0.5 * y_inv_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/ordered_logistic_rng.hpp0000644000176200001440000000225614645137106025576 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_RNG_HPP #define STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_RNG_HPP #include #include #include #include #include namespace stan { namespace math { template inline int ordered_logistic_rng( double eta, const Eigen::Matrix& c, RNG& rng) { using boost::variate_generator; static const char* function = "ordered_logistic"; check_finite(function, "Location parameter", eta); check_greater(function, "Size of cut points parameter", c.size(), 0); check_ordered(function, "Cut points parameter", c); check_finite(function, "Cut points parameter", c(c.size() - 1)); check_finite(function, "Cut points parameter", c(0)); Eigen::VectorXd cut(c.rows() + 1); cut(0) = 1 - inv_logit(eta - c(0)); for (int j = 1; j < c.rows(); j++) { cut(j) = inv_logit(eta - c(j - 1)) - inv_logit(eta - c(j)); } cut(c.rows()) = inv_logit(eta - c(c.rows() - 1)); return categorical_rng(cut, rng); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_rng.hpp0000644000176200001440000000433614645137106024050 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a pseudorandom binomial random variable for the 
given population * size and chance of success parameters using the specified random number * generator. * * beta can be a scalar or a one-dimensional container. * * @tparam T_N Type of population size parameter * @tparam T_theta Type of change of success parameter * @tparam RNG class of rng * @param N (Sequence of) population size parameter(s) * @param theta (Sequence of) chance of success parameter(s) * @param rng random number generator * @return (Sequence of) binomial random variate(s) * @throw std::domain_error if N is negative * @throw std::domain_error if theta is not a valid probability */ template inline typename VectorBuilder::type binomial_rng( const T_N& N, const T_theta& theta, RNG& rng) { using boost::binomial_distribution; using boost::variate_generator; using T_N_ref = ref_type_t; using T_theta_ref = ref_type_t; static const char* function = "binomial_rng"; check_consistent_sizes(function, "Population size parameter", N, "Probability Parameter", theta); T_N_ref N_ref = N; T_theta_ref theta_ref = theta; check_nonnegative(function, "Population size parameter", N_ref); check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); scalar_seq_view N_vec(N_ref); scalar_seq_view theta_vec(theta_ref); size_t M = max_size(N, theta); VectorBuilder output(M); for (size_t m = 0; m < M; ++m) { variate_generator > binomial_rng( rng, binomial_distribution<>(N_vec[m], theta_vec[m])); output[m] = binomial_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_cdf.hpp0000644000176200001440000001263014645137106025005 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_CDF_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the CDF of the 
Beta-Binomial distribution with given population * size, prior success, and prior failure parameters. Given containers of * matching sizes, returns the product of probabilities. * * @tparam T_n type of success parameter * @tparam T_N type of population size parameter * @tparam T_size1 type of prior success parameter * @tparam T_size2 type of prior failure parameter * * @param n success parameter * @param N population size parameter * @param alpha prior success parameter * @param beta prior failure parameter * @return probability or product of probabilities * @throw std::domain_error if N, alpha, or beta fails to be positive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t beta_binomial_cdf(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { using T_partials_return = partials_return_t; using std::exp; using T_N_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "beta_binomial_cdf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "First prior sample size parameter", alpha, "Second prior sample size parameter", beta); if (size_zero(n, N, alpha, beta)) { return 1.0; } T_N_ref N_ref = N; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_nonnegative(function, "Population size parameter", N_ref); check_positive_finite(function, "First prior sample size parameter", alpha_ref); check_positive_finite(function, "Second prior sample size parameter", beta_ref); T_partials_return P(1.0); auto ops_partials = make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n); scalar_seq_view N_vec(N_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t max_size_seq_view = max_size(n, N, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 
0) { return ops_partials.build(0.0); } } for (size_t i = 0; i < max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_vec.val(i) >= N_vec.val(i)) { continue; } const T_partials_return n_dbl = n_vec.val(i); const T_partials_return N_dbl = N_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); const T_partials_return beta_dbl = beta_vec.val(i); const T_partials_return N_minus_n = N_dbl - n_dbl; const T_partials_return mu = alpha_dbl + n_dbl + 1; const T_partials_return nu = beta_dbl + N_minus_n - 1; const T_partials_return one = 1; const T_partials_return F = hypergeometric_3F2({one, mu, 1 - N_minus_n}, {n_dbl + 2, 1 - nu}, one); T_partials_return C = lbeta(nu, mu) - lbeta(alpha_dbl, beta_dbl) - lbeta(N_minus_n, n_dbl + 2); C = F * exp(C) / (N_dbl + 1); const T_partials_return Pi = 1 - C; P *= Pi; T_partials_return digammaDiff = is_constant_all::value ? 0 : digamma(alpha_dbl + beta_dbl) - digamma(mu + nu); T_partials_return dF[6]; if (!is_constant_all::value) { grad_F32(dF, one, mu, 1 - N_minus_n, n_dbl + 2, 1 - nu, one); } if (!is_constant_all::value) { const T_partials_return g = -C * (digamma(mu) - digamma(alpha_dbl) + digammaDiff + dF[1] / F); partials<0>(ops_partials)[i] += g / Pi; } if (!is_constant_all::value) { const T_partials_return g = -C * (digamma(nu) - digamma(beta_dbl) + digammaDiff - dF[4] / F); partials<1>(ops_partials)[i] += g / Pi; } } if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(alpha); ++i) { partials<0>(ops_partials)[i] *= P; } } if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(beta); ++i) { partials<1>(ops_partials)[i] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_cdf.hpp0000644000176200001440000001120314645137106026073 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_CDF_HPP 
#define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the double exponential cumulative density function. Given * containers of matching sizes, returns the product of probabilities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. * @param y real parameter * @param mu location parameter * @param sigma scale parameter * @return probability or product of probabilities * @throw std::domain_error if y is nan, mu is infinite, * or sigma is nonpositive */ template * = nullptr> return_type_t double_exponential_cdf( const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using T_rep_deriv = std::conditional_t::value || is_vector::value || is_vector::value, T_partials_array, T_partials_return>; using std::exp; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "double_exponential_cdf"; T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_partials_return cdf(1.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, mu, sigma)) { return 1.0; } const auto& inv_sigma = to_ref(inv(sigma_val)); const auto& scaled_diff = to_ref_if::value>( (y_val - mu_val) * inv_sigma); 
const auto& exp_scaled_diff = to_ref(exp(scaled_diff)); T_rep_deriv rep_deriv; if (is_vector::value || is_vector::value) { using array_bool = Eigen::Array; cdf = forward_as(y_val < mu_val) .select(forward_as(exp_scaled_diff * 0.5), 1.0 - 0.5 / exp_scaled_diff) .prod(); rep_deriv = forward_as( forward_as(y_val < mu_val) .select((cdf * inv_sigma), forward_as(cdf * inv_sigma / (2 * exp_scaled_diff - 1)))); } else { if (is_vector::value) { cdf = forward_as(y_val < mu_val) ? forward_as(exp_scaled_diff * 0.5).prod() : forward_as(1.0 - 0.5 / exp_scaled_diff) .prod(); } else { cdf = forward_as(y_val < mu_val) ? forward_as(exp_scaled_diff * 0.5) : forward_as(1.0 - 0.5 / exp_scaled_diff); } if (forward_as(y_val < mu_val)) { rep_deriv = cdf * inv_sigma; } else { rep_deriv = cdf * inv_sigma / (2 * exp_scaled_diff - 1); } } if (!is_constant_all::value) { partials<0>(ops_partials) = rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = -rep_deriv * scaled_diff; } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/wishart_cholesky_rng.hpp0000644000176200001440000000375614645137106025645 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WISHART_CHOLESKY_RNG_HPP #define STAN_MATH_PRIM_PROB_WISHART_CHOLESKY_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a random Cholesky factor of the inverse covariance matrix * of the specified dimensionality drawn from the Wishart distribution * with the specified degrees of freedom * using the specified random number generator. 
* * @tparam RNG random number generator type * @param[in] nu scalar degrees of freedom * @param[in] L_S lower Cholesky factor of the scale matrix * @param[in, out] rng random-number generator * @return random lower Cholesky factor drawn from the given inverse Wishart * distribution * @throw std::domain_error if the scale matrix is not a Cholesky factor * @throw std::domain_error if the degrees of freedom is greater than k - 1 * where k is the dimension of L_S */ template inline Eigen::MatrixXd wishart_cholesky_rng(double nu, const Eigen::MatrixXd& L_S, RNG& rng) { using Eigen::MatrixXd; static const char* function = "wishart_cholesky_rng"; index_type_t k = L_S.rows(); check_square(function, "Cholesky Scale matrix", L_S); check_greater(function, "degrees of freedom > dims - 1", nu, k - 1); check_positive(function, "Cholesky Scale matrix", L_S.diagonal()); check_positive(function, "columns of Cholesky Scale matrix", L_S.cols()); MatrixXd B = MatrixXd::Zero(k, k); for (int j = 0; j < k; ++j) { for (int i = 0; i < j; ++i) { B(i, j) = normal_rng(0, 1, rng); } B(j, j) = std::sqrt(chi_square_rng(nu - j, rng)); } return L_S.template triangularView() * B.transpose(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_lcdf.hpp0000644000176200001440000001120514645137106025014 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t neg_binomial_lcdf( const T_n& n, const T_shape& alpha, const T_inv_scale& beta_param) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_n_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = 
"neg_binomial_lcdf"; check_consistent_sizes(function, "Failures variable", n, "Shape parameter", alpha, "Inverse scale parameter", beta_param); T_n_ref n_ref = n; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta_param; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); if (size_zero(n, alpha, beta_param)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_n = stan::math::size(n); size_t size_alpha = stan::math::size(alpha); size_t size_n_alpha = max_size(n, alpha); size_t max_size_seq_view = max_size(n, alpha, beta_param); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < size_n; i++) { if (n_vec.val(i) < 0) { return ops_partials.build(negative_infinity()); } } VectorBuilder::value, T_partials_return, T_n> digammaN_vec(size_n); VectorBuilder::value, T_partials_return, T_shape> digammaAlpha_vec(size_alpha); VectorBuilder::value, T_partials_return, T_n, T_shape> digammaSum_vec(size_n_alpha); if (!is_constant_all::value) { for (size_t i = 0; i < size_n; i++) { digammaN_vec[i] = digamma(n_vec.val(i) + 1); } for (size_t i = 0; i < size_alpha; i++) { digammaAlpha_vec[i] = digamma(alpha_vec.val(i)); } for (size_t i = 0; i < size_n_alpha; i++) { const T_partials_return n_dbl = n_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); digammaSum_vec[i] = digamma(n_dbl + alpha_dbl + 1); } } for (size_t i = 0; i < max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_vec.val(i) == std::numeric_limits::max()) { return ops_partials.build(0.0); } const T_partials_return n_dbl = n_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); const 
T_partials_return beta_dbl = beta_vec.val(i); const T_partials_return inv_beta_p1 = inv(beta_dbl + 1); const T_partials_return p_dbl = beta_dbl * inv_beta_p1; const T_partials_return d_dbl = square(inv_beta_p1); const T_partials_return Pi = inc_beta(alpha_dbl, n_dbl + 1.0, p_dbl); const T_partials_return beta_func = beta(n_dbl + 1, alpha_dbl); P += log(Pi); if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, alpha_dbl, n_dbl + 1, p_dbl, digammaAlpha_vec[i], digammaN_vec[i], digammaSum_vec[i], beta_func); partials<0>(ops_partials)[i] += g1 / Pi; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] += d_dbl * pow(1 - p_dbl, n_dbl) * pow(p_dbl, alpha_dbl - 1) / (beta_func * Pi); } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_log.hpp0000644000176200001440000000240014645137106025024 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_binomial_lpmf */ template return_type_t beta_binomial_log(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { return beta_binomial_lpmf(n, N, alpha, beta); } /** \ingroup prob_dists * @deprecated use beta_binomial_lpmf */ template return_type_t beta_binomial_log(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { return beta_binomial_lpmf(n, N, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/logistic_lpdf.hpp0000644000176200001440000000776214645137106024240 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGISTIC_LPDF_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { 
namespace math { // Logistic(y|mu, sigma) [sigma > 0] template * = nullptr> return_type_t logistic_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "logistic_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_finite(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, mu, sigma)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_minus_mu = to_ref_if::value>(y_val - mu_val); const auto& y_minus_mu_div_sigma = to_ref(y_minus_mu * inv_sigma); size_t N = max_size(y, mu, sigma); T_partials_return logp = -sum(y_minus_mu_div_sigma) - 2.0 * sum(log1p(exp(-y_minus_mu_div_sigma))); if (include_summand::value) { logp -= sum(log(sigma_val)) * N / math::size(sigma); } if (!is_constant_all::value) { const auto& exp_y_minus_mu_div_sigma = exp(y_minus_mu_div_sigma); const auto& y_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( (2 / (1 + exp_y_minus_mu_div_sigma) - 1) * inv_sigma); if (!is_constant_all::value) { partials<0>(ops_partials) = y_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = (-y_deriv * y_minus_mu - 1) * inv_sigma; } } if (!is_constant_all::value) { const auto& 
exp_mu_div_sigma = to_ref(exp(mu_val * inv_sigma)); edge<1>(ops_partials).partials_ = (1 - 2 * exp_mu_div_sigma / (exp_mu_div_sigma + exp(y_val * inv_sigma))) * inv_sigma; } return ops_partials.build(logp); } template inline return_type_t logistic_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { return logistic_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_lccdf.hpp0000644000176200001440000000663714645137106024230 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_LCCDF_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t uniform_lccdf(const T_y& y, const T_low& alpha, const T_high& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_low>; using T_beta_ref = ref_type_if_t::value, T_high>; static const char* function = "uniform_lccdf"; check_consistent_sizes(function, "Random variable", y, "Lower bound parameter", alpha, "Upper bound parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Lower bound parameter", alpha_val); check_finite(function, "Upper bound parameter", beta_val); check_greater(function, "Upper bound parameter", beta_val, alpha_val); if (size_zero(y, alpha, beta)) { return 0; } if (sum(promote_scalar(y_val < alpha_val)) || sum(promote_scalar(beta_val < y_val))) { return 0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); 
const auto& b_minus_a = to_ref_if::value>(beta_val - alpha_val); const auto& ccdf_log_n = to_ref_if::value>( 1 - (y_val - alpha_val) / b_minus_a); T_partials_return ccdf_log = sum(log(ccdf_log_n)); if (!is_constant_all::value) { partials<0>(ops_partials) = inv(-b_minus_a * ccdf_log_n); } if (!is_constant_all::value) { const auto& rep_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( inv(b_minus_a * b_minus_a * ccdf_log_n)); if (!is_constant_all::value) { partials<1>(ops_partials) = (beta_val - y_val) * rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = (y_val - alpha_val) * rep_deriv; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_cdf.hpp0000644000176200001440000001522414645137106024233 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_CDF_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t student_t_cdf(const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; using std::exp; using std::pow; static const char* function = "student_t_cdf"; T_y_ref y_ref = y; T_nu_ref nu_ref = nu; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, nu, mu, sigma)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view 
nu_vec(nu_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, nu, mu, sigma); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == NEGATIVE_INFTY) { return ops_partials.build(0.0); } } T_partials_return digammaHalf = 0; VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digammaNu_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digammaNuPlusHalf_vec(math::size(nu)); if (!is_constant_all::value) { digammaHalf = digamma(0.5); for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return nu_dbl = nu_vec.val(i); digammaNu_vec[i] = digamma(0.5 * nu_dbl); digammaNuPlusHalf_vec[i] = digamma(0.5 + 0.5 * nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return sigma_inv = 1.0 / sigma_vec.val(n); const T_partials_return t = (y_vec.val(n) - mu_vec.val(n)) * sigma_inv; const T_partials_return nu_dbl = nu_vec.val(n); const T_partials_return q = nu_dbl / (t * t); const T_partials_return r = 1.0 / (1.0 + q); const T_partials_return J = 2 * r * r * q / t; const T_partials_return betaNuHalf = beta(0.5, 0.5 * nu_dbl); double zJacobian = t > 0 ? -0.5 : 0.5; if (q < 2) { T_partials_return z = inc_beta(0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r); const T_partials_return Pn = t > 0 ? 
1.0 - 0.5 * z : 0.5 * z; const T_partials_return d_ibeta = pow(r, -0.5) * pow(1.0 - r, 0.5 * nu_dbl - 1) / betaNuHalf; P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += -zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, 0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r, digammaNu_vec[n], digammaHalf, digammaNuPlusHalf_vec[n], betaNuHalf); partials<1>(ops_partials)[n] += zJacobian * (d_ibeta * (r / t) * (r / t) + 0.5 * g1) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { partials<3>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv * t / Pn; } } else { T_partials_return z = 1.0 - inc_beta((T_partials_return)0.5, 0.5 * nu_dbl, r); zJacobian *= -1; const T_partials_return Pn = t > 0 ? 1.0 - 0.5 * z : 0.5 * z; T_partials_return d_ibeta = pow(1.0 - r, 0.5 * nu_dbl - 1) * pow(r, -0.5) / betaNuHalf; P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, (T_partials_return)0.5, 0.5 * nu_dbl, r, digammaHalf, digammaNu_vec[n], digammaNuPlusHalf_vec[n], betaNuHalf); partials<1>(ops_partials)[n] += zJacobian * (-d_ibeta * (r / t) * (r / t) + 0.5 * g2) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { partials<3>(ops_partials)[n] += -zJacobian * d_ibeta * J * sigma_inv * t / Pn; } } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(nu); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < 
stan::math::size(mu); ++n) { partials<2>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(sigma); ++n) { partials<3>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_ccdf_log.hpp0000644000176200001440000000137714645137106026235 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use exp_mod_normal_lccdf */ template return_type_t exp_mod_normal_ccdf_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { return exp_mod_normal_lccdf(y, mu, sigma, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_cdf.hpp0000644000176200001440000001372714645137106025233 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_CDF_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t exp_mod_normal_cdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_lambda_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exp_mod_normal_cdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Inv_scale paramter", lambda); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; 
T_lambda_ref lambda_ref = lambda; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); check_positive_finite(function, "Inv_scale parameter", lambda_val); if (size_zero(y, mu, sigma, lambda)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, lambda_ref); using T_y_val_scalar = scalar_type_t; if (is_vector::value) { if ((forward_as>(y_val) == NEGATIVE_INFTY) .any()) { return ops_partials.build(0.0); } } else { if (forward_as(y_val) == NEGATIVE_INFTY) { return ops_partials.build(0.0); } } const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& diff = to_ref(y_val - mu_val); const auto& v = to_ref(lambda_val * sigma_val); const auto& scaled_diff = to_ref(diff * INV_SQRT_TWO * inv_sigma); const auto& scaled_diff_diff = to_ref_if::value>( scaled_diff - v * INV_SQRT_TWO); const auto& erf_calc = to_ref(0.5 * (1 + erf(scaled_diff_diff))); const auto& exp_term = to_ref_if::value>( exp(0.5 * square(v) - lambda_val * diff)); const auto& cdf_n = to_ref(0.5 + 0.5 * erf(scaled_diff) - exp_term * erf_calc); T_partials_return cdf(1.0); if (is_vector::value) { cdf = forward_as(cdf_n).prod(); } else { cdf = forward_as(cdf_n); } if (!is_constant_all::value) { const auto& exp_term_2 = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( exp(-square(scaled_diff_diff))); if (!is_constant_all::value) { constexpr bool need_deriv_refs = !is_constant_all::value && !is_constant_all::value; const auto& deriv_1 = to_ref_if(lambda_val * exp_term * erf_calc); const auto& deriv_2 = to_ref_if( INV_SQRT_TWO_PI * exp_term * 
exp_term_2 * inv_sigma); const auto& sq_scaled_diff = square(scaled_diff); const auto& exp_m_sq_scaled_diff = exp(-sq_scaled_diff); const auto& deriv_3 = to_ref_if( INV_SQRT_TWO_PI * exp_m_sq_scaled_diff * inv_sigma); if (!is_constant_all::value) { const auto& deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( cdf * (deriv_1 - deriv_2 + deriv_3) / cdf_n); if (!is_constant_all::value) { partials<0>(ops_partials) = deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = -deriv; } } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = -cdf * ((deriv_1 - deriv_2) * v + (deriv_3 - deriv_2) * scaled_diff * SQRT_TWO) / cdf_n; } } if (!is_constant_all::value) { edge<3>(ops_partials).partials_ = cdf * exp_term * (INV_SQRT_TWO_PI * sigma_val * exp_term_2 - (v * sigma_val - diff) * erf_calc) / cdf_n; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_id_glm_lpdf.hpp0000644000176200001440000002145214645137106025216 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_ID_GLM_LPDF_HPP #define STAN_MATH_PRIM_PROB_NORMAL_ID_GLM_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the log PDF of the Generalized Linear Model (GLM) * with Normal distribution and id link function. * If containers are supplied, returns the log sum of the probabilities. * The idea is that normal_id_glm_lpdf(y, x, alpha, beta, sigma) should * compute a more efficient version of normal_lpdf(y, alpha + x * beta, sigma) * by using analytically simplified gradients. 
* * @tparam T_y type of vector of dependent variables (labels); * @tparam T_x type of the matrix of independent variables (features) * @tparam T_alpha type of the intercept(s); * this can be a vector (of the same length as y) of intercepts or a single * value (for models with constant intercept); * @tparam T_beta type of the weight vector; * this can also be a single value; * @tparam T_scale type of the (positive) scale(s); * this can be a vector (of the same length as y, for heteroskedasticity) * or a scalar. * * @param y scalar or vector of dependent variables. If it is a scalar it will * be broadcast - used for all instances. * @param x design matrix or row vector. If it is a row vector it will be * broadcast - used for all instances. * @param alpha intercept (in log odds) * @param beta weight vector * @param sigma (Sequence of) scale parameters for the normal * distribution. * @return log probability or log sum of probabilities * @throw std::domain_error if x, beta or alpha is infinite. * @throw std::domain_error if the scale is not positive. * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr> return_type_t normal_id_glm_lpdf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta, const T_scale& sigma) { using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; using Eigen::VectorXd; using std::isfinite; constexpr int T_x_rows = T_x::RowsAtCompileTime; using T_partials_return = partials_return_t; using T_scale_val = typename std::conditional_t< is_vector::value, Eigen::Array, -1, 1>, partials_return_t>; using T_y_scaled_tmp = typename std::conditional_t>; using T_y_ref = ref_type_if_t::value, T_y>; using T_x_ref = ref_type_if_t::value, T_x>; using T_alpha_ref = ref_type_if_t::value, T_alpha>; using T_beta_ref = ref_type_if_t::value, T_beta>; using T_sigma_ref = ref_type_if_t::value, T_scale>; const size_t N_instances = T_x_rows == 1 ? 
stan::math::size(y) : x.rows(); const size_t N_attributes = x.cols(); static const char* function = "normal_id_glm_lpdf"; check_consistent_size(function, "Vector of dependent variables", y, N_instances); check_consistent_size(function, "Weight vector", beta, N_attributes); check_consistent_size(function, "Vector of scale parameters", sigma, N_instances); check_consistent_size(function, "Vector of intercepts", alpha, N_instances); T_sigma_ref sigma_ref = sigma; const auto& sigma_val = value_of(sigma_ref); const auto& sigma_val_vec = to_ref(as_column_vector_or_scalar(sigma_val)); check_positive_finite(function, "Scale vector", sigma_val_vec); if (size_zero(y, sigma)) { return 0; } if (!include_summand::value) { return 0; } T_y_ref y_ref = y; T_x_ref x_ref = x; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; const auto& y_val = value_of(y_ref); const auto& x_val = to_ref_if::value>(value_of(x_ref)); const auto& alpha_val = value_of(alpha_ref); const auto& beta_val = value_of(beta_ref); const auto& y_val_vec = as_column_vector_or_scalar(y_val); const auto& alpha_val_vec = as_column_vector_or_scalar(alpha_val); const auto& beta_val_vec = to_ref_if::value>( as_column_vector_or_scalar(beta_val)); T_scale_val inv_sigma = 1.0 / as_array_or_scalar(sigma_val_vec); // the most efficient way to calculate this depends on template parameters T_partials_return y_scaled_sq_sum; Array y_scaled(N_instances); if (T_x_rows == 1) { T_y_scaled_tmp y_scaled_tmp = forward_as((x_val * beta_val_vec).coeff(0, 0)); y_scaled = (as_array_or_scalar(y_val_vec) - y_scaled_tmp - as_array_or_scalar(alpha_val_vec)) * inv_sigma; } else { y_scaled = x_val * beta_val_vec; y_scaled = (as_array_or_scalar(y_val_vec) - y_scaled - as_array_or_scalar(alpha_val_vec)) * inv_sigma; } auto ops_partials = make_partials_propagator(y_ref, x_ref, alpha_ref, beta_ref, sigma_ref); if (!(is_constant_all::value)) { Matrix mu_derivative = inv_sigma * y_scaled; if (!is_constant_all::value) { if (is_vector::value) 
{ partials<0>(ops_partials) = -mu_derivative; } else { partials<0>(ops_partials)[0] = -mu_derivative.sum(); } } if (!is_constant_all::value) { if (T_x_rows == 1) { edge<1>(ops_partials).partials_ = forward_as>( beta_val_vec * sum(mu_derivative)); } else { edge<1>(ops_partials).partials_ = (beta_val_vec * mu_derivative.transpose()).transpose(); } } if (!is_constant_all::value) { if (T_x_rows == 1) { edge<3>(ops_partials).partials_ = forward_as>( mu_derivative.sum() * x_val); } else { partials<3>(ops_partials) = mu_derivative.transpose() * x_val; } } if (!is_constant_all::value) { if (is_vector::value) { partials<2>(ops_partials) = mu_derivative; } else { partials<2>(ops_partials)[0] = sum(mu_derivative); } } if (!is_constant_all::value) { if (is_vector::value) { Array y_scaled_sq = y_scaled * y_scaled; y_scaled_sq_sum = sum(y_scaled_sq); partials<4>(ops_partials) = (y_scaled_sq - 1) * inv_sigma; } else { y_scaled_sq_sum = sum(y_scaled * y_scaled); partials<4>(ops_partials)[0] = (y_scaled_sq_sum - N_instances) * forward_as>(inv_sigma); } } else { y_scaled_sq_sum = sum(y_scaled * y_scaled); } } else { y_scaled_sq_sum = sum(y_scaled * y_scaled); } if (!isfinite(y_scaled_sq_sum)) { check_finite(function, "Vector of dependent variables", y_val_vec); check_finite(function, "Weight vector", beta_val_vec); check_finite(function, "Intercept", alpha_val_vec); // if all other checks passed, next will only fail if x is not finite check_finite(function, "Matrix of independent variables", y_scaled_sq_sum); } // Compute log probability. 
T_partials_return logp(0.0); if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * N_instances; } if (include_summand::value) { if (is_vector::value) { logp -= sum(log(sigma_val_vec)); } else { logp -= N_instances * log(forward_as>(sigma_val_vec)); } } logp -= 0.5 * y_scaled_sq_sum; return ops_partials.build(logp); } template inline return_type_t normal_id_glm_lpdf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta, const T_scale& sigma) { return normal_id_glm_lpdf(y, x, alpha, beta, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_rng.hpp0000644000176200001440000000334614645137106025255 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_RNG_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a pseudorandom inverse chi squared variate with the nu * degrees of freedom using the specified random number generator. * * nu can be a scalar or a one-dimensional container. 
* * @tparam T_deg Type of degrees of freedom parameter * @tparam RNG class of random number generator * @param nu (Sequence of) positive degrees of freedom parameter(s) * @param rng random number generator * @return (Sequence of) inverse chi squared random variate(s) * @throw std::domain_error if nu is nonpositive */ template inline typename VectorBuilder::type inv_chi_square_rng( const T_deg& nu, RNG& rng) { using boost::variate_generator; using boost::random::chi_squared_distribution; using T_nu_ref = ref_type_t; static const char* function = "inv_chi_square_rng"; T_nu_ref nu_ref = nu; check_positive_finite(function, "Degrees of freedom parameter", nu_ref); scalar_seq_view nu_vec(nu_ref); size_t N = stan::math::size(nu); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > chi_square_rng( rng, chi_squared_distribution<>(nu_vec[n])); output[n] = 1 / chi_square_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_lpdf.hpp0000644000176200001440000001236714645137106025423 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t exp_mod_normal_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_lambda_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exp_mod_normal_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Inv_scale paramter", lambda); T_y_ref y_ref = y; 
T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_lambda_ref lambda_ref = lambda; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); check_positive_finite(function, "Inv_scale parameter", lambda_val); if (size_zero(y, mu, sigma, lambda)) { return 0.0; } if (!include_summand::value) { return 0.0; } const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& sigma_sq = to_ref_if::value>(square(sigma_val)); const auto& lambda_sigma_sq = to_ref(lambda_val * sigma_sq); const auto& mu_minus_y = to_ref(mu_val - y_val); const auto& inner_term = to_ref_if::value>( (mu_minus_y + lambda_sigma_sq) * INV_SQRT_TWO * inv_sigma); const auto& erfc_calc = to_ref(erfc(inner_term)); size_t N = max_size(y, mu, sigma, lambda); T_partials_return logp(0.0); if (include_summand::value) { logp -= LOG_TWO * N; } if (include_summand::value) { logp += sum(log(lambda_val)) * N / math::size(lambda); } const auto& log_erfc_calc = log(erfc_calc); logp += sum(lambda_val * (mu_minus_y + 0.5 * lambda_sigma_sq) + log_erfc_calc); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, lambda_ref); if (!is_constant_all::value) { const auto& exp_m_sq_inner_term = exp(-square(inner_term)); const auto& deriv_logerfc = to_ref_if< !is_constant_all::value + !is_constant_all::value + !is_constant_all::value >= 2>(-SQRT_TWO_OVER_SQRT_PI * exp_m_sq_inner_term / erfc_calc); if (!is_constant_all::value) { const auto& deriv = to_ref_if < !is_constant_all::value && !is_constant_all::value > (lambda_val + deriv_logerfc * inv_sigma); if (!is_constant_all::value) { 
partials<0>(ops_partials) = -deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = deriv; } } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = sigma_val * square(lambda_val) + deriv_logerfc * (lambda_val - mu_minus_y / sigma_sq); } if (!is_constant_all::value) { partials<3>(ops_partials) = inv(lambda_val) + lambda_sigma_sq + mu_minus_y + deriv_logerfc * sigma_val; } } return ops_partials.build(logp); } template inline return_type_t exp_mod_normal_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { return exp_mod_normal_lpdf(y, mu, sigma, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_ccdf_log.hpp0000644000176200001440000000073614645137106025372 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_STD_NORMAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use std_normal_lccdf */ template inline return_type_t std_normal_ccdf_log(const T_y& y) { return std_normal_lccdf(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_rng.hpp0000644000176200001440000000326014645137106024577 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a exponential random variate with inverse scale beta * using the specified random number generator. * * beta can be a scalar or a one-dimensional container. 
* * @tparam T_inv Type of inverse scale parameter * @tparam RNG class of random number generator * @param beta (Sequence of) positive inverse scale parameter(s) * @param rng random number generator * @return (Sequence of) exponential random variate(s) * @throw std::domain_error if beta is nonpositive */ template inline typename VectorBuilder::type exponential_rng( const T_inv& beta, RNG& rng) { using boost::exponential_distribution; using boost::variate_generator; static const char* function = "exponential_rng"; using T_beta_ref = ref_type_t; T_beta_ref beta_ref = beta; check_positive_finite(function, "Inverse scale parameter", beta_ref); scalar_seq_view beta_vec(beta_ref); size_t N = stan::math::size(beta); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > exp_rng( rng, exponential_distribution<>(beta_vec[n])); output[n] = exp_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/frechet_lccdf.hpp0000644000176200001440000000567014645137106024165 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_LCCDF_HPP #define STAN_MATH_PRIM_PROB_FRECHET_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t frechet_lccdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "frechet_lccdf"; T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; using std::pow; check_positive(function, "Random variable", y_ref); check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y_ref, alpha_ref, sigma_ref)) { return 0; } 
T_partials_return ccdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view sigma_vec(sigma_ref); scalar_seq_view alpha_vec(alpha_ref); size_t N = max_size(y_ref, sigma_ref, alpha_ref); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return pow_n = pow(sigma_dbl / y_dbl, alpha_dbl); const T_partials_return exp_n = exp(-pow_n); ccdf_log += log1m(exp_n); const T_partials_return rep_deriv = pow_n / (1.0 / exp_n - 1); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= alpha_dbl / y_dbl * rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= log(y_dbl / sigma_dbl) * rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += alpha_dbl / sigma_dbl * rep_deriv; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/logistic_cdf.hpp0000644000176200001440000000727014645137106024041 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGISTIC_CDF_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // Logistic(y|mu, sigma) [sigma > 0] template * = nullptr> return_type_t logistic_cdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "logistic_cdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location 
parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == NEGATIVE_INFTY) { return ops_partials.build(0.0); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return sigma_inv_vec = 1.0 / sigma_vec.val(n); const T_partials_return Pn = 1.0 / (1.0 + exp(-(y_dbl - mu_dbl) * sigma_inv_vec)); P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += -exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -(y_dbl - mu_dbl) * sigma_inv_vec * exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(mu); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(sigma); ++n) { partials<2>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/frechet_cdf_log.hpp0000644000176200001440000000125314645137106024500 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_FRECHET_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use frechet_lcdf */ template return_type_t frechet_cdf_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return frechet_lcdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_lcdf.hpp0000644000176200001440000002024714645137106024541 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_STD_NORMAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template < typename T_y, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline return_type_t std_normal_lcdf(const T_y& y) { using T_partials_return = partials_return_t; using std::exp; using std::fabs; using std::log; using std::pow; using T_y_ref = ref_type_t; static const char* function = "std_normal_lcdf"; T_y_ref y_ref = y; check_not_nan(function, "Random variable", y_ref); if (size_zero(y)) { return 0; } T_partials_return lcdf(0.0); auto ops_partials = make_partials_propagator(y_ref); scalar_seq_view y_vec(y_ref); size_t N = stan::math::size(y); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return scaled_y = y_dbl * INV_SQRT_TWO; const T_partials_return x2 = square(scaled_y); // Rigorous numerical approximations are applied here to deal with values // of |scaled_y|>>0. This is needed to deal with rare base-rate // logistic regression problems where it is useful to use an alternative // link function instead. 
// // use erfc() instead of erf() in order to retain precision // since for x>0 erfc()->0 if (scaled_y > 0.0) { // CDF(x) = 1/2 + 1/2erf(x) = 1 - 1/2erfc(x) lcdf += log1p(-0.5 * erfc(scaled_y)); if (!is_not_nan(lcdf)) { lcdf = 0; } } else if (scaled_y > -20.0) { // CDF(x) = 1/2 - 1/2erf(-x) = 1/2erfc(-x) lcdf += log(erfc(-scaled_y)) + LOG_HALF; } else if (10.0 * log(fabs(scaled_y)) < log(std::numeric_limits::max())) { // entering territory where erfc(-x)~0 // need to use direct numerical approximation of lcdf instead // the following based on W. J. Cody, Math. Comp. 23(107):631-638 (1969) // CDF(x) = 1/2erfc(-x) const T_partials_return x4 = pow(scaled_y, 4); const T_partials_return x6 = pow(scaled_y, 6); const T_partials_return x8 = pow(scaled_y, 8); const T_partials_return x10 = pow(scaled_y, 10); const T_partials_return temp_p = 0.000658749161529837803157 + 0.0160837851487422766278 / x2 + 0.125781726111229246204 / x4 + 0.360344899949804439429 / x6 + 0.305326634961232344035 / x8 + 0.0163153871373020978498 / x10; const T_partials_return temp_q = -0.00233520497626869185443 - 0.0605183413124413191178 / x2 - 0.527905102951428412248 / x4 - 1.87295284992346047209 / x6 - 2.56852019228982242072 / x8 - 1.0 / x10; lcdf += LOG_HALF + log(INV_SQRT_PI + (temp_p / temp_q) / x2) - log(-scaled_y) - x2; } else { // scaled_y^10 term will overflow lcdf = stan::math::negative_infinity(); } if (!is_constant_all::value) { // compute partial derivatives // based on analytic form given by: // dln(CDF)/dx = exp(-x^2)/(sqrt(pi)*(1/2+erf(x)/2) T_partials_return dnlcdf = 0.0; T_partials_return t = 0.0; T_partials_return t2 = 0.0; T_partials_return t4 = 0.0; // calculate using piecewise function // (due to instability / inaccuracy in the various approximations) if (scaled_y > 2.9) { // approximation derived from Abramowitz and Stegun (1964) 7.1.26 t = 1.0 / (1.0 + 0.3275911 * scaled_y); t2 = square(t); t4 = pow(t, 4); dnlcdf = INV_SQRT_PI / (exp(x2) - 0.254829592 + 0.284496736 * t - 
1.421413741 * t2 + 1.453152027 * t2 * t - 1.061405429 * t4); } else if (scaled_y > 2.5) { // in the trouble area where all of the standard numerical // approximations are unstable - bridge the gap using Taylor // expansions of the analytic function // use Taylor expansion centred around x=2.7 t = scaled_y - 2.7; t2 = square(t); t4 = pow(t, 4); dnlcdf = 0.0003849882382 - 0.002079084702 * t + 0.005229340880 * t2 - 0.008029540137 * t2 * t + 0.008232190507 * t4 - 0.005692364250 * t4 * t + 0.002399496363 * pow(t, 6); } else if (scaled_y > 2.1) { // use Taylor expansion centred around x=2.3 t = scaled_y - 2.3; t2 = square(t); t4 = pow(t, 4); dnlcdf = 0.002846135439 - 0.01310032351 * t + 0.02732189391 * t2 - 0.03326906904 * t2 * t + 0.02482478940 * t4 - 0.009883071924 * t4 * t - 0.0002771362254 * pow(t, 6); } else if (scaled_y > 1.5) { // use Taylor expansion centred around x=1.85 t = scaled_y - 1.85; t2 = square(t); t4 = pow(t, 4); dnlcdf = 0.01849212058 - 0.06876280470 * t + 0.1099906382 * t2 - 0.09274533184 * t2 * t + 0.03543327418 * t4 + 0.005644855518 * t4 * t - 0.01111434424 * pow(t, 6); } else if (scaled_y > 0.8) { // use Taylor expansion centred around x=1.15 t = scaled_y - 1.15; t2 = square(t); t4 = pow(t, 4); dnlcdf = 0.1585747034 - 0.3898677543 * t + 0.3515963775 * t2 - 0.09748053605 * t2 * t - 0.04347986191 * t4 + 0.02182506378 * t4 * t + 0.01074751427 * pow(t, 6); } else if (scaled_y > 0.1) { // use Taylor expansion centred around x=0.45 t = scaled_y - 0.45; t2 = square(t); t4 = pow(t, 4); dnlcdf = 0.6245634904 - 0.9521866949 * t + 0.3986215682 * t2 + 0.04700850676 * t2 * t - 0.03478651979 * t4 - 0.01772675404 * t4 * t + 0.0006577254811 * pow(t, 6); } else if (10.0 * log(fabs(scaled_y)) < log(std::numeric_limits::max())) { // approximation derived from Abramowitz and Stegun (1964) 7.1.26 // use fact that erf(x)=-erf(-x) // Abramowitz and Stegun define this for -inf::value) { partials<0>(ops_partials)[n] += dnlcdf * INV_SQRT_TWO; } } } return 
ops_partials.build(lcdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_lcdf.hpp0000644000176200001440000001003714645137106024516 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_LCDF_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the chi square log cumulative distribution function for the given * variate and degrees of freedom. If given containers of matching sizes, * returns the log sum of probabilities. * * @tparam T_y type of scalar parameter * @tparam T_dof type of degrees of freedom parameter * @param y scalar parameter * @param nu degrees of freedom parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is negative or nu is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t chi_square_lcdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; static const char* function = "chi_square_lcdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; check_not_nan(function, "Random variable", y_ref); check_nonnegative(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); if (size_zero(y, nu)) { return 0; } T_partials_return cdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); size_t N = max_size(y, nu); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if 
(y_vec.val(i) == 0) { return ops_partials.build(negative_infinity()); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return alpha_dbl = nu_vec.val(i) * 0.5; gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(0.0); } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return alpha_dbl = nu_vec.val(n) * 0.5; const T_partials_return beta_dbl = 0.5; const T_partials_return Pn = gamma_p(alpha_dbl, beta_dbl * y_dbl); cdf_log += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += beta_dbl * exp(-beta_dbl * y_dbl) * pow(beta_dbl * y_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= 0.5 * grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_logit_lpmf.hpp0000644000176200001440000000727714645137106025626 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_LPMF_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log PMF of the logit-parametrized Bernoulli distribution. If * containers are supplied, returns the log sum of the probabilities. 
* * @tparam T_n type of integer parameter * @tparam T_prob type of chance of success parameter * @param n integer parameter * @param theta logit-transformed chance of success parameter * @return log probability or log sum of probabilities * @throw std::domain_error if theta is infinite. * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr> return_type_t bernoulli_logit_lpmf(const T_n& n, const T_prob& theta) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using std::exp; using T_n_ref = ref_type_if_t::value, T_n>; using T_theta_ref = ref_type_if_t::value, T_prob>; static const char* function = "bernoulli_logit_lpmf"; check_consistent_sizes(function, "Random variable", n, "Probability parameter", theta); if (size_zero(n, theta)) { return 0.0; } T_n_ref n_ref = n; T_theta_ref theta_ref = theta; check_bounded(function, "n", n_ref, 0, 1); decltype(auto) theta_val = to_ref(as_value_column_array_or_scalar(theta_ref)); check_not_nan(function, "Logit transformed probability parameter", theta_val); if (!include_summand::value) { return 0.0; } const auto& n_col = as_column_vector_or_scalar(n_ref); const auto& n_double = value_of_rec(n_col); auto signs = to_ref_if::value>( (2 * as_array_or_scalar(n_double) - 1)); T_partials_array ntheta; if (is_vector::value || is_vector::value) { ntheta = forward_as(signs * theta_val); } else { T_partials_return ntheta_s = forward_as(signs * theta_val); ntheta = T_partials_array::Constant(1, 1, ntheta_s); } T_partials_array exp_m_ntheta = exp(-ntheta); static const double cutoff = 20.0; T_partials_return logp = sum( (ntheta > cutoff) .select(-exp_m_ntheta, (ntheta < -cutoff).select(ntheta, -log1p(exp_m_ntheta)))); auto ops_partials = make_partials_propagator(theta_ref); if (!is_constant_all::value) { edge<0>(ops_partials).partials_ = (ntheta > cutoff) .select(-exp_m_ntheta, (ntheta >= -cutoff) .select(signs * exp_m_ntheta / (exp_m_ntheta + 1), signs)); } return 
ops_partials.build(logp); } template inline return_type_t bernoulli_logit_lpmf(const T_n& n, const T_prob& theta) { return bernoulli_logit_lpmf(n, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_cdf_log.hpp0000644000176200001440000000137014645137106026063 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use exp_mod_normal_lcdf */ template return_type_t exp_mod_normal_cdf_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { return exp_mod_normal_lcdf(y, mu, sigma, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_lpdf.hpp0000644000176200001440000001153014645137106023663 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_LPDF_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the Cauchy density for the specified scalar(s) given * the specified location parameter(s) and scale parameter(s). y, * mu, or sigma can each either be scalar a vector. Any vector * inputs must be the same length. * *

The result log probability is defined to be the sum of * the log probabilities for each observation/mu/sigma triple. * * @tparam T_y type of scalar outcome * @tparam T_loc type of location * @tparam T_scale type of scale * @param y (Sequence of) scalar(s). * @param mu (Sequence of) location(s). * @param sigma (Sequence of) scale(s). * @return The log of the product of densities. */ template * = nullptr> return_type_t cauchy_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::log; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "cauchy_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; if (size_zero(y, mu, sigma)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); size_t N = max_size(y, mu, sigma); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_minus_mu = to_ref_if::value>(y_val - mu_val); logp -= sum(log1p(square(y_minus_mu * inv_sigma))); if (include_summand::value) { logp -= N * LOG_PI; } if (include_summand::value) { logp -= sum(log(sigma_val)) * N / math::size(sigma); } if (!is_constant_all::value) { const auto& sigma_squared = to_ref_if::value>(square(sigma_val)); const auto& y_minus_mu_squared = 
to_ref_if::value>(square(y_minus_mu)); if (!is_constant_all::value) { auto mu_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( 2 * y_minus_mu / (sigma_squared + y_minus_mu_squared)); if (!is_constant_all::value) { if (is_vector::value) { partials<0>(ops_partials) = -mu_deriv; } else { partials<0>(ops_partials)[0] = -sum(mu_deriv); } } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(mu_deriv); } } if (!is_constant_all::value) { partials<2>(ops_partials) = (y_minus_mu_squared - sigma_squared) * inv_sigma / (sigma_squared + y_minus_mu_squared); } } return ops_partials.build(logp); } template inline return_type_t cauchy_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { return cauchy_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/discrete_range_log.hpp0000644000176200001440000000161114645137106025220 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LOG_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LOG_HPP #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use discrete_range_lpmf */ template double discrete_range_log(const T_y& y, const T_lower& lower, const T_upper& upper) { return discrete_range_lpmf(y, lower, upper); } /** \ingroup prob_dists * @deprecated use discrete_range_lpmf */ template inline double discrete_range_log(const T_y& y, const T_lower& lower, const T_upper& upper) { return discrete_range_lpmf(y, lower, upper); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/discrete_range_ccdf_log.hpp0000644000176200001440000000107014645137106026176 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_CCDF_LOG_HPP #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use discrete_range_lccdf */ template double discrete_range_ccdf_log(const T_y& y, const T_lower& lower, 
const T_upper& upper) { return discrete_range_lccdf(y, lower, upper); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_cdf.hpp0000644000176200001440000001354114645137106027133 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_CDF_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the skew double exponential cumulative density * function. Given containers of matching sizes, returns the log sum of * probabilities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. * @tparam T_skewness type of skewness parameter. * @param y real parameter * @param mu location parameter * @param sigma scale parameter * @param tau skewness parameter * @return log probability or log sum of probabilities * @throw std::domain_error if mu is infinite or sigma is nonpositive or tau is * not bound between 0.0 and 1.0 * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t skew_double_exponential_cdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { using T_partials_return = partials_return_t; static const char* function = "skew_double_exponential_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Shape parameter", sigma, "Skewness parameter", tau); auto&& y_ref = to_ref(y); auto&& mu_ref = to_ref(mu); auto&& sigma_ref = to_ref(sigma); auto&& tau_ref = to_ref(tau); auto&& y_val = as_value_array_or_scalar(y_ref); auto&& mu_val = as_value_array_or_scalar(mu_ref); auto&& sigma_val = as_value_array_or_scalar(sigma_ref); auto&& tau_val = as_value_array_or_scalar(tau_ref); check_not_nan(function, "Random variable", y_val); check_finite(function, 
"Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); check_bounded(function, "Skewness parameter", tau_val, 0.0, 1.0); if (size_zero(y, mu, sigma, tau)) { return 1.0; } T_partials_return cdf(1.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, tau_ref); scalar_seq_view> y_vec(y_val); scalar_seq_view> mu_vec(mu_val); scalar_seq_view> sigma_vec(sigma_val); scalar_seq_view> tau_vec(tau_val); const auto N = max_size(y, mu, sigma, tau); auto inv_sigma_val = to_ref(inv(sigma_val)); scalar_seq_view inv_sigma(inv_sigma_val); for (int i = 0; i < N; ++i) { const T_partials_return y_dbl = y_vec[i]; const T_partials_return mu_dbl = mu_vec[i]; const T_partials_return sigma_dbl = sigma_vec[i]; const T_partials_return tau_dbl = tau_vec[i]; const T_partials_return y_m_mu = y_dbl - mu_dbl; const T_partials_return diff_sign = sign(y_m_mu); const T_partials_return diff_sign_smaller_0 = step(-diff_sign); const T_partials_return abs_diff_y_mu = fabs(y_m_mu); const T_partials_return abs_diff_y_mu_over_sigma = abs_diff_y_mu * inv_sigma[i]; const T_partials_return expo = (diff_sign_smaller_0 + diff_sign * tau_dbl) * abs_diff_y_mu_over_sigma; const T_partials_return inv_exp_2_expo_tau = inv(exp(2.0 * expo) + tau_dbl - 1.0); const T_partials_return rep_deriv = y_dbl < mu_dbl ? 2.0 * inv_sigma[i] * (1.0 - tau_dbl) : -2.0 * (tau_dbl - 1.0) * tau_dbl * inv_sigma[i] * inv_exp_2_expo_tau; const T_partials_return sig_deriv = y_dbl < mu_dbl ? 2.0 * inv_sigma[i] * expo : -rep_deriv * expo / tau_dbl; const T_partials_return skew_deriv = y_dbl < mu_dbl ? 
1.0 / tau_dbl + 2.0 * inv_sigma[i] * y_m_mu * diff_sign : (sigma_dbl - 2.0 * (tau_dbl - 1.0) * y_m_mu) * inv_sigma[i] * inv_exp_2_expo_tau; T_partials_return cdfn(1.0); if (y_dbl <= mu_dbl) { cdfn *= tau_dbl * exp(-2.0 * expo); } else { cdfn *= 1.0 - (1.0 - tau_dbl) * exp(-2.0 * expo); } cdf *= cdfn; if (!is_constant_all::value) { partials<0>(ops_partials)[i] += rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] -= rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[i] += sig_deriv; } if (!is_constant_all::value) { partials<3>(ops_partials)[i] += skew_deriv; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(mu); ++n) { partials<1>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(sigma); ++n) { partials<2>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(tau); ++n) { partials<3>(ops_partials)[n] *= cdf; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_student_t_lpdf.hpp0000644000176200001440000001167614645137106025645 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_LPDF_HPP #define STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the multivariate student t density for the given y, mu, * nu, and scale matrix. * * This version of the function is vectorized on y and mu. * * @param y scalar vector of random variates * @param nu scalar degrees of freedom * @param mu location vector * @param Sigma scale matrix * @return The log of the multivariate student t density. 
* @throw std::domain_error if LL' is not square, not symmetric, * or not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_dof Type of scalar. * @tparam T_loc Type of location. * @tparam T_scale Type of scale. * @return log probability of the multivariate student t distribution. */ template return_type_t multi_student_t_lpdf( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& Sigma) { using T_scale_elem = typename scalar_type::type; using lp_type = return_type_t; using Eigen::Matrix; using std::log; using std::vector; static const char* function = "multi_student_t"; check_not_nan(function, "Degrees of freedom parameter", nu); check_positive(function, "Degrees of freedom parameter", nu); check_finite(function, "Degrees of freedom parameter", nu); check_consistent_sizes_mvt(function, "y", y, "mu", mu); vector_seq_view y_vec(y); vector_seq_view mu_vec(mu); size_t size_vec = max_size_mvt(y, mu); if (size_vec == 0) { return 0; } int num_dims = y_vec[0].size(); if (num_dims == 0) { return 0; } for (size_t i = 1, size_mvt_y = size_mvt(y); i < size_mvt_y; i++) { check_size_match( function, "Size of one of the vectors of the random variable", y_vec[i].size(), "Size of another vector of the random variable", y_vec[i - 1].size()); } for (size_t i = 1, size_mvt_mu = size_mvt(mu); i < size_mvt_mu; i++) { check_size_match(function, "Size of one of the vectors " "of the location variable", mu_vec[i].size(), "Size of another vector of " "the location variable", mu_vec[i - 1].size()); } check_size_match(function, "Size of random variable", num_dims, "size of location parameter", mu_vec[0].size()); check_size_match(function, "Size of random variable", num_dims, "rows of scale parameter", Sigma.rows()); check_size_match(function, "Size of random variable", num_dims, "columns of scale parameter", Sigma.cols()); for (size_t i = 0; i < size_vec; i++) { check_finite(function, "Location parameter", mu_vec[i]); check_not_nan(function, "Random variable", 
y_vec[i]); } const auto& Sigma_ref = to_ref(Sigma); check_symmetric(function, "Scale parameter", Sigma_ref); auto ldlt_Sigma = make_ldlt_factor(Sigma_ref); check_ldlt_factor(function, "LDLT_Factor of scale parameter", ldlt_Sigma); lp_type lp(0); if (include_summand::value) { lp += lgamma(0.5 * (nu + num_dims)) * size_vec; lp -= lgamma(0.5 * nu) * size_vec; lp -= (0.5 * num_dims) * log(nu) * size_vec; } if (include_summand::value) { lp -= (0.5 * num_dims) * LOG_PI * size_vec; } using Eigen::Array; if (include_summand::value) { lp -= 0.5 * log_determinant_ldlt(ldlt_Sigma) * size_vec; } if (include_summand::value) { lp_type sum_lp_vec(0.0); for (size_t i = 0; i < size_vec; i++) { const auto& y_col = as_column_vector_or_scalar(y_vec[i]); const auto& mu_col = as_column_vector_or_scalar(mu_vec[i]); sum_lp_vec += log1p(trace_inv_quad_form_ldlt(ldlt_Sigma, y_col - mu_col) / nu); } lp -= 0.5 * (nu + num_dims) * sum_lp_vec; } return lp; } template inline return_type_t multi_student_t_lpdf( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& Sigma) { return multi_student_t_lpdf(y, nu, mu, Sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_lpdf.hpp0000644000176200001440000000407214645137106024554 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_STD_NORMAL_LPDF_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the normal density for the specified scalar(s) given * a location of 0 and a scale of 1. y can be either * a scalar or a vector. * *

The result log probability is defined to be the sum of the * log probabilities for each observation. * * @tparam T_y type of scalar * @param y (Sequence of) scalar(s). * @return The log of the product of the densities. * @throw std::domain_error if any scalar is nan. */ template < bool propto, typename T_y, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> return_type_t std_normal_lpdf(const T_y& y) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; static const char* function = "std_normal_lpdf"; T_y_ref y_ref = y; check_not_nan(function, "Random variable", y_ref); if (size_zero(y)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = make_partials_propagator(y_ref); scalar_seq_view y_vec(y_ref); size_t N = stan::math::size(y); for (size_t n = 0; n < N; n++) { const T_partials_return y_val = y_vec.val(n); logp += y_val * y_val; if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= y_val; } } logp *= -0.5; if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * N; } return ops_partials.build(logp); } template inline return_type_t std_normal_lpdf(const T_y& y) { return std_normal_lpdf(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_rng.hpp0000644000176200001440000000453114645137106026565 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_RNG_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_RNG_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a scaled chi square random variate for the given * number of degrees of freedom and scale using the specified random * number generator. * * nu and sigma can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_deg type of degrees of freedom parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param nu (Sequence of) positive degrees of freedom parameter(s) * @param s (Sequence of) positive scale parameter(s) * @param rng random number generator * @return (Sequence of) scaled chi square random variate(s) * @throw std::domain_error if nu or sigma are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type scaled_inv_chi_square_rng(const T_deg& nu, const T_scale& s, RNG& rng) { using boost::variate_generator; using boost::random::chi_squared_distribution; static const char* function = "scaled_inv_chi_square_rng"; check_consistent_sizes(function, "Location parameter", nu, "Scale Parameter", s); const auto& nu_ref = to_ref(nu); const auto& s_ref = to_ref(s); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_positive_finite(function, "Scale parameter", s_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view s_vec(s_ref); size_t N = max_size(nu, s); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > chi_square_rng( rng, chi_squared_distribution<>(nu_vec[n])); output[n] = nu_vec[n] * s_vec[n] * s_vec[n] / chi_square_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_gp_log.hpp0000644000176200001440000000355214645137106024070 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_GP_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTI_GP_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of a multivariate Gaussian Process for the given y, Sigma, and * w. y is a dxN matrix, where each column is a different observation and each * row is a different output dimension. 
The Gaussian Process is assumed to * have a scaled kernel matrix with a different scale for each output dimension. * This distribution is equivalent to: * for (i in 1:d) row(y, i) ~ multi_normal(0, (1/w[i])*Sigma). * * @deprecated use multi_gp_lpdf * * @param y A dxN matrix * @param Sigma The NxN kernel matrix * @param w A d-dimensional vector of positive inverse scale parameters for each * output. * @return The log of the multivariate GP density. * @throw std::domain_error if Sigma is not square, not symmetric, * or not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_covar Type of kernel. * @tparam T_w Type of weight. */ template return_type_t multi_gp_log(const T_y& y, const T_covar& Sigma, const T_w& w) { return multi_gp_lpdf(y, Sigma, w); } /** \ingroup multivar_dists * @deprecated use multi_gp_lpdf */ template inline return_type_t multi_gp_log(const T_y& y, const T_covar& Sigma, const T_w& w) { return multi_gp_lpdf<>(y, Sigma, w); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_lpmf.hpp0000644000176200001440000001207614645137106024220 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_LPMF_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log PMF for the binomial distribution evaluated at the * specified success, population size, and chance of success. If given * containers of matching lengths, returns the log sum of probabilities. 
* * @tparam T_n type of successes parameter * @tparam T_N type of population size parameter * @tparam T_prob type of chance of success parameter * @param n successes parameter * @param N population size parameter * @param theta chance of success parameter * @return log probability or log sum of probabilities * @throw std::domain_error if n is negative or greater than N * @throw std::domain_error if N is negative * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t binomial_lpmf(const T_n& n, const T_N& N, const T_prob& theta) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_t; using T_N_ref = ref_type_t; using T_theta_ref = ref_type_t; static const char* function = "binomial_lpmf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "Probability parameter", theta); T_n_ref n_ref = n; T_N_ref N_ref = N; T_theta_ref theta_ref = theta; check_bounded(function, "Successes variable", value_of(n_ref), 0, N_ref); check_nonnegative(function, "Population size parameter", N_ref); check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); if (size_zero(n, N, theta)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp = 0; auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view N_vec(N_ref); scalar_seq_view theta_vec(theta_ref); size_t size_theta = stan::math::size(theta); size_t max_size_seq_view = max_size(n, N, theta); VectorBuilder log1m_theta(size_theta); for (size_t i = 0; i < size_theta; ++i) { log1m_theta[i] = log1m(theta_vec.val(i)); } if (include_summand::value) { for (size_t i = 0; i < max_size_seq_view; ++i) { logp += binomial_coefficient_log(N_vec[i], n_vec[i]); } } for (size_t i = 0; i < max_size_seq_view; ++i) { if (N_vec[i] != 0) { if (n_vec[i] == 0) { logp += N_vec[i] * log1m_theta[i]; } else if 
(n_vec[i] == N_vec[i]) { logp += n_vec[i] * log(theta_vec.val(i)); } else { logp += n_vec[i] * log(theta_vec.val(i)) + (N_vec[i] - n_vec[i]) * log1m_theta[i]; } } } if (!is_constant_all::value) { if (size_theta == 1) { T_partials_return sum_n = 0; T_partials_return sum_N = 0; for (size_t i = 0; i < max_size_seq_view; ++i) { sum_n += n_vec[i]; sum_N += N_vec[i]; } const T_partials_return theta_dbl = theta_vec.val(0); if (sum_N != 0) { if (sum_n == 0) { partials<0>(ops_partials)[0] -= sum_N / (1.0 - theta_dbl); } else if (sum_n == sum_N) { partials<0>(ops_partials)[0] += sum_n / theta_dbl; } else { partials<0>(ops_partials)[0] += sum_n / theta_dbl - (sum_N - sum_n) / (1.0 - theta_dbl); } } } else { for (size_t i = 0; i < max_size_seq_view; ++i) { const T_partials_return theta_dbl = theta_vec.val(i); if (N_vec[i] != 0) { if (n_vec[i] == 0) { partials<0>(ops_partials)[i] -= N_vec[i] / (1.0 - theta_dbl); } else if (n_vec[i] == N_vec[i]) { partials<0>(ops_partials)[i] += n_vec[i] / theta_dbl; } else { partials<0>(ops_partials)[i] += n_vec[i] / theta_dbl - (N_vec[i] - n_vec[i]) / (1.0 - theta_dbl); } } } } } return ops_partials.build(logp); } template inline return_type_t binomial_lpmf(const T_n& n, const T_N& N, const T_prob& theta) { return binomial_lpmf(n, N, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_cdf_log.hpp0000644000176200001440000000073014645137106025221 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_STD_NORMAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use std_normal_lcdf */ template inline return_type_t std_normal_cdf_log(const T_y& y) { return std_normal_lcdf(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_log.hpp0000644000176200001440000000171714645137106026131 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use double_exponential_lpdf */ template return_type_t double_exponential_log( const T_y& y, const T_loc& mu, const T_scale& sigma) { return double_exponential_lpdf(y, mu, sigma); } /** \ingroup prob_dists * @deprecated use double_exponential_lpdf */ template return_type_t double_exponential_log( const T_y& y, const T_loc& mu, const T_scale& sigma) { return double_exponential_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_rng.hpp0000644000176200001440000000474314645137106023737 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_RNG_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a uniform random variate for the given upper and lower bounds using * the specified random number generator. * * alpha and beta can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_alpha type of shape parameter * @tparam T_beta type of inverse scale parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) lower bound parameter(s) * @param beta (Sequence of) upper bound parameter(s) * @param rng random number generator * @return (Sequence of) uniform random variate(s) * @throw std::domain_error if alpha or beta are non-finite * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type uniform_rng( const T_alpha& alpha, const T_beta& beta, RNG& rng) { using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; using boost::variate_generator; using boost::random::uniform_real_distribution; static const char* function = "uniform_rng"; check_consistent_sizes(function, "Lower bound parameter", alpha, "Upper bound parameter", beta); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_finite(function, "Lower bound parameter", alpha_ref); check_finite(function, "Upper bound parameter", beta_ref); check_greater(function, "Upper bound parameter", beta_ref, alpha_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(alpha, beta); VectorBuilder output(N); variate_generator > uniform_rng( rng, uniform_real_distribution<>(0.0, 1.0)); for (size_t n = 0; n < N; ++n) { output[n] = (beta_vec[n] - alpha_vec[n]) * uniform_rng() + alpha_vec[n]; } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/categorical_log.hpp0000644000176200001440000000164214645137106024523 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CATEGORICAL_LOG_HPP #define STAN_MATH_PRIM_PROB_CATEGORICAL_LOG_HPP #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use categorical_lpmf */ template inline return_type_t categorical_log(const T_n& ns, const T_prob& theta) { return categorical_lpmf(ns, theta); } 
/** \ingroup multivar_dists * @deprecated use categorical_lpmf */ template inline return_type_t categorical_log(const T_n& ns, const T_prob& theta) { return categorical_lpmf(ns, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_rng.hpp0000644000176200001440000000541014645137106025253 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_RNG_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return an exponentially modified normal random variate for the * given location, scale, and inverse scale using the specified random * number generator. * * mu, sigma, and lambda can each be a scalar or a one-dimensional container. * Any non-scalar inputs must be the same size. * * @tparam T_loc Type of location parameter * @tparam T_scale Type of scale parameter * @tparam T_inv_scale Type of inverse scale parameter * @tparam RNG type of random number generator * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) scale parameter(s) * @param lambda (Sequence of) inverse scale parameter(s) * @param rng random number generator * @return (Sequence of) Exponentially modified normal random variate(s) * @throw std::domain_error if mu is infinite, sigma is nonpositive, * or lambda is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type exp_mod_normal_rng(const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda, RNG& rng) { static const char* function = "exp_mod_normal_rng"; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; using T_lambda_ref = ref_type_t; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma, "Inv_scale Parameter", lambda); T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_lambda_ref lambda_ref = 
lambda; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); check_positive_finite(function, "Inv_scale parameter", lambda_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); scalar_seq_view lambda_vec(lambda_ref); size_t N = max_size(mu, sigma, lambda); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { output[n] = normal_rng(mu_vec[n], sigma_vec[n], rng) + exponential_rng(lambda_vec[n], rng); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_cdf_log.hpp0000644000176200001440000000100414645137106025175 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use chi_square_lcdf */ template return_type_t chi_square_cdf_log(const T_y& y, const T_dof& nu) { return chi_square_lcdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/ordered_logistic_glm_lpmf.hpp0000644000176200001440000002041214645137106026577 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_GLM_LPMF_HPP #define STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_GLM_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the log PMF of the ordinal regression Generalized Linear Model * (GLM). This is equivalent to and faster than ordered_logistic_lpmf(y, x * * beta, cuts). * * @tparam T_y type of integer vector of classes. It can be either * `std::vector` or `int`. * @tparam T_x type of the matrix of independent variables (features) * @tparam T_beta type of the vector of weights * @tparam T_cuts type of the vector of cutpoints * * @param y a scalar or vector of classes. 
If it is a scalar it will be * broadcast - used for all instances. Values should be between 1 and number * of classes, including endpoints. * @param x design matrix or row vector. If it is a row vector it will be * broadcast - used for all instances. * @param beta weight vector * @param cuts cutpoints vector * @return log probability * @throw std::domain_error If any class is not between 1 and * the number of cutpoints plus 2 or if the cutpoint vector is not sorted in * ascending order or any input is not finite * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr, require_all_col_vector_t* = nullptr> return_type_t ordered_logistic_glm_lpmf( const T_y& y, const T_x& x, const T_beta& beta, const T_cuts& cuts) { using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; using Eigen::VectorXd; using std::exp; using std::isfinite; constexpr int T_x_rows = T_x::RowsAtCompileTime; using T_cuts_partials = partials_return_t; using T_xbeta_partials = partials_return_t; using T_partials_return = partials_return_t; typedef typename std::conditional_t< T_x_rows == 1, T_xbeta_partials, Eigen::Matrix> T_location; using T_y_ref = ref_type_t; using T_x_ref = ref_type_if_t::value, T_x>; using T_beta_ref = ref_type_if_t::value, T_beta>; using T_cuts_ref = ref_type_if_t::value, T_cuts>; const size_t N_instances = T_x_rows == 1 ? 
stan::math::size(y) : x.rows(); const size_t N_attributes = x.cols(); const size_t N_classes = stan::math::size(cuts) + 1; static const char* function = "ordered_logistic_glm_lpmf"; check_consistent_size(function, "Vector of dependent variables", y, N_instances); check_consistent_size(function, "Weight vector", beta, N_attributes); T_y_ref y_ref = y; T_cuts_ref cuts_ref = cuts; const auto& cuts_val = value_of(cuts_ref); const auto& cuts_val_vec = to_ref(as_column_vector_or_scalar(cuts_val)); check_bounded(function, "Vector of dependent variables", y_ref, 1, N_classes); check_ordered(function, "Cut-points", cuts_val_vec); if (N_classes > 1) { if (N_classes > 2) { check_finite(function, "Final cut-point", cuts_val_vec[N_classes - 2]); } check_finite(function, "First cut-point", cuts_val_vec[0]); } if (size_zero(y, cuts)) { return 0; } if (!include_summand::value) { return 0; } T_x_ref x_ref = x; T_beta_ref beta_ref = beta; const auto& x_val = to_ref_if::value>(value_of(x_ref)); const auto& beta_val = value_of(beta_ref); const auto& beta_val_vec = to_ref_if::value>( as_column_vector_or_scalar(beta_val)); scalar_seq_view y_seq(y_ref); Array cuts_y1(N_instances), cuts_y2(N_instances); for (int i = 0; i < N_instances; i++) { int c = y_seq[i]; if (c != N_classes) { cuts_y1.coeffRef(i) = cuts_val_vec.coeff(c - 1); } else { cuts_y1.coeffRef(i) = INFINITY; } if (c != 1) { cuts_y2.coeffRef(i) = cuts_val_vec.coeff(c - 2); } else { cuts_y2.coeffRef(i) = -INFINITY; } } T_location location = x_val * beta_val_vec; if (!isfinite(sum(location))) { check_finite(function, "Weight vector", beta); check_finite(function, "Matrix of independent variables", x); } Array cut2 = as_array_or_scalar(location) - cuts_y2; Array cut1 = as_array_or_scalar(location) - cuts_y1; // Not immediately evaluating next two expressions benefits performance auto m_log_1p_exp_cut1 = (cut1 > 0.0).select(-cut1, 0) - (-cut1.abs()).exp().log1p(); auto m_log_1p_exp_m_cut2 = (cut2 <= 0.0).select(cut2, 0) - 
(-cut2.abs()).exp().log1p(); T_partials_return logp(0); if (is_vector::value) { Eigen::Map> y_vec(y_seq.data(), y_seq.size()); logp = y_vec.cwiseEqual(1) .select(m_log_1p_exp_cut1, y_vec.cwiseEqual(N_classes).select( m_log_1p_exp_m_cut2, m_log_1p_exp_m_cut2 + log1m_exp(cut1 - cut2).array() + m_log_1p_exp_cut1)) .sum(); } else { if (y_seq[0] == 1) { logp = m_log_1p_exp_cut1.sum(); } else if (y_seq[0] == N_classes) { logp = m_log_1p_exp_m_cut2.sum(); } else { logp = (m_log_1p_exp_m_cut2 + log1m_exp(cut1 - cut2).array() + m_log_1p_exp_cut1) .sum(); } } auto ops_partials = make_partials_propagator(x_ref, beta_ref, cuts_ref); if (!is_constant_all::value) { Array exp_m_cut1 = exp(-cut1); Array exp_m_cut2 = exp(-cut2); Array exp_cuts_diff = exp(cuts_y2 - cuts_y1); Array d1 = (cut2 > 0).select(exp_m_cut2 / (1 + exp_m_cut2), 1 / (1 + exp(cut2))) - exp_cuts_diff / (exp_cuts_diff - 1); Array d2 = 1 / (1 - exp_cuts_diff) - (cut1 > 0).select(exp_m_cut1 / (1 + exp_m_cut1), 1 / (1 + exp(cut1))); if (!is_constant_all::value) { Matrix location_derivative = d1 - d2; if (!is_constant_all::value) { if (T_x_rows == 1) { edge<0>(ops_partials).partials_ = beta_val_vec * location_derivative.sum(); } else { edge<0>(ops_partials).partials_ = (beta_val_vec * location_derivative).transpose(); } } if (!is_constant_all::value) { if (T_x_rows == 1) { edge<1>(ops_partials).partials_ = (location_derivative * x_val.replicate(N_instances, 1)) .transpose(); } else { edge<1>(ops_partials).partials_ = (location_derivative * x_val).transpose(); } } } if (!is_constant_all::value) { for (int i = 0; i < N_instances; i++) { int c = y_seq[i]; if (c != N_classes) { partials<2>(ops_partials)[c - 1] += d2.coeff(i); } if (c != 1) { partials<2>(ops_partials)[c - 2] -= d1.coeff(i); } } } } return ops_partials.build(logp); } template return_type_t ordered_logistic_glm_lpmf( const T_y& y, const T_x& x, const T_beta& beta, const T_cuts& cuts) { return ordered_logistic_glm_lpmf(y, x, beta, cuts); } } // namespace math 
} // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multinomial_logit_lpmf.hpp0000644000176200001440000000402314645137106026147 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTINOMIAL_LOGIT_LPMF_HPP #define STAN_MATH_PRIM_PROB_MULTINOMIAL_LOGIT_LPMF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Multinomial log PMF in log parametrization. * Multinomial(ns| softmax(beta)) * * @param ns Array of outcome counts * @param beta Vector of unnormalized log probabilities * @return log probability */ template , require_eigen_col_vector_t* = nullptr> return_type_t multinomial_logit_lpmf(const std::vector& ns, const T_beta& beta) { static const char* function = "multinomial_logit_lpmf"; check_size_match(function, "Size of number of trials variable", ns.size(), "rows of log-probabilities parameter", beta.rows()); check_nonnegative(function, "Number of trials variable", ns); const auto& beta_ref = to_ref(beta); check_finite(function, "log-probabilities parameter", beta_ref); return_type_t lp(0.0); decltype(auto) ns_map = as_array_or_scalar(ns); if (include_summand::value) { lp += lgamma(1 + ns_map.sum()) - lgamma(1 + ns_map).sum(); } if (include_summand::value) { T_prob alpha = log_sum_exp(beta_ref); for (unsigned int i = 0; i < ns.size(); ++i) { if (ns[i] != 0) { lp += ns[i] * (beta_ref.coeff(i) - alpha); } } } return lp; } template * = nullptr> return_type_t multinomial_logit_lpmf(const std::vector& ns, const T_beta& beta) { return multinomial_logit_lpmf(ns, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_log.hpp0000644000176200001440000000432014645137106024253 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_LOG_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the Student-t density for the given y, nu, mean, and * 
scale parameter. The scale parameter must be greater * than 0. * * \f{eqnarray*}{ y &\sim& t_{\nu} (\mu, \sigma^2) \\ \log (p (y \, |\, \nu, \mu, \sigma) ) &=& \log \left( \frac{\Gamma((\nu + 1) /2)} {\Gamma(\nu/2)\sqrt{\nu \pi} \sigma} \left( 1 + \frac{1}{\nu} (\frac{y - \mu}{\sigma})^2 \right)^{-(\nu + 1)/2} \right) \\ &=& \log( \Gamma( (\nu+1)/2 )) - \log (\Gamma (\nu/2) - \frac{1}{2} \log(\nu \pi) - \log(\sigma) -\frac{\nu + 1}{2} \log (1 + \frac{1}{\nu} (\frac{y - \mu}{\sigma})^2) \f} * * @deprecated use student_t_lpdf * * @param y A scalar variable. * @param nu Degrees of freedom. * @param mu The mean of the Student-t distribution. * @param sigma The scale parameter of the Student-t distribution. * @return The log of the Student-t density at y. * @throw std::domain_error if sigma is not greater than 0. * @throw std::domain_error if nu is not greater than 0. * @tparam T_y Type of scalar. * @tparam T_dof Type of degrees of freedom. * @tparam T_loc Type of location. * @tparam T_scale Type of scale. */ template return_type_t student_t_log(const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { return student_t_lpdf(y, nu, mu, sigma); } /** \ingroup prob_dists * @deprecated use student_t_lpdf */ template inline return_type_t student_t_log( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { return student_t_lpdf(y, nu, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_rng.hpp0000644000176200001440000000264714645137106025625 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_RNG_HPP #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a pseudorandom Poisson binomial random variable for the given vector * of success parameters using the specified random number * generator. 
* * @tparam RNG class of rng * @param theta (Sequence of) chance of success parameter(s) * @param rng random number generator * @return a Poisson binomial distribution random variable * @throw std::domain_error if theta is not a valid probability */ template * = nullptr> inline int poisson_binomial_rng(const T_theta& theta, RNG& rng) { static const char* function = "poisson_binomial_rng"; check_finite(function, "Probability parameters", theta); check_bounded(function, "Probability parameters", value_of(theta), 0.0, 1.0); int y = 0; for (size_t i = 0; i < theta.size(); ++i) { boost::variate_generator > bernoulli_rng(rng, boost::bernoulli_distribution<>(theta(i))); y += bernoulli_rng(); } return y; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_lcdf.hpp0000644000176200001440000001240714645137106027307 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the skew double exponential log cumulative density * function. Given containers of matching sizes, returns the log sum of * probabilities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. * @tparam T_skewness type of skewness parameter. 
* @param y real parameter * @param mu location parameter * @param sigma scale parameter * @param tau skewness parameter * @return log probability or log sum of probabilities * @throw std::domain_error if mu is infinite or sigma is nonpositive or tau is * not bound between 0.0 and 1.0 * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t skew_double_exponential_lcdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { using std::exp; using std::log; using T_partials_return = partials_return_t; static const char* function = "skew_double_exponential_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Shape parameter", sigma, "Skewness parameter", tau); auto&& y_ref = to_ref(y); auto&& mu_ref = to_ref(mu); auto&& sigma_ref = to_ref(sigma); auto&& tau_ref = to_ref(tau); auto&& y_val = as_value_array_or_scalar(y_ref); auto&& mu_val = as_value_array_or_scalar(mu_ref); auto&& sigma_val = as_value_array_or_scalar(sigma_ref); auto&& tau_val = as_value_array_or_scalar(tau_ref); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); check_bounded(function, "Skewness parameter", tau_val, 0.0, 1.0); if (size_zero(y, mu, sigma, tau)) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, tau_ref); scalar_seq_view> y_vec(y_val); scalar_seq_view> mu_vec(mu_val); scalar_seq_view> sigma_vec(sigma_val); scalar_seq_view> tau_vec(tau_val); const auto N = max_size(y, mu, sigma, tau); auto inv_sigma_val = to_ref(inv(sigma_val)); scalar_seq_view inv_sigma(inv_sigma_val); T_partials_return cdf_log(0.0); for (int i = 0; i < N; ++i) { const T_partials_return y_dbl = y_vec[i]; const T_partials_return mu_dbl = mu_vec[i]; const T_partials_return sigma_dbl = sigma_vec[i]; const T_partials_return tau_dbl = tau_vec[i]; const T_partials_return y_m_mu = 
y_dbl - mu_dbl; const T_partials_return diff_sign = sign(y_m_mu); const T_partials_return diff_sign_smaller_0 = step(-diff_sign); const T_partials_return abs_diff_y_mu = fabs(y_m_mu); const T_partials_return abs_diff_y_mu_over_sigma = abs_diff_y_mu * inv_sigma[i]; const T_partials_return expo = (diff_sign_smaller_0 + diff_sign * tau_dbl) * abs_diff_y_mu_over_sigma; const T_partials_return inv_exp_2_expo_tau = inv(exp(2.0 * expo) + tau_dbl - 1.0); const T_partials_return rep_deriv = y_dbl < mu_dbl ? 2.0 * inv_sigma[i] * (1.0 - tau_dbl) : -2.0 * (tau_dbl - 1.0) * tau_dbl * inv_sigma[i] * inv_exp_2_expo_tau; const T_partials_return sig_deriv = y_dbl < mu_dbl ? 2.0 * inv_sigma[i] * expo : -rep_deriv * expo / tau_dbl; const T_partials_return skew_deriv = y_dbl < mu_dbl ? 1.0 / tau_dbl + 2.0 * inv_sigma[i] * y_m_mu * diff_sign : (sigma_dbl - 2.0 * (tau_dbl - 1.0) * y_m_mu) * inv_sigma[i] * inv_exp_2_expo_tau; if (y_dbl <= mu_dbl) { cdf_log += log(tau_dbl) - 2.0 * expo; } else { cdf_log += log1m_exp(log1m(tau_dbl) - 2.0 * expo); } if (!is_constant_all::value) { partials<0>(ops_partials)[i] += rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] -= rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[i] += sig_deriv; } if (!is_constant_all::value) { partials<3>(ops_partials)[i] += skew_deriv; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_cdf.hpp0000644000176200001440000000724314645137106023516 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_CDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t pareto_cdf(const T_y& y, const T_scale& y_min, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_y_min_ref = ref_type_t; 
using T_alpha_ref = ref_type_t; using std::exp; using std::log; static const char* function = "pareto_cdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", y_min, "Shape parameter", alpha); if (size_zero(y, y_min, alpha)) { return 1.0; } T_y_ref y_ref = y; T_y_min_ref y_min_ref = y_min; T_alpha_ref alpha_ref = alpha; check_nonnegative(function, "Random variable", y_ref); check_positive_finite(function, "Scale parameter", y_min_ref); check_positive_finite(function, "Shape parameter", alpha_ref); T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, y_min_ref, alpha_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view y_min_vec(y_min_ref); scalar_seq_view alpha_vec(alpha_ref); size_t N = max_size(y, y_min, alpha); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) < y_min_vec.val(i)) { return ops_partials.build(0.0); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return log_dbl = log(y_min_vec.val(n) / y_vec.val(n)); const T_partials_return y_min_inv_dbl = 1.0 / y_min_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return Pn = 1.0 - exp(alpha_dbl * log_dbl); P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += alpha_dbl * y_min_inv_dbl * exp((alpha_dbl + 1) * log_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += -alpha_dbl * y_min_inv_dbl * exp(alpha_dbl * log_dbl) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -exp(alpha_dbl * log_dbl) * log_dbl / Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < 
stan::math::size(y_min); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(alpha); ++n) { partials<2>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/loglogistic_rng.hpp0000644000176200001440000000447414645137106024600 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGLOGISTIC_RNG_HPP #define STAN_MATH_PRIM_PROB_LOGLOGISTIC_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a loglogistic random variate for the given scale and * shape parameters using the specified random number generator. * * alpha and beta can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_scale type of scale parameter * @tparam T_shape type of shape parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) positive scale parameter(s) * @param beta (Sequence of) positive shape parameter(s) * @param rng random number generator * @return (Sequence of) loglogistic random variate(s) * @throw std::domain_error if alpha or beta are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type loglogistic_rng(const T_scale& alpha, const T_shape& beta, RNG& rng) { using boost::uniform_01; using boost::variate_generator; using std::pow; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "loglogistic_rng"; check_consistent_sizes(function, "Scale parameter", alpha, "Shape Parameter", beta); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Scale parameter", alpha_ref); check_positive_finite(function, "Shape parameter", beta_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = 
max_size(alpha, beta); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > uniform01_rng(rng, uniform_01<>()); const double tmp = uniform01_rng(); output[n] = alpha_vec[n] * pow(tmp / (1 - tmp), 1 / beta_vec[n]); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_cdf.hpp0000644000176200001440000000724014645137106024013 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_CDF_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the CDF for the binomial distribution evaluated at the specified * success, population size, and chance of success. If given containers of * matching lengths, returns the product of probabilities. * * @tparam T_n type of successes parameter * @tparam T_N type of population size parameter * @tparam theta type of chance of success parameter * @param n successes parameter * @param N population size parameter * @param theta chance of success parameter * @return probability or product of probabilities * @throw std::domain_error if N is negative * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template return_type_t binomial_cdf(const T_n& n, const T_N& N, const T_prob& theta) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_t; using T_N_ref = ref_type_t; using T_theta_ref = ref_type_t; using std::exp; using std::pow; static const char* function = "binomial_cdf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "Probability parameter", theta); T_n_ref n_ref = n; T_N_ref N_ref = N; T_theta_ref theta_ref = theta; check_nonnegative(function, "Population size parameter", N_ref); check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 
1.0); if (size_zero(n, N, theta)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view N_vec(N_ref); scalar_seq_view theta_vec(theta_ref); size_t max_size_seq_view = max_size(n, N, theta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(0.0); } } for (size_t i = 0; i < max_size_seq_view; i++) { const T_partials_return n_dbl = n_vec.val(i); const T_partials_return N_dbl = N_vec.val(i); // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_dbl >= N_dbl) { continue; } const T_partials_return theta_dbl = theta_vec.val(i); const T_partials_return Pi = inc_beta(N_dbl - n_dbl, n_dbl + 1, 1 - theta_dbl); P *= Pi; if (!is_constant_all::value) { const T_partials_return denom = beta(N_dbl - n_dbl, n_dbl + 1) * Pi; partials<0>(ops_partials)[i] -= pow(theta_dbl, n_dbl) * pow(1 - theta_dbl, N_dbl - n_dbl - 1) / denom; } } if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(theta); ++i) { partials<0>(ops_partials)[i] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_cdf.hpp0000644000176200001440000001014014645137106023465 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_CDF_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the Gumbel distribution cumulative distribution for the given * location and scale. Given containers of matching sizes, returns the * product of probabilities. 
* * @tparam T_y type of real parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param mu location parameter * @param beta scale parameter * @return probability or product of probabilities * @throw std::domain_error if y is nan, mu is infinite, or beta is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t gumbel_cdf(const T_y& y, const T_loc& mu, const T_scale& beta) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_beta_ref = ref_type_if_t::value, T_scale>; static const char* function = "gumbel_cdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", beta); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", beta_val); if (size_zero(y, mu, beta)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, beta_ref); const auto& scaled_diff = to_ref_if::value>((y_val - mu_val) / beta_val); const auto& exp_m_scaled_diff = to_ref_if::value>( exp(-scaled_diff)); const auto& cdf_n = to_ref_if::value>( exp(-exp_m_scaled_diff)); T_partials_return cdf(1.0); if (is_vector::value || is_vector::value || is_vector::value) { cdf = forward_as(cdf_n).prod(); } else { cdf = forward_as(cdf_n); } if (!is_constant_all::value) { const auto& rep_deriv_tmp = exp(-scaled_diff - exp_m_scaled_diff); const auto& rep_deriv = to_ref_if::value + !is_constant_all::value + 
!is_constant_all::value >= 2>(cdf * rep_deriv_tmp / (beta_val * cdf_n)); if (!is_constant_all::value) { partials<0>(ops_partials) = rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = -rep_deriv * scaled_diff; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/matrix_normal_prec_log.hpp0000644000176200001440000000373214645137106026135 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MATRIX_NORMAL_PREC_LOG_HPP #define STAN_MATH_PRIM_PROB_MATRIX_NORMAL_PREC_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the matrix normal density for the given y, mu, Sigma and D * where Sigma and D are given as precision matrices, not covariance * matrices. * * @deprecated use matrix_normal_prec_lpdf * * @param y An mxn matrix. * @param Mu The mean matrix. * @param Sigma The mxm inverse covariance matrix (i.e., the precision * matrix) of the rows of y. * @param D The nxn inverse covariance matrix (i.e., the precision * matrix) of the columns of y. * @return The log of the matrix normal density. * @throw std::domain_error if Sigma or D are not square, not symmetric, * or not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_Mu Type of location. * @tparam T_Sigma Type of Sigma. * @tparam T_D Type of D. 
*/ template * = nullptr> return_type_t matrix_normal_prec_log( const T_y& y, const T_Mu& Mu, const T_Sigma& Sigma, const T_D& D) { return matrix_normal_prec_lpdf(y, Mu, Sigma, D); } /** \ingroup multivar_dists * @deprecated use matrix_normal_prec_lpdf */ template * = nullptr> return_type_t matrix_normal_prec_log( const T_y& y, const T_Mu& Mu, const T_Sigma& Sigma, const T_D& D) { return matrix_normal_prec_lpdf(y, Mu, Sigma, D); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_lpmf.hpp0000644000176200001440000001157614645137106025055 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LPMF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { namespace internal { // Exposing to let me use in tests // The current tests fail for 1e8 and pass for 1e9, so setting to 1e10 constexpr double neg_binomial_alpha_cutoff = 1e10; } // namespace internal // NegBinomial(n|alpha, beta) [alpha > 0; beta > 0; n >= 0] template * = nullptr> return_type_t neg_binomial_lpmf(const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using std::log; using T_n_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "neg_binomial_lpmf"; check_consistent_sizes(function, "Failures variable", n, "Shape parameter", alpha, "Inverse scale parameter", beta); T_n_ref n_ref = n; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_nonnegative(function, "Failures variable", n_ref); check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); if (size_zero(n, alpha, beta)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = 
make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_alpha = stan::math::size(alpha); size_t size_beta = stan::math::size(beta); size_t size_alpha_beta = max_size(alpha, beta); size_t max_size_seq_view = max_size(n, alpha, beta); VectorBuilder::value, T_partials_return, T_shape> digamma_alpha(size_alpha); if (!is_constant_all::value) { for (size_t i = 0; i < size_alpha; ++i) { digamma_alpha[i] = digamma(alpha_vec.val(i)); } } VectorBuilder log1p_inv_beta(size_beta); VectorBuilder log1p_beta(size_beta); for (size_t i = 0; i < size_beta; ++i) { const T_partials_return beta_dbl = beta_vec.val(i); log1p_inv_beta[i] = log1p(inv(beta_dbl)); log1p_beta[i] = log1p(beta_dbl); } VectorBuilder::value, T_partials_return, T_shape, T_inv_scale> lambda_m_alpha_over_1p_beta(size_alpha_beta); if (!is_constant_all::value) { for (size_t i = 0; i < size_alpha_beta; ++i) { const T_partials_return alpha_dbl = alpha_vec.val(i); const T_partials_return beta_dbl = beta_vec.val(i); lambda_m_alpha_over_1p_beta[i] = alpha_dbl / beta_dbl - alpha_dbl / (1 + beta_dbl); } } for (size_t i = 0; i < max_size_seq_view; i++) { const T_partials_return alpha_dbl = alpha_vec.val(i); const T_partials_return beta_dbl = beta_vec.val(i); if (include_summand::value) { if (n_vec[i] != 0) { logp += binomial_coefficient_log(n_vec[i] + alpha_dbl - 1.0, alpha_dbl - 1.0); } } logp -= alpha_dbl * log1p_inv_beta[i] + n_vec[i] * log1p_beta[i]; if (!is_constant_all::value) { partials<0>(ops_partials)[i] += digamma(alpha_dbl + n_vec[i]) - digamma_alpha[i] - log1p_inv_beta[i]; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] += lambda_m_alpha_over_1p_beta[i] - n_vec[i] / (beta_dbl + 1.0); } } return ops_partials.build(logp); } template inline return_type_t neg_binomial_lpmf( const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { return neg_binomial_lpmf(n, alpha, beta); } } // namespace math 
} // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_lccdf.hpp0000644000176200001440000000520714645137106024534 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LCCDF_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CCDF of the Bernoulli distribution. If containers are * supplied, returns the log sum of the probabilities. * * @tparam T_n type of integer parameter * @tparam T_prob type of chance of success parameter * @param n integer parameter * @param theta chance of success parameter * @return log probability or log sum of probabilities * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr> return_type_t bernoulli_lccdf(const T_n& n, const T_prob& theta) { using T_partials_return = partials_return_t; using T_theta_ref = ref_type_t; using std::log; static const char* function = "bernoulli_lccdf"; check_consistent_sizes(function, "Random variable", n, "Probability parameter", theta); T_theta_ref theta_ref = theta; check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); if (size_zero(n, theta)) { return 0.0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n); scalar_seq_view theta_vec(theta_ref); size_t max_size_seq_view = max_size(n, theta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { const double n_dbl = n_vec.val(i); if (n_dbl < 0) { return ops_partials.build(0.0); } if (n_dbl >= 1) { return ops_partials.build(NEGATIVE_INFTY); } } for (size_t i = 0; i < max_size_seq_view; i++) { const T_partials_return Pi = theta_vec.val(i); P += log(Pi); if 
(!is_constant_all::value) { partials<0>(ops_partials)[i] += inv(Pi); } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_cdf_log.hpp0000644000176200001440000000115514645137106025504 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use neg_binomial_lcdf */ template return_type_t neg_binomial_cdf_log( const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { return neg_binomial_lcdf(n, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_rng.hpp0000644000176200001440000000576214645137106024705 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a negative binomial random variate with the specified shape and * inverse scale parameters using the given random number generator. * * alpha and beta can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_shape type of shape parameter * @tparam T_inv type of inverse scale parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) positive shape parameter(s) * @param beta (Sequence of) positive inverse scale parameter(s) * @param rng random number generator * @return (Sequence of) negative binomial random variate(s) * @throw std::domain_error if alpha or beta are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type neg_binomial_rng( const T_shape& alpha, const T_inv& beta, RNG& rng) { using boost::gamma_distribution; using boost::variate_generator; using boost::random::poisson_distribution; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "neg_binomial_rng"; check_consistent_sizes(function, "Shape parameter", alpha, "Inverse scale Parameter", beta); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(alpha, beta); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { double rng_from_gamma = variate_generator >( rng, gamma_distribution<>(alpha_vec[n], 1.0 / beta_vec[n]))(); // same as the constraints for poisson_rng check_less(function, "Random number that came from gamma distribution", rng_from_gamma, POISSON_MAX_RATE); check_not_nan(function, "Random number that came from gamma distribution", rng_from_gamma); check_nonnegative(function, "Random number that came from gamma distribution", rng_from_gamma); output[n] = variate_generator >( rng, poisson_distribution<>(rng_from_gamma))(); } return output.data(); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_lccdf.hpp0000644000176200001440000001036514645137106027054 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LCCDF_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t scaled_inv_chi_square_lccdf( const T_y& y, const T_dof& nu, const T_scale& s) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_s_ref = ref_type_t; static const char* function = "scaled_inv_chi_square_lccdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu, "Scale parameter", s); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; T_s_ref s_ref = s; check_nonnegative(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_positive_finite(function, "Scale parameter", s_ref); if (size_zero(y, nu, s)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref, s_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view s_vec(s_ref); size_t N = max_size(y, nu, s); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return half_nu_dbl = 0.5 * nu_vec.val(i); gamma_vec[i] = tgamma(half_nu_dbl); digamma_vec[i] = digamma(half_nu_dbl); } } for 
(size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(negative_infinity()); } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return half_nu_dbl = 0.5 * nu_vec.val(n); const T_partials_return s_dbl = s_vec.val(n); const T_partials_return half_s2_overx_dbl = 0.5 * s_dbl * s_dbl * y_inv_dbl; const T_partials_return half_nu_s2_overx_dbl = 2.0 * half_nu_dbl * half_s2_overx_dbl; const T_partials_return Pn = gamma_p(half_nu_dbl, half_nu_s2_overx_dbl); const T_partials_return gamma_p_deriv = exp(-half_nu_s2_overx_dbl) * pow(half_nu_s2_overx_dbl, half_nu_dbl - 1) / tgamma(half_nu_dbl); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= half_nu_s2_overx_dbl * y_inv_dbl * gamma_p_deriv / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= (0.5 * grad_reg_inc_gamma(half_nu_dbl, half_nu_s2_overx_dbl, gamma_vec[n], digamma_vec[n]) - half_s2_overx_dbl * gamma_p_deriv) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += 2.0 * half_nu_dbl * s_dbl * y_inv_dbl * gamma_p_deriv / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_lpdf.hpp0000644000176200001440000000742214645137106024416 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_LPDF_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t von_mises_lpdf(T_y const& y, T_loc const& mu, T_scale const& kappa) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_kappa_ref = 
ref_type_if_t::value, T_scale>; static char const* const function = "von_mises_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", kappa); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_kappa_ref kappa_ref = kappa; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) kappa_val = to_ref(as_value_column_array_or_scalar(kappa_ref)); check_finite(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_nonnegative(function, "Scale parameter", kappa_val); check_finite(function, "Scale parameter", kappa_val); if (size_zero(y, mu, kappa)) { return 0; } if (!include_summand::value) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, kappa_ref); const auto& cos_mu_minus_y = to_ref_if::value>(cos(mu_val - y_val)); size_t N = max_size(y, mu, kappa); T_partials_return logp = sum(kappa_val * cos_mu_minus_y); if (include_summand::value) { logp -= LOG_TWO_PI * N; } if (include_summand::value) { logp -= sum(log_modified_bessel_first_kind(0, kappa_val)) * N / math::size(kappa); } if (!is_constant_all::value) { const auto& sin_diff = sin(y_val - mu_val); auto kappa_sin = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(kappa_val * sin_diff); if (!is_constant_all::value) { partials<0>(ops_partials) = -kappa_sin; } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(kappa_sin); } } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = cos_mu_minus_y - modified_bessel_first_kind(-1, kappa_val) / modified_bessel_first_kind(0, kappa_val); } return ops_partials.build(logp); } template inline return_type_t von_mises_lpdf(T_y const& y, T_loc const& mu, T_scale const& kappa) { return von_mises_lpdf(y, mu, kappa); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/frechet_ccdf_log.hpp0000644000176200001440000000126314645137106024644 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_FRECHET_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use frechet_lccdf */ template return_type_t frechet_ccdf_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return frechet_lccdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_cdf_log.hpp0000644000176200001440000000124614645137106024352 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_NORMAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use normal_lcdf */ template inline return_type_t normal_cdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return normal_lcdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_cdf.hpp0000644000176200001440000001213114645137106024152 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_CDF_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The CDF of an inverse gamma density for y with the specified * shape and scale parameters. y, shape, and scale parameters must * be greater than 0. * * @tparam T_y type of scalar * @tparam T_shape type of shape * @tparam T_scale type of scale * @param y A scalar variable. * @param alpha Shape parameter. * @param beta Scale parameter. * @throw std::domain_error if alpha is not greater than 0. * @throw std::domain_error if beta is not greater than 0. * @throw std::domain_error if y is not greater than 0. 
*/ template return_type_t inv_gamma_cdf(const T_y& y, const T_shape& alpha, const T_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; using std::exp; using std::pow; static const char* function = "inv_gamma_cdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Scale Parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", beta_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, alpha, beta)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(y, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_shape> gamma_vec(math::size(alpha)); VectorBuilder::value, T_partials_return, T_shape> digamma_vec(math::size(alpha)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(alpha); i++) { const T_partials_return alpha_dbl = alpha_vec.val(i); gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return Pn = gamma_q(alpha_dbl, beta_dbl * 
y_inv_dbl); P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += beta_dbl * y_inv_dbl * y_inv_dbl * exp(-beta_dbl * y_inv_dbl) * pow(beta_dbl * y_inv_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_inv_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -y_inv_dbl * exp(-beta_dbl * y_inv_dbl) * pow(beta_dbl * y_inv_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(alpha); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(beta); ++n) { partials<2>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_corr_log.hpp0000644000176200001440000000151214645137106024047 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_CORR_LOG_HPP #define STAN_MATH_PRIM_PROB_LKJ_CORR_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use lkj_corr_lpdf */ template return_type_t lkj_corr_log(const T_y& y, const T_shape& eta) { return lkj_corr_lpdf(y, eta); } /** \ingroup multivar_dists * @deprecated use lkj_corr_lpdf */ template inline return_type_t lkj_corr_log(const T_y& y, const T_shape& eta) { return lkj_corr_lpdf<>(y, eta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_cdf_log.hpp0000644000176200001440000000116714645137106025075 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use 
student_t_lcdf */ template return_type_t student_t_cdf_log( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { return student_t_lcdf(y, nu, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_ccdf_log.hpp0000644000176200001440000000114114645137106025545 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use exponential_lccdf */ template return_type_t exponential_ccdf_log(const T_y& y, const T_inv_scale& beta) { return exponential_lccdf(y, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_lccdf.hpp0000644000176200001440000000331614645137106025404 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LCCDF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LCCDF_HPP #include #include #include #include #include #include namespace stan { namespace math { // Temporary neg_binomial_2_ccdf implementation that // transforms the input parameters and calls neg_binomial_ccdf template return_type_t neg_binomial_2_lccdf( const T_n& n, const T_location& mu, const T_precision& phi) { using T_mu_ref = ref_type_t; using T_phi_ref = ref_type_t; static const char* function = "neg_binomial_2_lccdf"; check_consistent_sizes(function, "Random variable", n, "Location parameter", mu, "Precision Parameter", phi); T_mu_ref mu_ref = mu; T_phi_ref phi_ref = phi; check_positive_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Precision parameter", phi_ref); check_not_nan(function, "Random variable", n); if (size_zero(n, mu, phi)) { return 0; } scalar_seq_view mu_vec(mu_ref); scalar_seq_view phi_vec(phi_ref); size_t size_beta = max_size(mu, phi); VectorBuilder, T_location, T_precision> beta_vec(size_beta); for (size_t i = 0; i < size_beta; ++i) { 
beta_vec[i] = phi_vec[i] / mu_vec[i]; } return neg_binomial_lccdf(n, phi_ref, beta_vec.data()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gaussian_dlm_obs_log.hpp0000644000176200001440000000235214645137106025556 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAUSSIAN_DLM_OBS_LOG_HPP #define STAN_MATH_PRIM_PROB_GAUSSIAN_DLM_OBS_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use gaussian_dlm_obs_lpdf */ template inline return_type_t gaussian_dlm_obs_log( const T_y& y, const T_F& F, const T_G& G, const T_V& V, const T_W& W, const T_m0& m0, const T_C0& C0) { return gaussian_dlm_obs_lpdf(y, F, G, V, W, m0, C0); } /** \ingroup multivar_dists * @deprecated use gaussian_dlm_obs_lpdf */ template inline return_type_t gaussian_dlm_obs_log( const T_y& y, const T_F& F, const T_G& G, const T_V& V, const T_W& W, const T_m0& m0, const T_C0& C0) { return gaussian_dlm_obs_lpdf<>(y, F, G, V, W, m0, C0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_lcdf.hpp0000644000176200001440000000722614645137106023654 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_LCDF_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the Gumbel log cumulative distribution for the given * location and scale. Given containers of matching sizes, returns the * log sum of probabilities. 
* * @tparam T_y type of real parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param mu location parameter * @param beta scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is nan, mu is infinite, or beta is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t gumbel_lcdf(const T_y& y, const T_loc& mu, const T_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_beta_ref = ref_type_if_t::value, T_scale>; static const char* function = "gumbel_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", beta); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", beta_val); if (size_zero(y, mu, beta)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, beta_ref); const auto& scaled_diff = to_ref_if::value>((mu_val - y_val) / beta_val); const auto& exp_scaled_diff = to_ref_if::value>( exp(scaled_diff)); T_partials_return cdf_log = -sum(exp_scaled_diff); if (!is_constant_all::value) { const auto& rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(exp_scaled_diff / beta_val); if (!is_constant_all::value) { partials<0>(ops_partials) = rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = rep_deriv * scaled_diff; } } 
return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_cdf.hpp0000644000176200001440000001220414645137106023277 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_CDF_HPP #define STAN_MATH_PRIM_PROB_GAMMA_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The cumulative density function for a gamma distribution for y * with the specified shape and inverse scale parameters. * * @tparam T_y type of scalar * @tparam T_shape type of shape * @tparam T_inv_scale type of inverse scale * @param y A scalar variable. * @param alpha Shape parameter. * @param beta Inverse scale parameter. * @throw std::domain_error if alpha is not greater than 0. * @throw std::domain_error if beta is not greater than 0. * @throw std::domain_error if y is not greater than or equal to 0. 
*/ template return_type_t gamma_cdf(const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; using std::exp; static const char* function = "gamma_cdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Inverse scale parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, alpha, beta)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(y, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } using std::exp; using std::pow; VectorBuilder::value, T_partials_return, T_shape> gamma_vec(math::size(alpha)); VectorBuilder::value, T_partials_return, T_shape> digamma_vec(math::size(alpha)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(alpha); i++) { const T_partials_return alpha_dbl = alpha_vec.val(i); gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return Pn = gamma_p(alpha_dbl, beta_dbl * y_dbl); P *= Pn; if 
(!is_constant_all::value) { partials<0>(ops_partials)[n] += beta_dbl * exp(-beta_dbl * y_dbl) * pow(beta_dbl * y_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += y_dbl * exp(-beta_dbl * y_dbl) * pow(beta_dbl * y_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(alpha); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(beta); ++n) { partials<2>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_ccdf_log.hpp0000644000176200001440000000123714645137106024704 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use uniform_lccdf */ template return_type_t uniform_ccdf_log(const T_y& y, const T_low& alpha, const T_high& beta) { return uniform_lccdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_ccdf_log.hpp0000644000176200001440000000115014645137106026563 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use poisson_binomial_lccdf */ template return_type_t poisson_binomial_ccdf_log(const T_y& y, const T_theta& theta) { return poisson_binomial_lccdf(y, theta); } } // namespace math } // namespace stan 
#endif StanHeaders/inst/include/stan/math/prim/prob/poisson_lccdf.hpp0000644000176200001440000000433114645137106024230 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LCCDF_HPP #define STAN_MATH_PRIM_PROB_POISSON_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t poisson_lccdf(const T_n& n, const T_rate& lambda) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_if_t::value, T_n>; using T_lambda_ref = ref_type_if_t::value, T_rate>; static const char* function = "poisson_lccdf"; check_consistent_sizes(function, "Random variable", n, "Rate parameter", lambda); T_n_ref n_ref = n; T_lambda_ref lambda_ref = lambda; decltype(auto) n_val = to_ref(as_value_column_array_or_scalar(n_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_nonnegative(function, "Rate parameter", lambda_val); if (size_zero(n, lambda)) { return 0; } auto ops_partials = make_partials_propagator(lambda_ref); if (sum(promote_scalar(n_val < 0))) { return ops_partials.build(0.0); } const auto& log_Pi = to_ref(log(gamma_p(n_val + 1.0, lambda_val))); T_partials_return P = sum(log_Pi); if (!is_constant_all::value) { partials<0>(ops_partials) = exp(n_val * log(lambda_val) - lambda_val - lgamma(n_val + 1.0) - log_Pi); } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_log.hpp0000644000176200001440000000171214645137106023157 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_lpdf */ template return_type_t beta_log( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { return beta_lpdf(y, alpha, beta); } /** \ingroup 
prob_dists * @deprecated use beta_lpdf */ template inline return_type_t beta_log( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { return beta_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_log.hpp0000644000176200001440000000317114645137106025244 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LOG_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of an inverse chi-squared density for y with the specified * degrees of freedom parameter. * The degrees of freedom parameter must be greater than 0. * y must be greater than 0. * \f{eqnarray*}{ y &\sim& \mbox{\sf{Inv-}}\chi^2_\nu \\ \log (p (y \, |\, \nu)) &=& \log \left( \frac{2^{-\nu / 2}}{\Gamma (\nu / 2)} y^{- (\nu / 2 + 1)} \exp^{-1 / (2y)} \right) \\ &=& - \frac{\nu}{2} \log(2) - \log (\Gamma (\nu / 2)) - (\frac{\nu}{2} + 1) \log(y) - \frac{1}{2y} \\ & & \mathrm{ where } \; y > 0 \f} * * @deprecated use inv_chi_square_lpdf * * @param y A scalar variable. * @param nu Degrees of freedom. * @throw std::domain_error if nu is not greater than or equal to 0 * @throw std::domain_error if y is not greater than or equal to 0. * @tparam T_y Type of scalar. * @tparam T_dof Type of degrees of freedom. 
*/ template return_type_t inv_chi_square_log(const T_y& y, const T_dof& nu) { return inv_chi_square_lpdf(y, nu); } /** \ingroup prob_dists * @deprecated use inv_chi_square_lpdf */ template inline return_type_t inv_chi_square_log(const T_y& y, const T_dof& nu) { return inv_chi_square_lpdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_lcdf.hpp0000644000176200001440000001324014645137106025375 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t exp_mod_normal_lcdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_lambda_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exp_mod_normal_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Inv_scale paramter", lambda); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_lambda_ref lambda_ref = lambda; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); check_positive_finite(function, "Inv_scale 
parameter", lambda_val); if (size_zero(y, mu, sigma, lambda)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, lambda_ref); scalar_seq_view y_vec(y_val); for (size_t n = 0, size_y = stan::math::size(y); n < size_y; n++) { if (is_inf(y_vec[n])) { return ops_partials.build(y_vec[n] < 0 ? negative_infinity() : 0); } } const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& diff = to_ref(y_val - mu_val); const auto& v = to_ref(lambda_val * sigma_val); const auto& scaled_diff = to_ref(diff * INV_SQRT_TWO * inv_sigma); const auto& scaled_diff_diff = to_ref_if::value>( scaled_diff - v * INV_SQRT_TWO); const auto& erf_calc = to_ref(0.5 * (1 + erf(scaled_diff_diff))); const auto& exp_term = to_ref_if::value>( exp(0.5 * square(v) - lambda_val * diff)); const auto& cdf_n = to_ref(0.5 + 0.5 * erf(scaled_diff) - exp_term * erf_calc); T_partials_return cdf_log = sum(log(cdf_n)); if (!is_constant_all::value) { const auto& exp_term_2 = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( exp(-square(scaled_diff_diff))); if (!is_constant_all::value) { constexpr bool need_deriv_refs = !is_constant_all::value && !is_constant_all::value; const auto& deriv_1 = to_ref_if(lambda_val * exp_term * erf_calc); const auto& deriv_2 = to_ref_if( INV_SQRT_TWO_PI * exp_term * exp_term_2 * inv_sigma); const auto& sq_scaled_diff = square(scaled_diff); const auto& exp_m_sq_scaled_diff = exp(-sq_scaled_diff); const auto& deriv_3 = to_ref_if( INV_SQRT_TWO_PI * exp_m_sq_scaled_diff * inv_sigma); if (!is_constant_all::value) { const auto& deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( (deriv_1 - deriv_2 + deriv_3) / cdf_n); if (!is_constant_all::value) { partials<0>(ops_partials) = deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = -deriv; } } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = -((deriv_1 - deriv_2) * v + (deriv_3 - deriv_2) * scaled_diff * SQRT_TWO) / cdf_n; } } if 
(!is_constant_all::value) { edge<3>(ops_partials).partials_ = exp_term * (INV_SQRT_TWO_PI * sigma_val * exp_term_2 - (v * sigma_val - diff) * erf_calc) / cdf_n; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_cdf_log.hpp0000644000176200001440000000113714645137106024335 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use cauchy_lcdf */ template return_type_t cauchy_cdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return cauchy_lcdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_log.hpp0000644000176200001440000000346414645137106023542 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_LOG_HPP #define STAN_MATH_PRIM_PROB_NORMAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the normal density for the specified scalar(s) given * the specified mean(s) and deviation(s). y, mu, or sigma can * each be either a scalar or a vector. Any vector inputs * must be the same length. * *

The result log probability is defined to be the sum of the * log probabilities for each observation/mean/deviation triple. * * @deprecated use normal_lpdf * * @param y (Sequence of) scalar(s). * @param mu (Sequence of) location parameter(s) * for the normal distribution. * @param sigma (Sequence of) scale parameters for the normal * distribution. * @return The log of the product of the densities. * @throw std::domain_error if the scale is not positive. * @tparam T_y Underlying type of scalar in sequence. * @tparam T_loc Type of location parameter. */ template inline return_type_t normal_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return normal_lpdf(y, mu, sigma); } /** \ingroup prob_dists * @deprecated use normal_lpdf */ template inline return_type_t normal_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return normal_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/discrete_range_lcdf.hpp0000644000176200001440000000466014645137106025356 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LCDF_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LCDF_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return the log CDF of a discrete range distribution for the given y, * lower and upper bounds (all integers). * * `y`, `lower` and `upper` can each be a scalar or a one-dimensional container. * Any container arguments must be the same size. * * @tparam T_y type of scalar, either int or std::vector * @tparam T_lower type of lower bound, either int or std::vector * @tparam T_upper type of upper bound, either int or std::vector * * @param y integer random variable * @param lower integer lower bound * @param upper integer upper bound * @return The log CDF evaluated at the specified arguments. If containers are * supplied, returns the sum of the log CDFs. 
* @throw std::domain_error if upper is smaller than lower. * @throw std::invalid_argument if non-scalar arguments are of different * sizes. */ template double discrete_range_lcdf(const T_y& y, const T_lower& lower, const T_upper& upper) { static const char* function = "discrete_range_lcdf"; check_consistent_sizes(function, "Lower bound parameter", lower, "Upper bound parameter", upper); check_greater_or_equal(function, "Upper bound parameter", upper, lower); if (size_zero(y, lower, upper)) { return 0; } scalar_seq_view y_vec(y); scalar_seq_view lower_vec(lower); scalar_seq_view upper_vec(upper); size_t N = max_size(y, lower, upper); for (size_t n = 0; n < N; ++n) { const int y_dbl = y_vec[n]; if (y_dbl < lower_vec[n]) { return LOG_ZERO; } } double cdf(0.0); for (size_t n = 0; n < N; n++) { const int y_dbl = y_vec[n]; if (y_dbl < upper_vec[n]) { const int lower_dbl = lower_vec[n]; const int upper_dbl = upper_vec[n]; cdf += log(y_dbl - lower_dbl + 1) - log(upper_dbl - lower_dbl + 1); } } return cdf; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/hypergeometric_lpmf.hpp0000644000176200001440000000443514645137106025454 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_HYPERGEOMETRIC_LPMF_HPP #define STAN_MATH_PRIM_PROB_HYPERGEOMETRIC_LPMF_HPP #include #include #include #include #include #include namespace stan { namespace math { // Hypergeometric(n|N, a, b) [0 <= n <= a; 0 <= N-n <= b; 0 <= N <= a+b] // n: #white balls drawn; N: #balls drawn; // a: #white balls; b: #black balls template double hypergeometric_lpmf(const T_n& n, const T_N& N, const T_a& a, const T_b& b) { static const char* function = "hypergeometric_lpmf"; check_bounded(function, "Successes variable", value_of(n), 0, a); check_consistent_sizes(function, "Successes variable", n, "Draws parameter", N, "Successes in population parameter", a, "Failures in population parameter", b); check_greater_or_equal(function, "Draws parameter", N, n); if (size_zero(n, N, a, 
b)) { return 0.0; } double logp(0.0); scalar_seq_view n_vec(n); scalar_seq_view N_vec(N); scalar_seq_view a_vec(a); scalar_seq_view b_vec(b); size_t max_size_seq_view = max_size(n, N, a, b); for (size_t i = 0; i < max_size_seq_view; i++) { check_bounded(function, "Draws parameter minus successes variable", N_vec[i] - n_vec[i], 0, b_vec[i]); check_bounded(function, "Draws parameter", N_vec[i], 0, a_vec[i] + b_vec[i]); } if (!include_summand::value) { return 0.0; } for (size_t i = 0; i < max_size_seq_view; i++) { logp += math::binomial_coefficient_log(a_vec[i], n_vec[i]) + math::binomial_coefficient_log(b_vec[i], N_vec[i] - n_vec[i]) - math::binomial_coefficient_log(a_vec[i] + b_vec[i], N_vec[i]); } return logp; } template inline double hypergeometric_lpmf(const T_n& n, const T_N& N, const T_a& a, const T_b& b) { return hypergeometric_lpmf(n, N, a, b); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multinomial_logit_log.hpp0000644000176200001440000000206114645137106025772 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTINOMIAL_LOGIT_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTINOMIAL_LOGIT_LOG_HPP #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use multinomial_logit_lpmf */ template * = nullptr> return_type_t multinomial_logit_log(const std::vector& ns, const T_beta& beta) { return multinomial_logit_lpmf(ns, beta); } /** \ingroup multivar_dists * @deprecated use multinomial_logit_lpmf */ template * = nullptr> return_type_t multinomial_logit_log(const std::vector& ns, const T_beta& beta) { return multinomial_logit_lpmf(ns, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/discrete_range_rng.hpp0000644000176200001440000000431314645137106025227 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_RNG_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_RNG_HPP #include #include #include #include #include 
#include namespace stan { namespace math { /** \ingroup prob_dists * Return an integer random variate between the given lower and upper bounds * (inclusive) using the specified random number generator. * * `lower` and `upper` can each be a scalar or a one-dimensional container. * Any non-scalar inputs must be the same size. * * @tparam T_lower type of lower bound, either int or std::vector * @tparam T_upper type of upper bound, either int or std::vector * @tparam RNG type of random number generator * * @param lower lower bound * @param upper upper bound * @param rng random number generator * @return A (sequence of) integer random variate(s) between `lower` and * `upper`, both bounds included. * @throw std::domain_error if upper is smaller than lower. * @throw std::invalid_argument if non-scalar arguments are of different * sizes. */ template inline typename VectorBuilder::type discrete_range_rng(const T_lower& lower, const T_upper& upper, RNG& rng) { static const char* function = "discrete_range_rng"; using boost::variate_generator; using boost::random::uniform_int_distribution; check_consistent_sizes(function, "Lower bound parameter", lower, "Upper bound parameter", upper); check_greater_or_equal(function, "Upper bound parameter", upper, lower); scalar_seq_view lower_vec(lower); scalar_seq_view upper_vec(upper); size_t N = max_size(lower, upper); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator> discrete_range_rng( rng, uniform_int_distribution<>(lower_vec[n], upper_vec[n])); output[n] = discrete_range_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_cdf.hpp0000644000176200001440000000737314645137106023673 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_CDF_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { 
namespace math { /** \ingroup prob_dists * Returns the Weibull cumulative distribution function for the given * location and scale. Given containers of matching sizes, returns the * product of probabilities. * * @tparam T_y type of real parameter * @tparam T_shape type of shape parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param alpha shape parameter * @param sigma scale parameter * @return probability or product of probabilities * @throw std::domain_error if y is negative, alpha sigma is nonpositive */ template * = nullptr> return_type_t weibull_cdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using std::pow; static const char* function = "weibull_cdf"; T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Shape parameter", alpha_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, alpha, sigma)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); constexpr bool any_derivs = !is_constant_all::value; const auto& pow_n = to_ref_if(pow(y_val / sigma_val, alpha_val)); const auto& exp_n = to_ref_if(exp(-pow_n)); const auto& cdf_n = to_ref_if(1 - exp_n); T_partials_return cdf = prod(cdf_n); if (any_derivs) { const auto& rep_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( exp_n * pow_n * cdf / cdf_n); if (!is_constant_all::value) { const auto& deriv_y_sigma = to_ref_if<( !is_constant_all::value && 
!is_constant_all::value)>( rep_deriv * alpha_val); if (!is_constant_all::value) { partials<0>(ops_partials) = deriv_y_sigma / y_val; } if (!is_constant_all::value) { partials<2>(ops_partials) = -deriv_y_sigma / sigma_val; } } if (!is_constant_all::value) { partials<1>(ops_partials) = rep_deriv * log(y_val / sigma_val); } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_cdf.hpp0000644000176200001440000001073614645137106025071 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_CDF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t neg_binomial_2_cdf( const T_n& n, const T_location& mu, const T_precision& phi) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_phi_ref = ref_type_t; static const char* function = "neg_binomial_2_cdf"; check_consistent_sizes(function, "Random variable", n, "Location parameter", mu, "Precision Parameter", phi); T_n_ref n_ref = n; T_mu_ref mu_ref = mu; T_phi_ref phi_ref = phi; check_positive_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Precision parameter", phi_ref); check_not_nan(function, "Random variable", n_ref); if (size_zero(n, mu, phi)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(mu_ref, phi_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view phi_vec(phi_ref); size_t size_phi = stan::math::size(phi); size_t size_n_phi = max_size(n, phi); size_t max_size_seq_view = max_size(n, mu, phi); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return 
ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_precision> digamma_phi_vec(size_phi); VectorBuilder::value, T_partials_return, T_n, T_precision> digamma_sum_vec(size_n_phi); if (!is_constant_all::value) { for (size_t i = 0; i < size_phi; i++) { digamma_phi_vec[i] = digamma(phi_vec.val(i)); } for (size_t i = 0; i < size_n_phi; i++) { const T_partials_return n_dbl = n_vec.val(i); const T_partials_return phi_dbl = phi_vec.val(i); digamma_sum_vec[i] = digamma(n_dbl + phi_dbl + 1); } } for (size_t i = 0; i < max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_vec.val(i) == std::numeric_limits::max()) { return ops_partials.build(1.0); } const T_partials_return n_dbl_p1 = n_vec.val(i) + 1; const T_partials_return mu_dbl = mu_vec.val(i); const T_partials_return phi_dbl = phi_vec.val(i); const T_partials_return inv_mu_plus_phi = inv(mu_dbl + phi_dbl); const T_partials_return p_dbl = phi_dbl * inv_mu_plus_phi; const T_partials_return d_dbl = square(inv_mu_plus_phi); const T_partials_return P_i = inc_beta(phi_dbl, n_dbl_p1, p_dbl); const T_partials_return inc_beta_ddz_i = is_constant_all::value ? 
0 : inc_beta_ddz(phi_dbl, n_dbl_p1, p_dbl) * d_dbl / P_i; P *= P_i; if (!is_constant_all::value) { partials<0>(ops_partials)[i] -= inc_beta_ddz_i * phi_dbl; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] += inc_beta_dda(phi_dbl, n_dbl_p1, p_dbl, digamma_phi_vec[i], digamma_sum_vec[i]) / P_i + inc_beta_ddz_i * mu_dbl; } } if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(mu); ++i) { partials<0>(ops_partials)[i] *= P; } } if (!is_constant_all::value) { for (size_t i = 0; i < size_phi; ++i) { partials<1>(ops_partials)[i] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_lpdf.hpp0000644000176200001440000001454614645137106026733 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LPDF_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a scaled inverse chi-squared density for y with the * specified degrees of freedom parameter and scale parameter. * \f{eqnarray*}{ y &\sim& \mbox{\sf{Inv-}}\chi^2(\nu, s^2) \\ \log (p (y \, |\, \nu, s)) &=& \log \left( \frac{(\nu / 2)^{\nu / 2}}{\Gamma (\nu / 2)} s^\nu y^{- (\nu / 2 + 1)} \exp^{-\nu s^2 / (2y)} \right) \\ &=& \frac{\nu}{2} \log(\frac{\nu}{2}) - \log (\Gamma (\nu / 2)) + \nu \log(s) - (\frac{\nu}{2} + 1) \log(y) - \frac{\nu s^2}{2y} \\ & & \mathrm{ where } \; y > 0 \f} * * @tparam T_y type of scalar * @tparam T_dof type of degrees of freedom * @tparam T_Scale type of scale * @param y A scalar variable. * @param nu Degrees of freedom. * @param s Scale parameter. * @throw std::domain_error if nu is not greater than 0 * @throw std::domain_error if s is not greater than 0. * @throw std::domain_error if y is not greater than 0. 
*/ template * = nullptr> return_type_t scaled_inv_chi_square_lpdf( const T_y& y, const T_dof& nu, const T_scale& s) { using T_partials_return = partials_return_t; using std::log; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_s_ref = ref_type_t; static const char* function = "scaled_inv_chi_square_lpdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu, "Scale parameter", s); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; T_s_ref s_ref = s; check_not_nan(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_positive_finite(function, "Scale parameter", s_ref); if (size_zero(y, nu, s)) { return 0; } if (!include_summand::value) { return 0; } T_partials_return logp(0); auto ops_partials = make_partials_propagator(y_ref, nu_ref, s_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view s_vec(s_ref); size_t N = max_size(y, nu, s); for (size_t n = 0; n < N; n++) { if (y_vec.val(n) <= 0) { return LOG_ZERO; } } VectorBuilder::value, T_partials_return, T_dof> half_nu(math::size(nu)); for (size_t i = 0; i < stan::math::size(nu); i++) { if (include_summand::value) { half_nu[i] = 0.5 * nu_vec.val(i); } } VectorBuilder::value, T_partials_return, T_y> log_y(math::size(y)); for (size_t i = 0; i < stan::math::size(y); i++) { if (include_summand::value) { log_y[i] = log(y_vec.val(i)); } } VectorBuilder::value, T_partials_return, T_y> inv_y(math::size(y)); for (size_t i = 0; i < stan::math::size(y); i++) { if (include_summand::value) { inv_y[i] = 1.0 / y_vec.val(i); } } VectorBuilder::value, T_partials_return, T_scale> log_s(math::size(s)); for (size_t i = 0; i < stan::math::size(s); i++) { if (include_summand::value) { log_s[i] = log(s_vec.val(i)); } } VectorBuilder::value, T_partials_return, T_dof> log_half_nu(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> lgamma_half_nu(math::size(nu)); VectorBuilder::value, 
T_partials_return, T_dof> digamma_half_nu_over_two(math::size(nu)); for (size_t i = 0; i < stan::math::size(nu); i++) { if (include_summand::value) { lgamma_half_nu[i] = lgamma(half_nu[i]); } if (include_summand::value) { log_half_nu[i] = log(half_nu[i]); } if (!is_constant_all::value) { digamma_half_nu_over_two[i] = digamma(half_nu[i]) * 0.5; } } for (size_t n = 0; n < N; n++) { const T_partials_return s_dbl = s_vec.val(n); const T_partials_return nu_dbl = nu_vec.val(n); if (include_summand::value) { logp += half_nu[n] * log_half_nu[n] - lgamma_half_nu[n]; } if (include_summand::value) { logp += nu_dbl * log_s[n]; } if (include_summand::value) { logp -= (half_nu[n] + 1.0) * log_y[n]; } if (include_summand::value) { logp -= half_nu[n] * s_dbl * s_dbl * inv_y[n]; } if (!is_constant_all::value) { partials<0>(ops_partials)[n] += -(half_nu[n] + 1.0) * inv_y[n] + half_nu[n] * s_dbl * s_dbl * inv_y[n] * inv_y[n]; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += 0.5 * log_half_nu[n] + 0.5 - digamma_half_nu_over_two[n] + log_s[n] - 0.5 * log_y[n] - 0.5 * s_dbl * s_dbl * inv_y[n]; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += nu_dbl / s_dbl - nu_dbl * inv_y[n] * s_dbl; } } return ops_partials.build(logp); } template inline return_type_t scaled_inv_chi_square_lpdf( const T_y& y, const T_dof& nu, const T_scale& s) { return scaled_inv_chi_square_lpdf(y, nu, s); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_lccdf.hpp0000644000176200001440000001206014645137106025321 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CCDF of the Beta-Binomial distribution with given population * size, prior 
success, and prior failure parameters. Given containers of * matching sizes, returns the log sum of probabilities. * * @tparam T_n type of success parameter * @tparam T_N type of population size parameter * @tparam T_size1 type of prior success parameter * @tparam T_size2 type of prior failure parameter * * @param n success parameter * @param N population size parameter * @param alpha prior success parameter * @param beta prior failure parameter * @return log probability or log sum of probabilities * @throw std::domain_error if N, alpha, or beta fails to be positive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t beta_binomial_lccdf(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_N_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "beta_binomial_lccdf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "First prior sample size parameter", alpha, "Second prior sample size parameter", beta); if (size_zero(n, N, alpha, beta)) { return 0; } T_N_ref N_ref = N; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_nonnegative(function, "Population size parameter", N_ref); check_positive_finite(function, "First prior sample size parameter", alpha_ref); check_positive_finite(function, "Second prior sample size parameter", beta_ref); T_partials_return P(0.0); auto ops_partials = make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n); scalar_seq_view N_vec(N_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t max_size_seq_view = max_size(n, N, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(0.0); } } 
for (size_t i = 0; i < max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as neg infinity if (n_vec.val(i) >= N_vec.val(i)) { return ops_partials.build(negative_infinity()); } const T_partials_return n_dbl = n_vec.val(i); const T_partials_return N_dbl = N_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); const T_partials_return beta_dbl = beta_vec.val(i); const T_partials_return mu = alpha_dbl + n_dbl + 1; const T_partials_return nu = beta_dbl + N_dbl - n_dbl - 1; const T_partials_return one = 1; const T_partials_return F = hypergeometric_3F2( {one, mu, -N_dbl + n_dbl + 1}, {n_dbl + 2, 1 - nu}, one); T_partials_return C = lbeta(nu, mu) - lbeta(alpha_dbl, beta_dbl) - lbeta(N_dbl - n_dbl, n_dbl + 2); C = F * exp(C) / (N_dbl + 1); const T_partials_return Pi = C; P += log(Pi); T_partials_return digammaDiff = is_constant_all::value ? 0 : digamma(alpha_dbl + beta_dbl) - digamma(mu + nu); T_partials_return dF[6]; if (!is_constant_all::value) { grad_F32(dF, one, mu, -N_dbl + n_dbl + 1, n_dbl + 2, 1 - nu, one); } if (!is_constant_all::value) { partials<0>(ops_partials)[i] += digamma(mu) - digamma(alpha_dbl) + digammaDiff + dF[1] / F; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] += digamma(nu) - digamma(beta_dbl) + digammaDiff - dF[4] / F; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lognormal_cdf_log.hpp0000644000176200001440000000125114645137106025050 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use lognormal_lcdf */ template return_type_t lognormal_cdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return lognormal_lcdf(y, mu, sigma); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_lccdf.hpp0000644000176200001440000001016314645137106025312 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LCCDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t pareto_type_2_lccdf( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_lambda_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; static const char* function = "pareto_type_2_lccdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", lambda, "Shape parameter", alpha); if (size_zero(y, mu, lambda, alpha)) { return 0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_lambda_ref lambda_ref = lambda; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", lambda_val); check_positive_finite(function, "Shape parameter", alpha_val); check_greater_or_equal(function, "Random variable", y_val, mu_val); auto ops_partials = make_partials_propagator(y_ref, mu_ref, lambda_ref, alpha_ref); const auto& log_temp = to_ref_if::value>( log1p((y_val - mu_val) / lambda_val)); T_partials_return P = -sum(alpha_val * log_temp); if (!is_constant_all::value) { auto rep_deriv = to_ref_if<(!is_constant_all::value + !is_constant_all::value + 
!is_constant_all::value) >= 2>(alpha_val / (y_val - mu_val + lambda_val)); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = rep_deriv * (y_val - mu_val) / lambda_val; } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(rep_deriv); } } size_t N = max_size(y, mu, lambda, alpha); if (!is_constant_all::value) { if (is_vector::value) { using Log_temp_scalar = partials_return_t; using Log_temp_array = Eigen::Array; if (is_vector::value || is_vector::value || is_vector::value) { partials<3>(ops_partials) = -forward_as(log_temp); } else { partials<3>(ops_partials) = Log_temp_array::Constant( N, 1, -forward_as(log_temp)); } } else { forward_as>( partials<3>(ops_partials)) = -log_temp * N / max_size(y, mu, lambda); } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_cdf_log.hpp0000644000176200001440000000124314645137106025633 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use pareto_type_2_lcdf */ template return_type_t pareto_type_2_cdf_log( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { return pareto_type_2_lcdf(y, mu, lambda, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_log_glm_log.hpp0000644000176200001440000000244014645137106026607 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_GLM_LOG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_GLM_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use neg_binomial_2_log_glm_lpmf */ template return_type_t neg_binomial_2_log_glm_log( const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta 
&beta, const T_precision &phi) { return neg_binomial_2_log_glm_lpmf(y, x, alpha, beta, phi); } /** \ingroup multivar_dists * @deprecated use poisson_logit_glm_lpmf */ template inline return_type_t neg_binomial_2_log_glm_log(const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta, const T_precision &phi) { return neg_binomial_2_log_glm_lpmf(y, x, alpha, beta, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_ccdf_log.hpp0000644000176200001440000000101214645137106025337 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use chi_square_lccdf */ template return_type_t chi_square_ccdf_log(const T_y& y, const T_dof& nu) { return chi_square_lccdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_logit_glm_log.hpp0000644000176200001440000000207314645137106026275 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_GLM_LOG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_GLM_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use bernoulli_logit_glm_lpmf */ template return_type_t bernoulli_logit_glm_log( const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta) { return bernoulli_logit_glm_lpmf( y, x, alpha, beta); } /** \ingroup multivar_dists * @deprecated use bernoulli_logit_glm_lpmf */ template inline return_type_t bernoulli_logit_glm_log( const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta) { return bernoulli_logit_glm_lpmf(y, x, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_log_log.hpp0000644000176200001440000000210014645137106025741 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_LOG_HPP #define 
STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use neg_binomial_2_log_lpmf */ template return_type_t neg_binomial_2_log_log( const T_n& n, const T_log_location& eta, const T_precision& phi) { return neg_binomial_2_log_lpmf( n, eta, phi); } /** \ingroup prob_dists * @deprecated use neg_binomial_2_log_lpmf */ template inline return_type_t neg_binomial_2_log_log( const T_n& n, const T_log_location& eta, const T_precision& phi) { return neg_binomial_2_log_lpmf(n, eta, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_wishart_lpdf.hpp0000644000176200001440000000740314645137106024750 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_WISHART_LPDF_HPP #define STAN_MATH_PRIM_PROB_INV_WISHART_LPDF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the Inverse-Wishart density for the given W, degrees * of freedom, and scale matrix. * * The scale matrix, S, must be k x k, symmetric, and semi-positive * definite. * * \f{eqnarray*}{ W &\sim& \mbox{\sf{Inv-Wishart}}_{\nu} (S) \\ \log (p (W \, |\, \nu, S) ) &=& \log \left( \left(2^{\nu k/2} \pi^{k (k-1) /4} \prod_{i=1}^k{\Gamma (\frac{\nu + 1 - i}{2})} \right)^{-1} \times \left| S \right|^{\nu/2} \left| W \right|^{-(\nu + k + 1) / 2} \times \exp (-\frac{1}{2} \mbox{tr} (S W^{-1})) \right) \\ &=& -\frac{\nu k}{2}\log(2) - \frac{k (k-1)}{4} \log(\pi) - \sum_{i=1}^{k}{\log (\Gamma (\frac{\nu+1-i}{2}))} +\frac{\nu}{2} \log(\det(S)) - \frac{\nu+k+1}{2}\log (\det(W)) - \frac{1}{2} \mbox{tr}(S W^{-1}) \f} * * @tparam T_y type of scalar * @tparam T_dof type of degrees of freedom * @tparam T_scale type of scale * @param W A scalar matrix * @param nu Degrees of freedom * @param S The scale matrix * @return The log of the Inverse-Wishart density at W given nu and S. 
* @throw std::domain_error if nu is not greater than k-1 * @throw std::domain_error if S is not square, not symmetric, or not * semi-positive definite. */ template return_type_t inv_wishart_lpdf(const T_y& W, const T_dof& nu, const T_scale& S) { using Eigen::Dynamic; using Eigen::Matrix; using T_W_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_S_ref = ref_type_t; static const char* function = "inv_wishart_lpdf"; check_size_match(function, "Rows of random variable", W.rows(), "columns of scale parameter", S.rows()); check_square(function, "random variable", W); check_square(function, "scale parameter", S); Eigen::Index k = S.rows(); T_nu_ref nu_ref = nu; T_S_ref S_ref = S; T_W_ref W_ref = W; check_greater(function, "Degrees of freedom parameter", nu_ref, k - 1); check_symmetric(function, "random variable", W_ref); check_symmetric(function, "scale parameter", S_ref); auto ldlt_W = make_ldlt_factor(W_ref); check_ldlt_factor(function, "LDLT_Factor of random variable", ldlt_W); auto ldlt_S = make_ldlt_factor(S_ref); check_ldlt_factor(function, "LDLT_Factor of scale parameter", ldlt_S); return_type_t lp(0.0); if (include_summand::value) { lp -= lmgamma(k, 0.5 * nu_ref); } if (include_summand::value) { lp += 0.5 * nu_ref * log_determinant_ldlt(ldlt_S); } if (include_summand::value) { lp -= 0.5 * (nu_ref + k + 1.0) * log_determinant_ldlt(ldlt_W); } if (include_summand::value) { lp -= 0.5 * trace(mdivide_left_ldlt(ldlt_W, S_ref)); } if (include_summand::value) { lp -= nu_ref * k * HALF_LOG_TWO; } return lp; } template inline return_type_t inv_wishart_lpdf(const T_y& W, const T_dof& nu, const T_scale& S) { return inv_wishart_lpdf(W, nu, S); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/rayleigh_lpdf.hpp0000644000176200001440000000614614645137106024222 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_LPDF_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_LPDF_HPP #include #include #include #include #include #include 
#include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t rayleigh_lpdf(const T_y& y, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "rayleigh_lpdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", sigma); T_y_ref y_ref = y; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_positive(function, "Scale parameter", sigma_val); check_positive(function, "Random variable", y_val); if (size_zero(y, sigma)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, sigma_ref); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_over_sigma = to_ref_if::value>(y_val * inv_sigma); size_t N = max_size(y, sigma); T_partials_return logp = -0.5 * sum(square(y_over_sigma)); if (include_summand::value) { logp -= 2.0 * sum(log(sigma_val)) * N / math::size(sigma); } if (include_summand::value) { logp += sum(log(y_val)) * N / math::size(y); } if (!is_constant_all::value) { const auto& scaled_diff = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( inv_sigma * y_over_sigma); if (!is_constant_all::value) { partials<0>(ops_partials) = inv(y_val) - scaled_diff; } if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = y_over_sigma * scaled_diff - 2.0 * inv_sigma; } } return ops_partials.build(logp); } template inline return_type_t rayleigh_lpdf(const T_y& y, const T_scale& sigma) { return rayleigh_lpdf(y, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_cdf.hpp0000644000176200001440000001056414645137106024647 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_PROB_NEG_BINOMIAL_CDF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t neg_binomial_cdf(const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "neg_binomial_cdf"; check_consistent_sizes(function, "Failures variable", n, "Shape parameter", alpha, "Inverse scale parameter", beta); T_n_ref n_ref = n; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); if (size_zero(n, alpha, beta)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_alpha = stan::math::size(alpha); size_t size_n_alpha = max_size(n, alpha); size_t max_size_seq_view = max_size(n, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_shape> digamma_alpha_vec(size_alpha); VectorBuilder::value, T_partials_return, T_n, T_shape> digamma_sum_vec(size_n_alpha); if (!is_constant_all::value) { for (size_t i = 0; i < size_alpha; i++) { digamma_alpha_vec[i] = digamma(alpha_vec.val(i)); } for (size_t i = 0; i < size_n_alpha; i++) { const T_partials_return n_dbl = n_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); digamma_sum_vec[i] = digamma(n_dbl + alpha_dbl + 1); } } for (size_t i = 0; i < 
max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_vec.val(i) == std::numeric_limits::max()) { return ops_partials.build(1.0); } const T_partials_return n_dbl = n_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); const T_partials_return beta_dbl = beta_vec.val(i); const T_partials_return inv_beta_p1 = inv(beta_dbl + 1); const T_partials_return p_dbl = beta_dbl * inv_beta_p1; const T_partials_return d_dbl = square(inv_beta_p1); const T_partials_return P_i = inc_beta(alpha_dbl, n_dbl + 1.0, p_dbl); P *= P_i; if (!is_constant_all::value) { partials<0>(ops_partials)[i] += inc_beta_dda(alpha_dbl, n_dbl + 1, p_dbl, digamma_alpha_vec[i], digamma_sum_vec[i]) / P_i; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] += inc_beta_ddz(alpha_dbl, n_dbl + 1.0, p_dbl) * d_dbl / P_i; } } if (!is_constant_all::value) { for (size_t i = 0; i < size_alpha; ++i) { partials<0>(ops_partials)[i] *= P; } } if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(beta); ++i) { partials<1>(ops_partials)[i] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_lcdf.hpp0000644000176200001440000002330714645137106023667 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_NORMAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> inline return_type_t normal_lcdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using std::fabs; using std::log; using std::pow; using std::sqrt; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "normal_lcdf"; 
check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return cdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return scaled_diff = (y_dbl - mu_dbl) / (sigma_dbl * SQRT_TWO); const T_partials_return sigma_sqrt2 = sigma_dbl * SQRT_TWO; const T_partials_return x2 = square(scaled_diff); // Rigorous numerical approximations are applied here to deal with values // of |scaled_diff|>>0. This is needed to deal with rare base-rate // logistic regression problems where it is useful to use an alternative // link function instead. // // use erfc() instead of erf() in order to retain precision // since for x>0 erfc()->0 if (scaled_diff > 0.0) { // CDF(x) = 1/2 + 1/2erf(x) = 1 - 1/2erfc(x) cdf_log += log1p(-0.5 * erfc(scaled_diff)); if (!is_not_nan(cdf_log)) { cdf_log = 0; } } else if (scaled_diff > -20.0) { // CDF(x) = 1/2 - 1/2erf(-x) = 1/2erfc(-x) cdf_log += log(erfc(-scaled_diff)) + LOG_HALF; } else if (10.0 * log(fabs(scaled_diff)) < log(std::numeric_limits::max())) { // entering territory where erfc(-x)~0 // need to use direct numerical approximation of cdf_log instead // the following based on W. J. Cody, Math. Comp. 
23(107):631-638 (1969) // CDF(x) = 1/2erfc(-x) const T_partials_return x4 = pow(scaled_diff, 4); const T_partials_return x6 = pow(scaled_diff, 6); const T_partials_return x8 = pow(scaled_diff, 8); const T_partials_return x10 = pow(scaled_diff, 10); const T_partials_return temp_p = 0.000658749161529837803157 + 0.0160837851487422766278 / x2 + 0.125781726111229246204 / x4 + 0.360344899949804439429 / x6 + 0.305326634961232344035 / x8 + 0.0163153871373020978498 / x10; const T_partials_return temp_q = -0.00233520497626869185443 - 0.0605183413124413191178 / x2 - 0.527905102951428412248 / x4 - 1.87295284992346047209 / x6 - 2.56852019228982242072 / x8 - 1.0 / x10; cdf_log += LOG_HALF + log(INV_SQRT_PI + (temp_p / temp_q) / x2) - log(-scaled_diff) - x2; } else { // scaled_diff^10 term will overflow cdf_log = stan::math::negative_infinity(); } if (!is_constant_all::value) { // compute partial derivatives // based on analytic form given by: // dln(CDF)/dx = exp(-x^2)/(sqrt(pi)*(1/2+erf(x)/2) T_partials_return dncdf_log = 0.0; T_partials_return t = 0.0; T_partials_return t2 = 0.0; T_partials_return t4 = 0.0; // calculate using piecewise function // (due to instability / inaccuracy in the various approximations) if (scaled_diff > 2.9) { // approximation derived from Abramowitz and Stegun (1964) 7.1.26 t = 1.0 / (1.0 + 0.3275911 * scaled_diff); t2 = square(t); t4 = pow(t, 4); dncdf_log = 1.0 / (SQRT_PI * (exp(x2) - 0.254829592 + 0.284496736 * t - 1.421413741 * t2 + 1.453152027 * t2 * t - 1.061405429 * t4)); } else if (scaled_diff > 2.5) { // in the trouble area where all of the standard numerical // approximations are unstable - bridge the gap using Taylor // expansions of the analytic function // use Taylor expansion centred around x=2.7 t = scaled_diff - 2.7; t2 = square(t); t4 = pow(t, 4); dncdf_log = 0.0003849882382 - 0.002079084702 * t + 0.005229340880 * t2 - 0.008029540137 * t2 * t + 0.008232190507 * t4 - 0.005692364250 * t4 * t + 0.002399496363 * pow(t, 6); } else if 
(scaled_diff > 2.1) { // use Taylor expansion centred around x=2.3 t = scaled_diff - 2.3; t2 = square(t); t4 = pow(t, 4); dncdf_log = 0.002846135439 - 0.01310032351 * t + 0.02732189391 * t2 - 0.03326906904 * t2 * t + 0.02482478940 * t4 - 0.009883071924 * t4 * t - 0.0002771362254 * pow(t, 6); } else if (scaled_diff > 1.5) { // use Taylor expansion centred around x=1.85 t = scaled_diff - 1.85; t2 = square(t); t4 = pow(t, 4); dncdf_log = 0.01849212058 - 0.06876280470 * t + 0.1099906382 * t2 - 0.09274533184 * t2 * t + 0.03543327418 * t4 + 0.005644855518 * t4 * t - 0.01111434424 * pow(t, 6); } else if (scaled_diff > 0.8) { // use Taylor expansion centred around x=1.15 t = scaled_diff - 1.15; t2 = square(t); t4 = pow(t, 4); dncdf_log = 0.1585747034 - 0.3898677543 * t + 0.3515963775 * t2 - 0.09748053605 * t2 * t - 0.04347986191 * t4 + 0.02182506378 * t4 * t + 0.01074751427 * pow(t, 6); } else if (scaled_diff > 0.1) { // use Taylor expansion centred around x=0.45 t = scaled_diff - 0.45; t2 = square(t); t4 = pow(t, 4); dncdf_log = 0.6245634904 - 0.9521866949 * t + 0.3986215682 * t2 + 0.04700850676 * t2 * t - 0.03478651979 * t4 - 0.01772675404 * t4 * t + 0.0006577254811 * pow(t, 6); } else if (10.0 * log(fabs(scaled_diff)) < log(std::numeric_limits::max())) { // approximation derived from Abramowitz and Stegun (1964) 7.1.26 // use fact that erf(x)=-erf(-x) // Abramowitz and Stegun define this for -inf::value) { partials<0>(ops_partials)[n] += dncdf_log / sigma_sqrt2; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= dncdf_log / sigma_sqrt2; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= dncdf_log * scaled_diff / sigma_dbl; } } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/loglogistic_log.hpp0000644000176200001440000000217214645137106024564 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGLOGISTIC_LOG_HPP #define STAN_MATH_PRIM_PROB_LOGLOGISTIC_LOG_HPP 
#include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use loglogistic_lpdf */ template * = nullptr> return_type_t loglogistic_log(const T_y& y, const T_scale& alpha, const T_shape& beta) { return loglogistic_lpdf(y, alpha, beta); } /** \ingroup prob_dists * @deprecated use loglogistic_lpdf */ template inline return_type_t loglogistic_log( const T_y& y, const T_scale& alpha, const T_shape& beta) { return loglogistic_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_cdf_log.hpp0000644000176200001440000000114114645137106026420 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use poisson_binomial_lcdf */ template return_type_t poisson_binomial_cdf_log(const T_y& y, const T_theta& theta) { return poisson_binomial_lcdf(y, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_cholesky_log.hpp0000644000176200001440000000340114645137106026644 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_CHOLESKY_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_CHOLESKY_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the multivariate normal density for the given y, mu, and * a Cholesky factor L of the variance matrix. * Sigma = LL', a square, semi-positive definite matrix. * * @deprecated use multi_normal_cholesky_lpdf * * @param y A scalar vector * @param mu The mean vector of the multivariate normal distribution. * @param L The Cholesky decomposition of a variance matrix * of the multivariate normal distribution * @return The log of the multivariate normal density. * @throw std::domain_error if LL' is not square, not symmetric, * or not semi-positive definite. 
* @tparam T_y Type of scalar. * @tparam T_loc Type of location. * @tparam T_covar Type of scale. */ template return_type_t multi_normal_cholesky_log(const T_y& y, const T_loc& mu, const T_covar& L) { return multi_normal_cholesky_lpdf(y, mu, L); } /** \ingroup multivar_dists * @deprecated use multi_normal_cholesky_lpdf */ template inline return_type_t multi_normal_cholesky_log( const T_y& y, const T_loc& mu, const T_covar& L) { return multi_normal_cholesky_lpdf(y, mu, L); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_logit_glm_rng.hpp0000644000176200001440000000631114645137106026301 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_GLM_RNG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_GLM_RNG_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns a draw from the Generalized Linear Model (GLM) * with Bernoulli distribution and logit link function. * * This is a convenience wrapper around * bernoulli_logit_rng(alpha + x * beta, rng). * @tparam T_x type of the matrix of independent variables (features); * this should be an Eigen::Matrix whose number of columns should * match the length of beta; the number of rows is the number of * samples. * @tparam T_alpha type of the intercept(s); this can be a vector of * the same length as y) of intercepts or a single value (for models * with constant intercept); if a vector its length should match x's * row-count; * @tparam T_beta type of the weight vector; * @tparam RNG Type of pseudo-random number generator. * @param x design matrix * @param alpha intercept (in log odds) * @param beta weight vector * @param rng Pseudo-random number generator. * @return Bernoulli logit glm random variate * @throw std::domain_error if x, beta or alpha is infinite. * @throw std::invalid_argument if container sizes mismatch. 
*/ template inline typename VectorBuilder::type bernoulli_logit_glm_rng( const T_x& x, const T_alpha& alpha, const T_beta& beta, RNG& rng) { using boost::bernoulli_distribution; using boost::variate_generator; using T_x_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; const size_t N = x.cols(); const size_t M = x.rows(); static const char* function = "bernoulli_logit_glm_rng"; check_consistent_size(function, "Weight vector", beta, N); check_consistent_size(function, "Vector of intercepts", alpha, M); T_x_ref x_ref = x; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_finite(function, "Matrix of independent variables", x_ref); check_finite(function, "Weight vector", beta_ref); check_finite(function, "Intercept", alpha_ref); const auto& beta_vector = as_column_vector_or_scalar(beta_ref); Eigen::VectorXd x_beta; if (is_vector::value) { x_beta = x_ref * beta_vector; } else { x_beta = (x_ref.array() * forward_as(beta_vector)).rowwise().sum(); } scalar_seq_view alpha_vec(alpha_ref); VectorBuilder output(M); for (size_t m = 0; m < M; ++m) { double theta_m = alpha_vec[m] + x_beta(m); variate_generator> bernoulli_rng( rng, bernoulli_distribution<>(inv_logit(theta_m))); output[m] = bernoulli_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_lpdf.hpp0000644000176200001440000001111014645137106024060 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_LPDF_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a uniform density for the given * y, lower, and upper bound. 
* \f{eqnarray*}{ y &\sim& \mbox{\sf{U}}(\alpha, \beta) \\ \log (p (y \, |\, \alpha, \beta)) &=& \log \left( \frac{1}{\beta-\alpha} \right) \\ &=& \log (1) - \log (\beta - \alpha) \\ &=& -\log (\beta - \alpha) \\ & & \mathrm{ where } \; y \in [\alpha, \beta], \log(0) \; \mathrm{otherwise} \f} * * @tparam T_y type of scalar * @tparam T_low type of lower bound * @tparam T_high type of upper bound * @param y A scalar variable. * @param alpha Lower bound. * @param beta Upper bound. * @throw std::invalid_argument if the lower bound is greater than * or equal to the lower bound */ template * = nullptr> return_type_t uniform_lpdf(const T_y& y, const T_low& alpha, const T_high& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_low>; using T_beta_ref = ref_type_if_t::value, T_high>; static const char* function = "uniform_lpdf"; check_consistent_sizes(function, "Random variable", y, "Lower bound parameter", alpha, "Upper bound parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Lower bound parameter", alpha_val); check_finite(function, "Upper bound parameter", beta_val); check_greater(function, "Upper bound parameter", beta_val, alpha_val); if (size_zero(y, alpha, beta)) { return 0.0; } if (!include_summand::value) { return 0.0; } if (sum(promote_scalar(y_val < alpha_val)) || sum(promote_scalar(beta_val < y_val))) { return LOG_ZERO; } T_partials_return logp = 0; size_t N = max_size(y, alpha, beta); if (include_summand::value) { logp -= sum(log(beta_val - alpha_val)) * N / max_size(alpha, beta); } auto ops_partials = 
make_partials_propagator(y_ref, alpha_ref, beta_ref); if (!is_constant_all::value) { auto inv_beta_minus_alpha = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( inv(beta_val - alpha_val)); if (!is_constant_all::value) { if (is_vector::value && !is_vector::value && !is_vector::value) { partials<2>(ops_partials) = -inv_beta_minus_alpha * math::size(y); } else { partials<2>(ops_partials) = -inv_beta_minus_alpha; } } if (!is_constant_all::value) { if (is_vector::value && !is_vector::value && !is_vector::value) { partials<1>(ops_partials) = inv_beta_minus_alpha * math::size(y); } else { partials<1>(ops_partials) = std::move(inv_beta_minus_alpha); } } } return ops_partials.build(logp); } template inline return_type_t uniform_lpdf(const T_y& y, const T_low& alpha, const T_high& beta) { return uniform_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_cdf_log.hpp0000644000176200001440000000117514645137106025727 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use neg_binomial_2_lcdf */ template return_type_t neg_binomial_2_cdf_log( const T_n& n, const T_location& mu, const T_precision& phi) { return neg_binomial_2_lcdf(n, mu, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_wishart_cholesky_lpdf.hpp0000644000176200001440000000772514645137106026660 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_WISHART_CHOLESKY_LPDF_HPP #define STAN_MATH_PRIM_PROB_INV_WISHART_CHOLESKY_LPDF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return the natural logarithm of the unnormalized inverse * wishart density of the specified lower-triangular Cholesky * factor variate, positive degrees of 
freedom, and lower-triangular * Cholesky factor of the scale matrix. * * The scale matrix, `L_S`, must be a lower Cholesky factor. * Dimension, k, is implicit. * nu must be greater than k-1 * * The change of variables from Y, a positive-definite matrix, to * L_Y, the lower triangular Cholesky factor, is given in Theorem 2.1.9 in * Muirhead, R. J. (2005). * Aspects of Multivariate Statistical Theory. Wiley-Interscience. * @tparam T_y Cholesky factor matrix * @tparam T_dof scalar degrees of freedom * @tparam T_scale Cholesky factor matrix * @param L_Y lower triangular Cholesky factor of a covariance matrix * @param nu scalar degrees of freedom * @param L_S lower triangular Choleskyy factor of the scale matrix * @return natural logarithm of the Wishart density at L_Y given nu and L_S * @throw std::domain_error if nu is not greater than k-1 * @throw std::domain_error if L_S is not a valid Cholesky factor. */ template * = nullptr, require_all_matrix_t* = nullptr> return_type_t inv_wishart_cholesky_lpdf( const T_y& L_Y, const T_dof& nu, const T_scale& L_S) { using Eigen::Lower; using T_L_Y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_L_S_ref = ref_type_t; using T_return = return_type_t; static const char* function = "inv_wishart_cholesky_lpdf"; Eigen::Index k = L_Y.rows(); check_size_match(function, "Rows of Random variable", L_Y.rows(), "columns of scale parameter", L_S.rows()); check_size_match(function, "Rows of random variable", L_Y.rows(), "columns of Random variable", L_Y.cols()); check_size_match(function, "Rows of scale parameter", L_S.rows(), "columns of scale parameter", L_S.cols()); T_L_Y_ref L_Y_ref = L_Y; T_nu_ref nu_ref = nu; T_L_S_ref L_S_ref = L_S; check_greater(function, "Degrees of freedom parameter", nu_ref, k - 1); check_positive(function, "Cholesky Random variable", L_Y_ref.diagonal()); check_positive(function, "columns of Cholesky Random variable", L_Y_ref.cols()); check_positive(function, "Cholesky Scale matrix", L_S_ref.diagonal()); 
check_positive(function, "columns of Cholesky Scale matrix", L_S_ref.cols()); T_return lp(0.0); if (include_summand::value) { lp += k * LOG_TWO * (1 - 0.5 * nu_ref); lp -= lmgamma(k, 0.5 * nu_ref); } if (include_summand::value) { auto L_YinvL_S = mdivide_left_tri(L_Y_ref, L_S_ref); T_return dot_LYinvLS(0.0); Eigen::Matrix linspaced_rv(k); T_return nu_plus_1 = nu_ref + 1; for (int i = 0; i < k; i++) { dot_LYinvLS += dot_self(L_YinvL_S.row(i).head(i + 1)); linspaced_rv(i) = nu_plus_1 + i; } lp += -0.5 * dot_LYinvLS - dot_product(linspaced_rv, log(L_Y_ref.diagonal())) + nu_ref * sum(log(L_S_ref.diagonal())); } return lp; } template inline return_type_t inv_wishart_cholesky_lpdf( const T_y& L_Y, const T_dof& nu, const T_scale& L_S) { return inv_wishart_cholesky_lpdf(L_Y, nu, L_S); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/categorical_lpmf.hpp0000644000176200001440000000420014645137106024671 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CATEGORICAL_LPMF_HPP #define STAN_MATH_PRIM_PROB_CATEGORICAL_LPMF_HPP #include #include #include #include #include #include namespace stan { namespace math { // Categorical(n|theta) [0 < n <= N; 0 <= theta[n] <= 1; SUM theta = 1] template * = nullptr> return_type_t categorical_lpmf(int n, const T_prob& theta) { static const char* function = "categorical_lpmf"; using std::log; check_bounded(function, "Number of categories", n, 1, theta.size()); ref_type_t theta_ref = theta; check_simplex(function, "Probabilities parameter", value_of(theta_ref)); if (include_summand::value) { return log(theta_ref.coeff(n - 1)); } return 0.0; } template * = nullptr> return_type_t categorical_lpmf(const std::vector& ns, const T_prob& theta) { static const char* function = "categorical_lpmf"; check_bounded(function, "element of outcome array", ns, 1, theta.size()); ref_type_t theta_ref = theta; check_simplex(function, "Probabilities parameter", value_of(theta_ref)); if (!include_summand::value) { return 0.0; 
} if (ns.size() == 0) { return 0.0; } Eigen::Matrix, Eigen::Dynamic, 1> log_theta = log(theta_ref); Eigen::Matrix, Eigen::Dynamic, 1> log_theta_ns( ns.size()); for (size_t i = 0; i < ns.size(); ++i) { log_theta_ns(i) = log_theta(ns[i] - 1); } return sum(log_theta_ns); } template * = nullptr, require_eigen_col_vector_t* = nullptr> inline return_type_t categorical_lpmf(const T_n& ns, const T_prob& theta) { return categorical_lpmf(ns, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_rng.hpp0000644000176200001440000000434014645137106025036 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_RNG_HPP #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a beta-binomial random variate with the specified population size, * success, and failure parameters using the given random number generator. * * N, alpha, and beta can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_N Type of population size parameter * @tparam T_shape1 Type of success parameter * @tparam T_shape2 Type of failure parameter * @tparam RNG type of random number generator * @param N (Sequence of) population size parameter(s) * @param alpha (Sequence of) positive success parameter(s) * @param beta (Sequence of) positive failure parameter(s) * @param rng random number generator * @return (Sequence of) beta-binomial random variate(s) * @throw std::domain_error if N is negative, or alpha or beta are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type beta_binomial_rng(const T_N &N, const T_shape1 &alpha, const T_shape2 &beta, RNG &rng) { using T_N_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char *function = "beta_binomial_rng"; check_consistent_sizes(function, "First prior sample size parameter", alpha, "Second prior sample size parameter", beta); T_N_ref N_ref = N; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_nonnegative(function, "Population size parameter", N_ref); check_positive_finite(function, "First prior sample size parameter", alpha_ref); check_positive_finite(function, "Second prior sample size parameter", beta_ref); auto p = beta_rng(alpha_ref, beta_ref, rng); return binomial_rng(N_ref, p, rng); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_ccdf_log.hpp0000644000176200001440000000145514645137106026014 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_binomial_lccdf */ template return_type_t beta_binomial_ccdf_log(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { return beta_binomial_lccdf(n, N, alpha, beta); } } // namespace math } 
// namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/rayleigh_ccdf_log.hpp0000644000176200001440000000107214645137106025026 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use rayleigh_lccdf */ template return_type_t rayleigh_ccdf_log(const T_y& y, const T_scale& sigma) { return rayleigh_lccdf(y, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_id_glm_log.hpp0000644000176200001440000000223214645137106025045 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_ID_GLM_LOG_HPP #define STAN_MATH_PRIM_PROB_NORMAL_ID_GLM_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use normal_id_glm_lpdf */ template return_type_t normal_id_glm_log( const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta, const T_scale &sigma) { return normal_id_glm_lpdf( y, x, alpha, beta, sigma); } /** \ingroup multivar_dists * @deprecated use normal_id_glm_lpdf */ template inline return_type_t normal_id_glm_log( const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta, const T_scale &sigma) { return normal_id_glm_lpdf(y, x, alpha, beta, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_cdf.hpp0000644000176200001440000000430114645137106023706 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_CDF_HPP #define STAN_MATH_PRIM_PROB_POISSON_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // Poisson CDF template return_type_t poisson_cdf(const T_n& n, const T_rate& lambda) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_if_t::value, T_n>; using T_lambda_ref = 
ref_type_if_t::value, T_rate>; using std::pow; static const char* function = "poisson_cdf"; check_consistent_sizes(function, "Random variable", n, "Rate parameter", lambda); T_n_ref n_ref = n; T_lambda_ref lambda_ref = lambda; decltype(auto) n_val = to_ref(as_value_column_array_or_scalar(n_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_nonnegative(function, "Rate parameter", lambda_val); if (size_zero(n, lambda)) { return 1.0; } auto ops_partials = make_partials_propagator(lambda_ref); if (sum(promote_scalar(n_val < 0))) { return ops_partials.build(0.0); } const auto& Pi = to_ref_if::value>( gamma_q(n_val + 1.0, lambda_val)); T_partials_return P = prod(Pi); if (!is_constant_all::value) { partials<0>(ops_partials) = -exp(-lambda_val) * pow(lambda_val, n_val) / (tgamma(n_val + 1.0) * Pi) * P; } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multinomial_log.hpp0000644000176200001440000000161314645137106024576 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTINOMIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTINOMIAL_LOG_HPP #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use multinomial_lpmf */ template return_type_t multinomial_log(const std::vector& ns, const T_prob& theta) { return multinomial_lpmf(ns, theta); } /** \ingroup multivar_dists * @deprecated use multinomial_lpmf */ template return_type_t multinomial_log(const std::vector& ns, const T_prob& theta) { return multinomial_lpmf(ns, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multinomial_lpmf.hpp0000644000176200001440000000324014645137106024751 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTINOMIAL_LPMF_HPP #define STAN_MATH_PRIM_PROB_MULTINOMIAL_LPMF_HPP #include #include #include #include #include #include namespace stan { namespace math { // Multinomial(ns|N, theta) 
[0 <= n <= N; SUM ns = N; // 0 <= theta[n] <= 1; SUM theta = 1] template * = nullptr> return_type_t multinomial_lpmf(const std::vector& ns, const T_prob& theta) { static const char* function = "multinomial_lpmf"; check_size_match(function, "Size of number of trials variable", ns.size(), "rows of probabilities parameter", theta.rows()); check_nonnegative(function, "Number of trials variable", ns); const auto& theta_ref = to_ref(theta); check_simplex(function, "Probabilities parameter", theta_ref); return_type_t lp(0.0); if (include_summand::value) { double sum = 1.0; for (int n : ns) { sum += n; lp -= lgamma(n + 1.0); } lp += lgamma(sum); } if (include_summand::value) { for (unsigned int i = 0; i < ns.size(); ++i) { lp += multiply_log(ns[i], theta_ref.coeff(i)); } } return lp; } template return_type_t multinomial_lpmf(const std::vector& ns, const T_prob& theta) { return multinomial_lpmf(ns, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_ccdf_log.hpp0000644000176200001440000000077714645137106024727 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use poisson_lccdf */ template return_type_t poisson_ccdf_log(const T_n& n, const T_rate& lambda) { return poisson_lccdf(n, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_ccdf_log.hpp0000644000176200001440000000114014645137106024131 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_lccdf */ template return_type_t beta_ccdf_log( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { return beta_lccdf(y, alpha, beta); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/binomial_logit_lpmf.hpp0000644000176200001440000001050414645137106025410 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_LOGIT_LPMF_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_LOGIT_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Binomial log PMF in logit parametrization. Binomial(n|n, inv_logit(alpha)) * * If given vectors of matching lengths, returns * the log sum of probabilities. * * @param n successes variable * @param N population size parameter * @param alpha logit transformed probability parameter * @return log probability or log sum of probabilities * @throw std::domain_error if N is negative or probability parameter is invalid * @throw std::invalid_argument if vector sizes do not match */ template * = nullptr> return_type_t binomial_logit_lpmf(const T_n& n, const T_N& N, const T_prob& alpha) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_if_t::value, T_n>; using T_N_ref = ref_type_if_t::value, T_N>; using T_alpha_ref = ref_type_if_t::value, T_prob>; static const char* function = "binomial_logit_lpmf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "Probability parameter", alpha); T_n_ref n_ref = n; T_N_ref N_ref = N; T_alpha_ref alpha_ref = alpha; decltype(auto) n_val = to_ref(as_value_column_array_or_scalar(n_ref)); decltype(auto) N_val = to_ref(as_value_column_array_or_scalar(N_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_bounded(function, "Successes variable", value_of(n_val), 0, N_val); check_nonnegative(function, "Population size parameter", N_val); check_finite(function, "Probability parameter", alpha_val); if (size_zero(n, N, alpha)) { return 0.0; } if (!include_summand::value) { return 0.0; } const auto& inv_logit_alpha = 
to_ref_if::value>(inv_logit(alpha_val)); const auto& inv_logit_neg_alpha = to_ref_if::value>(inv_logit(-alpha_val)); size_t maximum_size = max_size(n, N, alpha); const auto& log_inv_logit_alpha = log(inv_logit_alpha); const auto& log_inv_logit_neg_alpha = log(inv_logit_neg_alpha); T_partials_return logp = sum(n_val * log_inv_logit_alpha + (N_val - n_val) * log_inv_logit_neg_alpha); if (include_summand::value) { logp += sum(binomial_coefficient_log(N_val, n_val)) * maximum_size / max_size(n, N); } auto ops_partials = make_partials_propagator(alpha_ref); if (!is_constant_all::value) { if (is_vector::value) { edge<0>(ops_partials).partials_ = n_val * inv_logit_neg_alpha - (N_val - n_val) * inv_logit_alpha; } else { T_partials_return sum_n = sum(n_val) * maximum_size / math::size(n); partials<0>(ops_partials)[0] = forward_as( sum_n * inv_logit_neg_alpha - (sum(N_val) * maximum_size / math::size(N) - sum_n) * inv_logit_alpha); } } return ops_partials.build(logp); } template inline return_type_t binomial_logit_lpmf(const T_n& n, const T_N& N, const T_prob& alpha) { return binomial_logit_lpmf(n, N, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_rng.hpp0000644000176200001440000000327014645137106024375 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_RNG_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a chi squared random variate with nu degrees of * freedom using the specified random number generator. * * nu can be a scalar or a one-dimensional container. 
* * @tparam T_deg type of degrees of freedom parameter * @tparam RNG class of random number generator * @param nu (Sequence of) positive degrees of freedom parameter(s) * @param rng random number generator * @return (Sequence of) chi squared random variate(s) * @throw std::domain_error if nu is nonpositive */ template inline typename VectorBuilder::type chi_square_rng( const T_deg& nu, RNG& rng) { using boost::variate_generator; using boost::random::chi_squared_distribution; using T_nu_ref = ref_type_t; static const char* function = "chi_square_rng"; T_nu_ref nu_ref = nu; check_positive_finite(function, "Degrees of freedom parameter", nu_ref); scalar_seq_view nu_vec(nu_ref); size_t N = stan::math::size(nu); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > chi_square_rng( rng, chi_squared_distribution<>(nu_vec[n])); output[n] = chi_square_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/hmm_hidden_state_prob.hpp0000644000176200001440000000663714645137106025734 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_HMM_HIDDEN_STATE_PROB_HPP #define STAN_MATH_PRIM_PROB_HMM_HIDDEN_STATE_PROB_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * For a hidden Markov model with observation y, hidden state x, * and parameters theta, compute the marginal probability * vector for each x, given y and theta, p(x_i | y, theta). * In this setting, the hidden states are discrete * and take values over the finite space {1, ..., K}. * Hence for each hidden variable x, we compute a simplex with K elements. * The final result is stored in a K by N matrix, where N is the length of x. * log_omegas is a matrix of observational densities, where * the (i, j)th entry corresponds to the density of the ith observation, y_i, * given x_i = j. 
* The transition matrix Gamma is such that the (i, j)th entry is the * probability that x_n = j given x_{n - 1} = i. The rows of Gamma are * simplexes. * This function cannot be used to reconstruct the marginal distributon * of a state sequence given parameters and an observation sequence, * p(x | y, theta), * because it only computes marginals on a state-by-state basis. * * @tparam T_omega type of the log likelihood matrix * @tparam T_Gamma type of the transition matrix * @tparam T_rho type of the initial guess vector * @param[in] log_omegas log matrix of observational densities * @param[in] Gamma transition density between hidden states * @param[in] rho initial state * @return the posterior probability for each latent state * @throw `std::invalid_argument` if Gamma is not square * or if the size of rho is not the number of rows of log_omegas * @throw `std::domain_error` if rho is not a simplex and of the rows * of Gamma are not a simplex */ template * = nullptr, require_eigen_col_vector_t* = nullptr> inline Eigen::MatrixXd hmm_hidden_state_prob(const T_omega& log_omegas, const T_Gamma& Gamma, const T_rho& rho) { int n_states = log_omegas.rows(); int n_transitions = log_omegas.cols() - 1; Eigen::MatrixXd omegas = value_of(log_omegas).array().exp(); ref_type_t rho_dbl = value_of(rho); ref_type_t Gamma_dbl = value_of(Gamma); hmm_check(log_omegas, Gamma_dbl, rho_dbl, "hmm_hidden_state_prob"); Eigen::MatrixXd alphas(n_states, n_transitions + 1); alphas.col(0) = omegas.col(0).cwiseProduct(rho_dbl); alphas.col(0) /= alphas.col(0).maxCoeff(); for (int n = 0; n < n_transitions; ++n) alphas.col(n + 1) = omegas.col(n + 1).cwiseProduct(Gamma_dbl.transpose() * alphas.col(n)); // Backward pass with running normalization Eigen::VectorXd beta = Eigen::VectorXd::Ones(n_states); alphas.col(n_transitions) /= alphas.col(n_transitions).sum(); for (int n = n_transitions; n-- > 0;) { beta = Gamma_dbl * omegas.col(n + 1).cwiseProduct(beta); beta /= beta.maxCoeff(); // Reuse alphas to 
store probabilities alphas.col(n) = alphas.col(n).cwiseProduct(beta); alphas.col(n) /= alphas.col(n).sum(); } return alphas; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/wishart_log.hpp0000644000176200001440000000430414645137106023725 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WISHART_LOG_HPP #define STAN_MATH_PRIM_PROB_WISHART_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the Wishart density for the given W, degrees of freedom, * and scale matrix. * * The scale matrix, S, must be k x k, symmetric, and semi-positive definite. * Dimension, k, is implicit. * nu must be greater than k-1 * * \f{eqnarray*}{ W &\sim& \mbox{\sf{Wishart}}_{\nu} (S) \\ \log (p (W \, |\, \nu, S) ) &=& \log \left( \left(2^{\nu k/2} \pi^{k (k-1) /4} \prod_{i=1}^k{\Gamma (\frac{\nu + 1 - i}{2})} \right)^{-1} \times \left| S \right|^{-\nu/2} \left| W \right|^{(\nu - k - 1) / 2} \times \exp (-\frac{1}{2} \mbox{tr} (S^{-1} W)) \right) \\ &=& -\frac{\nu k}{2}\log(2) - \frac{k (k-1)}{4} \log(\pi) - \sum_{i=1}^{k}{\log (\Gamma (\frac{\nu+1-i}{2}))} -\frac{\nu}{2} \log(\det(S)) + \frac{\nu-k-1}{2}\log (\det(W)) - \frac{1}{2} \mbox{tr} (S^{-1}W) \f} * * @deprecated use wishart_lpdf * * @param W A scalar matrix * @param nu Degrees of freedom * @param S The scale matrix * @return The log of the Wishart density at W given nu and S. * @throw std::domain_error if nu is not greater than k-1 * @throw std::domain_error if S is not square, not symmetric, or not semi-positive definite. * @tparam T_y Type of matrix. * @tparam T_dof Type of degrees of freedom. * @tparam T_scale Type of scale. 
*/ template return_type_t wishart_log(const T_y& W, const T_dof& nu, const T_scale& S) { return wishart_lpdf(W, nu, S); } /** \ingroup multivar_dists * @deprecated use wishart_lpdf */ template inline return_type_t wishart_log(const T_y& W, const T_dof& nu, const T_scale& S) { return wishart_lpdf(W, nu, S); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_corr_cholesky_rng.hpp0000644000176200001440000000157114645137106025762 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_CORR_CHOLESKY_RNG_HPP #define STAN_MATH_PRIM_PROB_LKJ_CORR_CHOLESKY_RNG_HPP #include #include #include #include namespace stan { namespace math { template inline Eigen::MatrixXd lkj_corr_cholesky_rng(size_t K, double eta, RNG& rng) { static const char* function = "lkj_corr_cholesky_rng"; check_positive(function, "Shape parameter", eta); Eigen::ArrayXd CPCs((K * (K - 1)) / 2); double alpha = eta + 0.5 * (K - 1); unsigned int count = 0; for (size_t i = 0; i < (K - 1); i++) { alpha -= 0.5; for (size_t j = i + 1; j < K; j++) { CPCs(count) = 2.0 * beta_rng(alpha, alpha, rng) - 1.0; count++; } } return read_corr_L(CPCs, K); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_log.hpp0000644000176200001440000000206314645137106024667 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use neg_binomial_lpmf */ template return_type_t neg_binomial_log(const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { return neg_binomial_lpmf(n, alpha, beta); } /** \ingroup prob_dists * @deprecated use neg_binomial_lpmf */ template inline return_type_t neg_binomial_log( const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { return neg_binomial_lpmf(n, alpha, beta); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/rayleigh_log.hpp0000644000176200001440000000147414645137106024055 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_LOG_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use rayleigh_lpdf */ template return_type_t rayleigh_log(const T_y& y, const T_scale& sigma) { return rayleigh_lpdf(y, sigma); } /** \ingroup prob_dists * @deprecated use rayleigh_lpdf */ template inline return_type_t rayleigh_log(const T_y& y, const T_scale& sigma) { return rayleigh_lpdf(y, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_lpdf.hpp0000644000176200001440000001044614645137106026274 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the double exponential log probability density function. Given * containers of matching sizes, returns the log sum of densities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. 
* @param y real parameter * @param mu location parameter * @param sigma scale parameter * @return log probability density or log sum of probability densities * @throw std::domain_error if y is nan, mu is infinite, or sigma is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t double_exponential_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "double_exponential_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Shape parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; if (size_zero(y, mu, sigma)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_finite(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); const auto& inv_sigma = to_ref(inv(sigma_val)); const auto& y_m_mu = to_ref_if::value>(y_val - mu_val); const auto& abs_diff_y_mu = fabs(y_m_mu); const auto& scaled_diff = to_ref_if::value>(abs_diff_y_mu * inv_sigma); size_t N = max_size(y, mu, sigma); if (include_summand::value) { logp -= N * LOG_TWO; } if (include_summand::value) { logp -= sum(log(sigma_val)) * N / math::size(sigma); } logp -= sum(scaled_diff); if (!is_constant_all::value) { const auto& diff_sign = sign(y_m_mu); const auto& rep_deriv = to_ref_if<(!is_constant_all::value && 
!is_constant_all::value)>(diff_sign * inv_sigma); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = rep_deriv; } } if (!is_constant_all::value) { partials<2>(ops_partials) = inv_sigma * (scaled_diff - 1); } return ops_partials.build(logp); } template return_type_t double_exponential_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma) { return double_exponential_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_cdf_log.hpp0000644000176200001440000000100314645137106025044 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use bernoulli_lcdf */ template return_type_t bernoulli_cdf_log(const T_n& n, const T_prob& theta) { return bernoulli_lcdf(n, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_logit_log.hpp0000644000176200001440000000172114645137106025234 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_LOGIT_LOG_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_LOGIT_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use binomial_logit_lpmf */ template return_type_t binomial_logit_log(const T_n& n, const T_N& N, const T_prob& alpha) { return binomial_logit_lpmf(n, N, alpha); } /** \ingroup prob_dists * @deprecated use binomial_logit_lpmf */ template inline return_type_t binomial_logit_log(const T_n& n, const T_N& N, const T_prob& alpha) { return binomial_logit_lpmf(n, N, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_log.hpp0000644000176200001440000000211214645137106023531 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_LOG_HPP #define STAN_MATH_PRIM_PROB_PARETO_LOG_HPP #include 
#include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use pareto_lpdf */ template return_type_t pareto_log(const T_y& y, const T_scale& y_min, const T_shape& alpha) { return pareto_lpdf(y, y_min, alpha); } /** \ingroup prob_dists * @deprecated use pareto_lpdf */ template inline return_type_t pareto_log(const T_y& y, const T_scale& y_min, const T_shape& alpha) { return pareto_lpdf(y, y_min, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_lccdf.hpp0000644000176200001440000001222414645137106023451 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_LCCDF_HPP #define STAN_MATH_PRIM_PROB_BETA_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the beta log complementary cumulative distribution function * for the given probability, success, and failure parameters. Any * arguments other than scalars must be containers of the same size. * With non-scalar arguments, the return is the sum of the log ccdfs * with scalars broadcast as necessary. 
* * @tparam T_y type of y * @tparam T_scale_succ type of success parameter * @tparam T_scale_fail type of failure parameter * @param y (Sequence of) scalar(s) between zero and one * @param alpha (Sequence of) success parameter(s) * @param beta_param (Sequence of) failure parameter(s) * @return log probability or sum of log of probabilities * @throw std::domain_error if alpha or beta is negative * @throw std::domain_error if y is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template return_type_t beta_lccdf( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta_param) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "beta_lccdf"; check_consistent_sizes(function, "Random variable", y, "First shape parameter", alpha, "Second shape parameter", beta_param); if (size_zero(y, alpha, beta_param)) { return 0; } T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta_param; check_positive_finite(function, "First shape parameter", alpha_ref); check_positive_finite(function, "Second shape parameter", beta_ref); check_bounded(function, "Random variable", value_of(y_ref), 0, 1); T_partials_return ccdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_alpha = stan::math::size(alpha); size_t size_beta = stan::math::size(beta_param); size_t size_alpha_beta = max_size(alpha, beta_param); size_t N = max_size(y, alpha, beta_param); VectorBuilder::value, T_partials_return, T_scale_succ> digamma_alpha(size_alpha); VectorBuilder::value, T_partials_return, T_scale_fail> digamma_beta(size_beta); VectorBuilder::value, T_partials_return, T_scale_succ, T_scale_fail> digamma_sum(size_alpha_beta); if 
(!is_constant_all::value) { for (size_t i = 0; i < size_alpha; i++) { digamma_alpha[i] = digamma(alpha_vec.val(i)); } for (size_t i = 0; i < size_beta; i++) { digamma_beta[i] = digamma(beta_vec.val(i)); } for (size_t i = 0; i < size_alpha_beta; i++) { digamma_sum[i] = digamma(alpha_vec.val(i) + beta_vec.val(i)); } } for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return betafunc_dbl = beta(alpha_dbl, beta_dbl); const T_partials_return Pn = 1.0 - inc_beta(alpha_dbl, beta_dbl, y_dbl); const T_partials_return inv_Pn = is_constant_all::value ? 0 : inv(Pn); ccdf_log += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= pow(1 - y_dbl, beta_dbl - 1) * pow(y_dbl, alpha_dbl - 1) * inv_Pn / betafunc_dbl; } T_partials_return g1 = 0; T_partials_return g2 = 0; if (!is_constant_all::value) { grad_reg_inc_beta(g1, g2, alpha_dbl, beta_dbl, y_dbl, digamma_alpha[n], digamma_beta[n], digamma_sum[n], betafunc_dbl); } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= g1 * inv_Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= g2 * inv_Pn; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_log.hpp0000644000176200001440000000140314645137106024234 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LOG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use bernoulli_lpmf */ template return_type_t bernoulli_log(const T_n& n, const T_prob& theta) { return bernoulli_lpmf(n, theta); } /** \ingroup prob_dists * @deprecated use bernoulli_lpmf */ template inline return_type_t bernoulli_log(const T_y& n, const T_prob& theta) { return bernoulli_lpmf(n, theta); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_lpmf.hpp0000644000176200001440000001553414645137106025215 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LPMF_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log PMF of the Beta-Binomial distribution with given population * size, prior success, and prior failure parameters. Given containers of * matching sizes, returns the log sum of probabilities. * * @tparam T_n type of success parameter * @tparam T_N type of population size parameter * @tparam T_size1 type of prior success parameter * @tparam T_size2 type of prior failure parameter * @param n success parameter * @param N population size parameter * @param alpha prior success parameter * @param beta prior failure parameter * @return log probability or log sum of probabilities * @throw std::domain_error if N, alpha, or beta fails to be positive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t beta_binomial_lpmf(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { using T_partials_return = partials_return_t; using T_N_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "beta_binomial_lpmf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "First prior sample size parameter", alpha, "Second prior sample size parameter", beta); if (size_zero(n, N, alpha, beta)) { return 0.0; } T_N_ref N_ref = N; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_nonnegative(function, "Population size parameter", N_ref); check_positive_finite(function, "First prior sample size parameter", alpha_ref); check_positive_finite(function, "Second prior sample size parameter", beta_ref); if 
(!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n); scalar_seq_view N_vec(N_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_alpha = stan::math::size(alpha); size_t size_beta = stan::math::size(beta); size_t size_n_N = max_size(n, N); size_t size_alpha_beta = max_size(alpha, beta); size_t max_size_seq_view = max_size(n, N, alpha, beta); for (size_t i = 0; i < max_size_seq_view; i++) { if (n_vec[i] < 0 || n_vec[i] > N_vec[i]) { return ops_partials.build(LOG_ZERO); } } VectorBuilder::value, T_partials_return, T_n, T_N> normalizing_constant(size_n_N); for (size_t i = 0; i < size_n_N; i++) if (include_summand::value) normalizing_constant[i] = binomial_coefficient_log(N_vec[i], n_vec[i]); VectorBuilder lbeta_denominator( size_alpha_beta); for (size_t i = 0; i < size_alpha_beta; i++) { lbeta_denominator[i] = lbeta(alpha_vec.val(i), beta_vec.val(i)); } VectorBuilder lbeta_diff( max_size_seq_view); for (size_t i = 0; i < max_size_seq_view; i++) { lbeta_diff[i] = lbeta(n_vec[i] + alpha_vec.val(i), N_vec[i] - n_vec[i] + beta_vec.val(i)) - lbeta_denominator[i]; } VectorBuilder::value, T_partials_return, T_n, T_size1> digamma_n_plus_alpha(max_size(n, alpha)); if (!is_constant_all::value) { for (size_t i = 0; i < max_size(n, alpha); i++) { digamma_n_plus_alpha[i] = digamma(n_vec.val(i) + alpha_vec.val(i)); } } VectorBuilder::value, T_partials_return, T_size1, T_size2> digamma_alpha_plus_beta(size_alpha_beta); if (!is_constant_all::value) { for (size_t i = 0; i < size_alpha_beta; i++) { digamma_alpha_plus_beta[i] = digamma(alpha_vec.val(i) + beta_vec.val(i)); } } VectorBuilder::value, T_partials_return, T_N, T_size1, T_size2> digamma_diff(max_size(N, alpha, beta)); if (!is_constant_all::value) { for (size_t i = 0; i < max_size(N, alpha, beta); i++) { digamma_diff[i] = digamma_alpha_plus_beta[i] - digamma(N_vec.val(i) + 
alpha_vec.val(i) + beta_vec.val(i)); } } VectorBuilder::value, T_partials_return, T_size1> digamma_alpha(size_alpha); for (size_t i = 0; i < size_alpha; i++) if (!is_constant_all::value) digamma_alpha[i] = digamma(alpha_vec.val(i)); VectorBuilder::value, T_partials_return, T_size2> digamma_beta(size_beta); for (size_t i = 0; i < size_beta; i++) if (!is_constant_all::value) digamma_beta[i] = digamma(beta_vec.val(i)); for (size_t i = 0; i < max_size_seq_view; i++) { if (include_summand::value) logp += normalizing_constant[i]; logp += lbeta_diff[i]; if (!is_constant_all::value) partials<0>(ops_partials)[i] += digamma_n_plus_alpha[i] + digamma_diff[i] - digamma_alpha[i]; if (!is_constant_all::value) partials<1>(ops_partials)[i] += digamma(N_vec.val(i) - n_vec.val(i) + beta_vec.val(i)) + digamma_diff[i] - digamma_beta[i]; } return ops_partials.build(logp); } template return_type_t beta_binomial_lpmf(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { return beta_binomial_lpmf(n, N, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_ccdf_log.hpp0000644000176200001440000000134414645137106026000 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use pareto_type_2_lccdf */ template return_type_t pareto_type_2_ccdf_log( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { return pareto_type_2_lccdf(y, mu, lambda, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_ccdf_log.hpp0000644000176200001440000000122714645137106024477 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use 
gumbel_lccdf */ template return_type_t gumbel_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& beta) { return gumbel_lccdf(y, mu, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_log.hpp0000644000176200001440000000230614645137106027155 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use skew_double_exponential_log */ template return_type_t skew_double_exponential_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { return skew_double_exponential_lpdf( y, mu, sigma, tau); } /** \ingroup prob_dists * @deprecated use skew_double_exponential_log */ template inline return_type_t skew_double_exponential_log(const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { return skew_double_exponential_lpdf( y, mu, sigma, tau); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/wiener_log.hpp0000644000176200001440000000362314645137106023540 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WIENER_LOG_HPP #define STAN_MATH_PRIM_PROB_WIENER_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the first passage time density function for a (Wiener) * drift diffusion model for the given \f$y\f$, * boundary separation \f$\alpha\f$, nondecision time \f$\tau\f$, * relative bias \f$\beta\f$, and drift rate \f$\delta\f$. * \f$\alpha\f$ and \f$\tau\f$ must be greater than 0, and * \f$\beta\f$ must be between 0 and 1. \f$y\f$ should contain * reaction times in seconds (strictly positive) with * upper-boundary responses. * * @deprecated use wiener_lpdf * * @param y A scalar variate. * @param alpha The boundary separation. * @param tau The nondecision time. * @param beta The relative bias. 
* @param delta The drift rate. * @return The log of the Wiener first passage time density of * the specified arguments. */ template return_type_t wiener_log( const T_y& y, const T_alpha& alpha, const T_tau& tau, const T_beta& beta, const T_delta& delta) { return wiener_lpdf( y, alpha, tau, beta, delta); } /** \ingroup prob_dists * @deprecated use wiener_lpdf */ template inline return_type_t wiener_log( const T_y& y, const T_alpha& alpha, const T_tau& tau, const T_beta& beta, const T_delta& delta) { return wiener_lpdf(y, alpha, tau, beta, delta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_log_lpmf.hpp0000644000176200001440000001212514645137106026126 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_LPMF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // NegBinomial(n|eta, phi) [phi > 0; n >= 0] template * = nullptr> return_type_t neg_binomial_2_log_lpmf( const T_n& n, const T_log_location& eta, const T_precision& phi) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_n_ref = ref_type_t; using T_eta_ref = ref_type_t; using T_phi_ref = ref_type_t; static const char* function = "neg_binomial_2_log_lpmf"; check_consistent_sizes(function, "Failures variable", n, "Log location parameter", eta, "Precision parameter", phi); T_n_ref n_ref = n; T_eta_ref eta_ref = eta; T_phi_ref phi_ref = phi; check_nonnegative(function, "Failures variable", n_ref); check_finite(function, "Log location parameter", eta_ref); check_positive_finite(function, "Precision parameter", phi_ref); if (size_zero(n, eta, phi)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = make_partials_propagator(eta_ref, phi_ref); scalar_seq_view n_vec(n_ref); 
scalar_seq_view eta_vec(eta_ref); scalar_seq_view phi_vec(phi_ref); size_t size_eta = stan::math::size(eta); size_t size_phi = stan::math::size(phi); size_t size_eta_phi = max_size(eta, phi); size_t size_n_phi = max_size(n, phi); size_t size_all = max_size(n, eta, phi); VectorBuilder eta_val(size_eta); for (size_t i = 0; i < size_eta; ++i) { eta_val[i] = eta_vec.val(i); } VectorBuilder phi_val(size_phi); VectorBuilder log_phi(size_phi); for (size_t i = 0; i < size_phi; ++i) { phi_val[i] = phi_vec.val(i); log_phi[i] = log(phi_val[i]); } VectorBuilder::value, T_partials_return, T_log_location> exp_eta(size_eta); if (!is_constant_all::value) { for (size_t i = 0; i < size_eta; ++i) { exp_eta[i] = exp(eta_val[i]); } } VectorBuilder::value, T_partials_return, T_log_location, T_precision> exp_eta_over_exp_eta_phi(size_eta_phi); if (!is_constant_all::value) { for (size_t i = 0; i < size_eta_phi; ++i) { exp_eta_over_exp_eta_phi[i] = inv(phi_val[i] / exp_eta[i] + 1); } } VectorBuilder log1p_exp_eta_m_logphi(size_eta_phi); for (size_t i = 0; i < size_eta_phi; ++i) { log1p_exp_eta_m_logphi[i] = log1p_exp(eta_val[i] - log_phi[i]); } VectorBuilder n_plus_phi( size_n_phi); for (size_t i = 0; i < size_n_phi; ++i) { n_plus_phi[i] = n_vec[i] + phi_val[i]; } for (size_t i = 0; i < size_all; i++) { if (include_summand::value) { logp += binomial_coefficient_log(n_plus_phi[i] - 1, n_vec[i]); } if (include_summand::value) { logp += n_vec[i] * eta_val[i]; } logp += -phi_val[i] * log1p_exp_eta_m_logphi[i] - n_vec[i] * (log_phi[i] + log1p_exp_eta_m_logphi[i]); if (!is_constant_all::value) { partials<0>(ops_partials)[i] += n_vec[i] - n_plus_phi[i] * exp_eta_over_exp_eta_phi[i]; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] += exp_eta_over_exp_eta_phi[i] - n_vec[i] / (exp_eta[i] + phi_val[i]) - log1p_exp_eta_m_logphi[i] - (digamma(phi_val[i]) - digamma(n_plus_phi[i])); } } return ops_partials.build(logp); } template inline return_type_t neg_binomial_2_log_lpmf( const T_n& n, 
const T_log_location& eta, const T_precision& phi) { return neg_binomial_2_log_lpmf(n, eta, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_lccdf.hpp0000644000176200001440000001112114645137106025154 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t neg_binomial_lccdf( const T_n& n, const T_shape& alpha, const T_inv_scale& beta_param) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "neg_binomial_lccdf"; check_consistent_sizes(function, "Failures variable", n, "Shape parameter", alpha, "Inverse scale parameter", beta_param); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta_param; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); if (size_zero(n, alpha, beta_param)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_n = stan::math::size(n); size_t size_alpha = stan::math::size(alpha); size_t size_n_alpha = max_size(n, alpha); size_t max_size_seq_view = max_size(n, alpha, beta_param); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < size_n; i++) { if (n_vec.val(i) < 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_n> digammaN_vec(size_n); VectorBuilder::value, T_partials_return, T_shape> digammaAlpha_vec(size_alpha); 
VectorBuilder::value, T_partials_return, T_n, T_shape> digammaSum_vec(size_n_alpha); if (!is_constant_all::value) { for (size_t i = 0; i < size_n; i++) { digammaN_vec[i] = digamma(n_vec.val(i) + 1); } for (size_t i = 0; i < size_alpha; i++) { digammaAlpha_vec[i] = digamma(alpha_vec.val(i)); } for (size_t i = 0; i < size_n_alpha; i++) { const T_partials_return n_dbl = n_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); digammaSum_vec[i] = digamma(n_dbl + alpha_dbl + 1); } } for (size_t i = 0; i < max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_vec.val(i) == std::numeric_limits::max()) { return ops_partials.build(negative_infinity()); } const T_partials_return n_dbl = n_vec.val(i); const T_partials_return alpha_dbl = alpha_vec.val(i); const T_partials_return beta_dbl = beta_vec.val(i); const T_partials_return inv_beta_p1 = inv(beta_dbl + 1); const T_partials_return p_dbl = beta_dbl * inv_beta_p1; const T_partials_return d_dbl = square(inv_beta_p1); const T_partials_return Pi = 1.0 - inc_beta(alpha_dbl, n_dbl + 1.0, p_dbl); const T_partials_return beta_func = beta(n_dbl + 1, alpha_dbl); P += log(Pi); if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, alpha_dbl, n_dbl + 1, p_dbl, digammaAlpha_vec[i], digammaN_vec[i], digammaSum_vec[i], beta_func); partials<0>(ops_partials)[i] -= g1 / Pi; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] -= d_dbl * pow(1 - p_dbl, n_dbl) * pow(p_dbl, alpha_dbl - 1) / (beta_func * Pi); } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_cdf_log.hpp0000644000176200001440000000141214645137106027766 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_CDF_LOG_HPP #include #include namespace 
stan { namespace math { /** \ingroup prob_dists * @deprecated use skew_double_exponential_lcdf */ template inline return_type_t skew_double_exponential_cdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { return skew_double_exponential_lcdf( y, mu, sigma, tau); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/hmm_marginal.hpp0000644000176200001440000001622214645137106024040 0ustar liggesusers#ifndef STAN_MATH_REV_FUN_HMM_MARGINAL_LPDF_HPP #define STAN_MATH_REV_FUN_HMM_MARGINAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template inline auto hmm_marginal_val( const Eigen::Matrix& omegas, const T_Gamma& Gamma_val, const T_rho& rho_val, Eigen::Matrix& alphas, Eigen::Matrix& alpha_log_norms, T_alpha& norm_norm) { const int n_transitions = omegas.cols() - 1; alphas.col(0) = omegas.col(0).cwiseProduct(rho_val); const auto norm = alphas.col(0).maxCoeff(); alphas.col(0) /= norm; alpha_log_norms(0) = log(norm); auto Gamma_val_transpose = Gamma_val.transpose().eval(); for (int n = 1; n <= n_transitions; ++n) { alphas.col(n) = omegas.col(n).cwiseProduct(Gamma_val_transpose * alphas.col(n - 1)); const auto col_norm = alphas.col(n).maxCoeff(); alphas.col(n) /= col_norm; alpha_log_norms(n) = log(col_norm) + alpha_log_norms(n - 1); } norm_norm = alpha_log_norms(n_transitions); return log(alphas.col(n_transitions).sum()) + norm_norm; } /** * For a Hidden Markov Model with observation y, hidden state x, * and parameters theta, return the log marginal density, log * p(y | theta). In this setting, the hidden states are discrete * and take values over the finite space {1, ..., K}. * The marginal lpdf is obtained via a forward pass, and * the derivative is calculated with an adjoint method, * e.g (Betancourt, Margossian, & Leos-Barajas, 2020). 
* log_omegas is a matrix of observational densities, where * the (i, j)th entry corresponds to the density of the jth observation, y_j, * given x_j = i. * The transition matrix Gamma is such that the (i, j)th entry is the * probability that x_n = j given x_{n - 1} = i. The rows of Gamma are * simplexes. * * @tparam T_omega type of the log likelihood matrix * @tparam T_Gamma type of the transition matrix * @tparam T_rho type of the initial guess vector * @param[in] log_omegas log matrix of observational densities. * @param[in] Gamma transition density between hidden states. * @param[in] rho initial state * @return log marginal density. * @throw `std::invalid_argument` if Gamma is not square, when we have * at least one transition, or if the size of rho is not the * number of rows of log_omegas. * @throw `std::domain_error` if rho is not a simplex and of the rows * of Gamma are not a simplex (when there is at least one transition). */ template * = nullptr, require_eigen_col_vector_t* = nullptr> inline auto hmm_marginal(const T_omega& log_omegas, const T_Gamma& Gamma, const T_rho& rho) { using T_partial_type = partials_return_t; using eig_matrix_partial = Eigen::Matrix; using eig_vector_partial = Eigen::Matrix; using T_omega_ref = ref_type_if_t::value, T_omega>; using T_Gamma_ref = ref_type_if_t::value, T_Gamma>; using T_rho_ref = ref_type_if_t::value, T_rho>; int n_states = log_omegas.rows(); int n_transitions = log_omegas.cols() - 1; T_omega_ref log_omegas_ref = log_omegas; T_Gamma_ref Gamma_ref = Gamma; T_rho_ref rho_ref = rho; const auto& Gamma_val = to_ref(value_of(Gamma_ref)); const auto& rho_val = to_ref(value_of(rho_ref)); hmm_check(log_omegas, Gamma_val, rho_val, "hmm_marginal"); auto ops_partials = make_partials_propagator(log_omegas_ref, Gamma_ref, rho_ref); eig_matrix_partial alphas(n_states, n_transitions + 1); eig_vector_partial alpha_log_norms(n_transitions + 1); // compute the density using the forward algorithm. 
eig_matrix_partial omegas = value_of(log_omegas_ref).array().exp(); T_partial_type norm_norm; auto log_marginal_density = hmm_marginal_val( omegas, Gamma_val, rho_val, alphas, alpha_log_norms, norm_norm); // Variables required for all three Jacobian-adjoint products. auto unnormed_marginal = alphas.col(n_transitions).sum(); std::vector kappa(n_transitions); eig_vector_partial kappa_log_norms(n_transitions); std::vector grad_corr(n_transitions, 0); if (n_transitions > 0) { kappa[n_transitions - 1] = Eigen::VectorXd::Ones(n_states); kappa_log_norms(n_transitions - 1) = 0; grad_corr[n_transitions - 1] = exp(alpha_log_norms(n_transitions - 1) - norm_norm); } for (int n = n_transitions - 1; n-- > 0;) { kappa[n] = Gamma_val * (omegas.col(n + 2).cwiseProduct(kappa[n + 1])); auto norm = kappa[n].maxCoeff(); kappa[n] /= norm; kappa_log_norms[n] = log(norm) + kappa_log_norms[n + 1]; grad_corr[n] = exp(alpha_log_norms[n] + kappa_log_norms[n] - norm_norm); } if (!is_constant_all::value) { for (int n = n_transitions - 1; n >= 0; --n) { edge<1>(ops_partials).partials_ += grad_corr[n] * alphas.col(n) * kappa[n].cwiseProduct(omegas.col(n + 1)).transpose() / unnormed_marginal; } } if (!is_constant_all::value) { // Boundary terms if (n_transitions == 0) { if (!is_constant_all::value) { edge<0>(ops_partials).partials_ = omegas.cwiseProduct(rho_val) / exp(log_marginal_density); } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = omegas.col(0) / exp(log_marginal_density); } return ops_partials.build(log_marginal_density); } else { auto grad_corr_boundary = exp(kappa_log_norms(0) - norm_norm); eig_vector_partial C = Gamma_val * omegas.col(1).cwiseProduct(kappa[0]); if (!is_constant_all::value) { eig_matrix_partial log_omega_jacad = Eigen::MatrixXd::Zero(n_states, n_transitions + 1); for (int n = n_transitions - 1; n >= 0; --n) { log_omega_jacad.col(n + 1) = grad_corr[n] * kappa[n].cwiseProduct(Gamma_val.transpose() * alphas.col(n)); } log_omega_jacad.col(0) = 
grad_corr_boundary * C.cwiseProduct(rho_val); edge<0>(ops_partials).partials_ = log_omega_jacad.cwiseProduct(omegas / unnormed_marginal); } if (!is_constant_all::value) { partials<2>(ops_partials) = grad_corr_boundary * C.cwiseProduct(omegas.col(0)) / unnormed_marginal; } } } return ops_partials.build(log_marginal_density); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_proportion_lccdf.hpp0000644000176200001440000001312414645137106025744 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_PROPORTION_LCCDF_HPP #define STAN_MATH_PRIM_PROB_BETA_PROPORTION_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the beta log complementary cumulative distribution function * for specified probability, location, and precision parameters: * beta_proportion_lccdf(y | mu, kappa) = beta_lccdf(y | mu * kappa, (1 - * mu) * kappa). Any arguments other than scalars must be containers of * the same size. With non-scalar arguments, the return is the sum of * the log ccdfs with scalars broadcast as necessary. 
* * @tparam T_y type of y * @tparam T_loc type of location parameter * @tparam T_prec type of precision parameter * @param y (Sequence of) scalar(s) between zero and one * @param mu (Sequence of) location parameter(s) * @param kappa (Sequence of) precision parameter(s) * @return log probability or sum of log of probabilities * @throw std::domain_error if mu is outside (0, 1) * @throw std::domain_error if kappa is nonpositive * @throw std::domain_error if 1 - y is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template return_type_t beta_proportion_lccdf(const T_y& y, const T_loc& mu, const T_prec& kappa) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_kappa_ref = ref_type_t; static const char* function = "beta_proportion_lccdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Precision parameter", kappa); if (size_zero(y, mu, kappa)) { return 0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_kappa_ref kappa_ref = kappa; check_positive(function, "Location parameter", value_of(mu_ref)); check_less(function, "Location parameter", value_of(mu_ref), 1.0); check_positive_finite(function, "Precision parameter", value_of(kappa_ref)); check_bounded(function, "Random variable", value_of(y_ref), 0.0, 1.0); T_partials_return ccdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, kappa_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view kappa_vec(kappa_ref); size_t size_kappa = stan::math::size(kappa); size_t size_mu_kappa = max_size(mu, kappa); size_t N = max_size(y, mu, kappa); VectorBuilder::value, T_partials_return, T_loc, T_prec> digamma_mukappa(size_mu_kappa); VectorBuilder::value, T_partials_return, T_loc, T_prec> digamma_kappa_mukappa(size_mu_kappa); VectorBuilder::value, T_partials_return, T_prec> digamma_kappa(size_kappa); if 
(!is_constant_all::value) { for (size_t i = 0; i < size_mu_kappa; i++) { const T_partials_return kappa_dbl = kappa_vec.val(i); const T_partials_return mukappa_dbl = mu_vec.val(i) * kappa_dbl; digamma_mukappa[i] = digamma(mukappa_dbl); digamma_kappa_mukappa[i] = digamma(kappa_dbl - mukappa_dbl); } for (size_t i = 0; i < size_kappa; i++) { digamma_kappa[i] = digamma(kappa_vec.val(i)); } } for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return kappa_dbl = kappa_vec.val(n); const T_partials_return mukappa_dbl = mu_dbl * kappa_dbl; const T_partials_return kappa_mukappa_dbl = kappa_dbl - mukappa_dbl; const T_partials_return betafunc_dbl = beta(mukappa_dbl, kappa_mukappa_dbl); const T_partials_return Pn = 1 - inc_beta(mukappa_dbl, kappa_mukappa_dbl, y_dbl); ccdf_log += log(Pn); const T_partials_return inv_Pn = is_constant_all::value ? 0 : inv(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= pow(1 - y_dbl, kappa_mukappa_dbl - 1) * pow(y_dbl, mukappa_dbl - 1) * inv_Pn / betafunc_dbl; } T_partials_return g1 = 0; T_partials_return g2 = 0; if (!is_constant_all::value) { grad_reg_inc_beta(g1, g2, mukappa_dbl, kappa_mukappa_dbl, y_dbl, digamma_mukappa[n], digamma_kappa_mukappa[n], digamma_kappa[n], betafunc_dbl); } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= kappa_dbl * (g1 - g2) * inv_Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= (g1 * mu_dbl + g2 * (1 - mu_dbl)) * inv_Pn; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_lpdf.hpp0000644000176200001440000001114514645137106024347 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_LPDF_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include namespace stan { namespace math { /** \ingroup prob_dists * The log of an inverse gamma density for y with the specified * shape and scale parameters. * Shape and scale parameters must be greater than 0. * y must be greater than 0. * * @param y A scalar variable. * @param alpha Shape parameter. * @param beta Scale parameter. * @throw std::domain_error if alpha is not greater than 0. * @throw std::domain_error if beta is not greater than 0. * @throw std::domain_error if y is not greater than 0. * @tparam T_y Type of scalar. * @tparam T_shape Type of shape. * @tparam T_scale Type of scale. */ template * = nullptr> return_type_t inv_gamma_lpdf(const T_y& y, const T_shape& alpha, const T_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using T_beta_ref = ref_type_if_t::value, T_scale>; static const char* function = "inv_gamma_lpdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Scale parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_positive_finite(function, "Shape parameter", alpha_val); check_positive_finite(function, "Scale parameter", beta_val); if (size_zero(y, alpha, beta)) { return 0; } if (!include_summand::value) { return 0; } if (sum(promote_scalar(y_val <= 0))) { return LOG_ZERO; } T_partials_return logp(0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); const auto& log_y = to_ref_if::value>(log(y_val)); size_t N = max_size(y, alpha, beta); if (include_summand::value) { logp -= sum(lgamma(alpha_val)) * N / math::size(alpha); } if (include_summand::value) { 
const auto& log_beta = to_ref_if::value>(log(beta_val)); logp += sum(alpha_val * log_beta) * N / max_size(alpha, beta); if (!is_constant_all::value) { partials<1>(ops_partials) = log_beta - digamma(alpha_val) - log_y; } } if (include_summand::value) { logp -= sum((alpha_val + 1.0) * log_y) * N / max_size(y, alpha); } if (include_summand::value) { const auto& inv_y = to_ref_if<(!is_constant_all::value || !is_constant_all::value)>(inv(y_val)); logp -= sum(beta_val * inv_y) * N / max_size(y, beta); if (!is_constant_all::value) { edge<0>(ops_partials).partials_ = (beta_val * inv_y - alpha_val - 1) * inv_y; } if (!is_constant_all::value) { partials<2>(ops_partials) = alpha_val / beta_val - inv_y; } } return ops_partials.build(logp); } template inline return_type_t inv_gamma_lpdf( const T_y& y, const T_shape& alpha, const T_scale& beta) { return inv_gamma_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_proportion_ccdf_log.hpp0000644000176200001440000000115214645137106026427 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_PROPORTION_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_PROPORTION_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_proportion_lccdf */ template return_type_t beta_proportion_ccdf_log( const T_y& y, const T_loc& mu, const T_prec& kappa) { return beta_proportion_lccdf(y, mu, kappa); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_corr_cholesky_lpdf.hpp0000644000176200001440000000374314645137106026124 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_CORR_CHOLESKY_LPDF_HPP #define STAN_MATH_PRIM_PROB_LKJ_CORR_CHOLESKY_LPDF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { // LKJ_Corr(L|eta) [ L Cholesky factor of correlation matrix // eta > 0; eta == 1 <-> uniform] template return_type_t lkj_corr_cholesky_lpdf(const 
T_covar& L, const T_shape& eta) { using lp_ret = return_type_t; static const char* function = "lkj_corr_cholesky_lpdf"; const auto& L_ref = to_ref(L); check_positive(function, "Shape parameter", eta); check_lower_triangular(function, "Random variable", L_ref); const unsigned int K = L.rows(); if (K == 0) { return 0.0; } lp_ret lp(0.0); if (include_summand::value) { lp += do_lkj_constant(eta, K); } if (include_summand::value) { const int Km1 = K - 1; Eigen::Matrix, Eigen::Dynamic, 1> log_diagonals = log(L_ref.diagonal().tail(Km1).array()); Eigen::Matrix values(Km1); for (int k = 0; k < Km1; k++) { values(k) = (Km1 - k - 1) * log_diagonals(k); } if (eta == 1.0 && is_constant_all>::value) { lp += sum(values); return (lp); } values += multiply(2.0 * eta - 2.0, log_diagonals); lp += sum(values); } return lp; } template inline return_type_t lkj_corr_cholesky_lpdf( const T_covar& L, const T_shape& eta) { return lkj_corr_cholesky_lpdf(L, eta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_ccdf_log.hpp0000644000176200001440000000107714645137106024517 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_NORMAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use normal_lccdf */ template inline return_type_t normal_ccdf_log( const T_y& y, const T_loc& mu, const T_scale& sigma) { return normal_lccdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_lcdf.hpp0000644000176200001440000000723014645137106024166 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CDF for the binomial distribution evaluated at the * 
specified success, population size, and chance of success. If given * containers of matching lengths, returns the log sum of probabilities. * * @tparam T_n type of successes parameter * @tparam T_N type of population size parameter * @tparam theta type of chance of success parameter * @param n successes parameter * @param N population size parameter * @param theta chance of success parameter * @return log probability or log sum of probabilities * @throw std::domain_error if N is negative * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template return_type_t binomial_lcdf(const T_n& n, const T_N& N, const T_prob& theta) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_t; using T_N_ref = ref_type_t; using T_theta_ref = ref_type_t; using std::exp; using std::log; using std::pow; static const char* function = "binomial_lcdf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "Probability parameter", theta); T_n_ref n_ref = n; T_N_ref N_ref = N; T_theta_ref theta_ref = theta; check_nonnegative(function, "Population size parameter", N_ref); check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); if (size_zero(n, N, theta)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view N_vec(N_ref); scalar_seq_view theta_vec(theta_ref); size_t max_size_seq_view = max_size(n, N, theta); // Explicit return for extreme values // The gradients are technically ill-defined, // but treated as negative infinity for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(NEGATIVE_INFTY); } } for (size_t i = 0; i < max_size_seq_view; i++) { const T_partials_return n_dbl = n_vec.val(i); const T_partials_return N_dbl = N_vec.val(i); // Explicit results for extreme values // The gradients are technically 
ill-defined, but treated as zero if (n_dbl >= N_dbl) { continue; } const T_partials_return theta_dbl = theta_vec.val(i); const T_partials_return Pi = inc_beta(N_dbl - n_dbl, n_dbl + 1, 1 - theta_dbl); P += log(Pi); if (!is_constant_all::value) { const T_partials_return denom = beta(N_dbl - n_dbl, n_dbl + 1) * Pi; partials<0>(ops_partials)[i] -= pow(theta_dbl, n_dbl) * pow(1 - theta_dbl, N_dbl - n_dbl - 1) / denom; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/frechet_rng.hpp0000644000176200001440000000450514645137106023674 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_RNG_HPP #define STAN_MATH_PRIM_PROB_FRECHET_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a pseudorandom Frechet variate for the given shape * and scale parameters using the specified random number generator. * * alpha and sigma can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_shape type of shape parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) positive shape parameter(s) * @param sigma (Sequence of) positive scale parameter(s) * @param rng random number generator * @return (Sequence of) Frechet random variate(s) * @throw std::domain_error if alpha is nonpositive or sigma is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type frechet_rng( const T_shape& alpha, const T_scale& sigma, RNG& rng) { using boost::variate_generator; using boost::random::weibull_distribution; using T_alpha_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "frechet_rng"; check_consistent_sizes(function, "Shape parameter", alpha, "Scale Parameter", sigma); T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(alpha, sigma); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > weibull_rng( rng, weibull_distribution<>(alpha_vec[n], 1.0 / sigma_vec[n])); output[n] = 1 / weibull_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_prec_log.hpp0000644000176200001440000000210214645137106025751 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_PREC_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_PREC_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use multi_normal_prec_lpdf */ template return_type_t multi_normal_prec_log(const T_y& y, const T_loc& mu, const T_covar& Sigma) { return multi_normal_prec_lpdf(y, mu, Sigma); } /** \ingroup multivar_dists * @deprecated use 
multi_normal_prec_lpdf */ template inline return_type_t multi_normal_prec_log( const T_y& y, const T_loc& mu, const T_covar& Sigma) { return multi_normal_prec_lpdf(y, mu, Sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_proportion_log.hpp0000644000176200001440000000203214645137106025446 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_PROPORTION_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_PROPORTION_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_proportion_lpdf */ template return_type_t beta_proportion_log(const T_y& y, const T_loc& mu, const T_prec& kappa) { return beta_proportion_lpdf(y, mu, kappa); } /** \ingroup prob_dists * @deprecated use beta_proportion_lpdf */ template inline return_type_t beta_proportion_log( const T_y& y, const T_loc& mu, const T_prec& kappa) { return beta_proportion_lpdf(y, mu, kappa); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_lccdf.hpp0000644000176200001440000000551714645137106025073 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t exponential_lccdf(const T_y& y, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using T_y_ref = ref_type_if_t::value, T_y>; using T_beta_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exponential_lccdf"; T_y_ref y_ref = y; T_beta_ref beta_ref = beta; auto y_val = to_ref(as_value_column_array_or_scalar(y_ref)); auto beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Inverse scale parameter", beta_val); if (size_zero(y, beta)) { 
return 0; } auto ops_partials = make_partials_propagator(y_ref, beta_ref); T_partials_return ccdf_log = -sum(beta_val * y_val); if (!is_constant_all::value) { using beta_val_scalar = scalar_type_t; using beta_val_array = Eigen::Array; if (is_vector::value && !is_vector::value) { partials<0>(ops_partials) = T_partials_array::Constant( math::size(y), -forward_as(beta_val)); } else if (is_vector::value) { partials<0>(ops_partials) = -forward_as(beta_val); } else { partials<0>(ops_partials)[0] = -sum(beta_val); } } if (!is_constant_all::value) { using y_val_scalar = scalar_type_t; using y_val_array = Eigen::Array; if (is_vector::value && !is_vector::value) { partials<1>(ops_partials) = T_partials_array::Constant( math::size(beta), -forward_as(y_val)); } else if (is_vector::value) { partials<1>(ops_partials) = -forward_as(y_val); } else { partials<1>(ops_partials)[0] = -sum(y_val); } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_cdf.hpp0000644000176200001440000001247614645137106023143 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_CDF_HPP #define STAN_MATH_PRIM_PROB_BETA_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Calculates the beta cumulative distribution function for the given * variate and scale variables. * * @param y A scalar variate. * @param alpha Prior sample size. * @param beta Prior sample size. * @return The beta cdf evaluated at the specified arguments. * @tparam T_y Type of y. * @tparam T_scale_succ Type of alpha. * @tparam T_scale_fail Type of beta. 
*/ template return_type_t beta_cdf( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "beta_cdf"; check_consistent_sizes(function, "Random variable", y, "First shape parameter", alpha, "Second shape parameter", beta); if (size_zero(y, alpha, beta)) { return 1.0; } T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "First shape parameter", alpha_ref); check_positive_finite(function, "Second shape parameter", beta_ref); check_bounded(function, "Random variable", value_of(y_ref), 0, 1); T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_alpha = stan::math::size(alpha); size_t size_beta = stan::math::size(beta); size_t size_alpha_beta = max_size(alpha, beta); size_t N = max_size(y, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) <= 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_scale_succ> digamma_alpha(size_alpha); if (!is_constant_all::value) { for (size_t n = 0; n < size_alpha; n++) { digamma_alpha[n] = digamma(alpha_vec.val(n)); } } VectorBuilder::value, T_partials_return, T_scale_fail> digamma_beta(size_beta); if (!is_constant_all::value) { for (size_t n = 0; n < size_beta; n++) { digamma_beta[n] = digamma(beta_vec.val(n)); } } VectorBuilder::value, T_partials_return, T_scale_succ, T_scale_fail> digamma_sum(size_alpha_beta); if (!is_constant_all::value) { for (size_t n = 0; n < size_alpha_beta; n++) { digamma_sum[n] = digamma(alpha_vec.val(n) + beta_vec.val(n)); } } for (size_t n = 0; n < N; 
n++) { const T_partials_return y_dbl = y_vec.val(n); // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_dbl >= 1.0) { continue; } const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return Pn = inc_beta(alpha_dbl, beta_dbl, y_dbl); const T_partials_return inv_Pn = is_constant_all::value ? 0 : inv(Pn); P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += inc_beta_ddz(alpha_dbl, beta_dbl, y_dbl) * inv_Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += inc_beta_dda(alpha_dbl, beta_dbl, y_dbl, digamma_alpha[n], digamma_sum[n]) * inv_Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += inc_beta_ddb(alpha_dbl, beta_dbl, y_dbl, digamma_beta[n], digamma_sum[n]) * inv_Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(alpha); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(beta); ++n) { partials<2>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_lcdf.hpp0000644000176200001440000000643314645137106023654 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_LCDF_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_LCDF_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the cauchy log cumulative distribution function for the given * location, and scale. If given containers of matching sizes * returns the log sum of probabilities. 
* * @tparam T_y type of real parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param mu location parameter * @param sigma scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if sigma is nonpositive or y, mu are nan * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t cauchy_lcdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::atan; using std::log; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "cauchy_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale Parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return cdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_inv_dbl = 1.0 / sigma_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return z = (y_dbl - mu_dbl) * sigma_inv_dbl; const T_partials_return Pn = atan(z) / pi() + 0.5; cdf_log += log(Pn); const T_partials_return rep_deriv = 1.0 / (pi() * Pn * (z * z * sigma_dbl + sigma_dbl)); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= rep_deriv; } if (!is_constant_all::value) { 
partials<2>(ops_partials)[n] -= rep_deriv * z; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_lpdf.hpp0000644000176200001440000001051114645137106023660 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_LPDF_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the Gumbel log probability density for the given * location and scale. Given containers of matching sizes, returns the * log sum of densities. * * @tparam T_y type of real parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param mu location parameter * @param beta scale parameter * @return log probability density or log sum of probability densities * @throw std::domain_error if y is nan, mu is infinite, or beta is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t gumbel_lpdf(const T_y& y, const T_loc& mu, const T_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_beta_ref = ref_type_if_t::value, T_scale>; static const char* function = "gumbel_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", beta); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", 
beta_val); if (size_zero(y, mu, beta)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, beta_ref); const auto& inv_beta = to_ref_if::value>(inv(beta_val)); const auto& y_minus_mu_over_beta = to_ref((y_val - mu_val) * inv_beta); const auto& exp_y_m_mu_over_beta = to_ref_if::value>( exp(-y_minus_mu_over_beta)); size_t N = max_size(y, mu, beta); T_partials_return logp = -sum(y_minus_mu_over_beta + exp_y_m_mu_over_beta); if (include_summand::value) { logp -= sum(log(beta_val)) * N / math::size(beta); } if (!is_constant_all::value) { const auto& scaled_diff = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(inv_beta * exp_y_m_mu_over_beta - inv_beta); if (!is_constant_all::value) { partials<0>(ops_partials) = scaled_diff; } if (!is_constant_all::value) { partials<1>(ops_partials) = -scaled_diff; } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = -y_minus_mu_over_beta * scaled_diff - inv_beta; } } return ops_partials.build(logp); } template inline return_type_t gumbel_lpdf(const T_y& y, const T_loc& mu, const T_scale& beta) { return gumbel_lpdf(y, mu, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gaussian_dlm_obs_lpdf.hpp0000644000176200001440000002622014645137106025722 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAUSSIAN_DLM_OBS_LPDF_HPP #define STAN_MATH_PRIM_PROB_GAUSSIAN_DLM_OBS_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* TODO: time-varying system matrices TODO: use sequential processing even for non-diagonal obs covariance. TODO: add constant terms in observation. */ namespace stan { namespace math { /** \ingroup multivar_dists * The log of a Gaussian dynamic linear model (GDLM). 
* This distribution is equivalent to, for \f$t = 1:T\f$, * \f{eqnarray*}{ * y_t & \sim N(F' \theta_t, V) \\ * \theta_t & \sim N(G \theta_{t-1}, W) \\ * \theta_0 & \sim N(m_0, C_0) * \f} * * If V is a vector, then the Kalman filter is applied * sequentially. * * @tparam T_y type of scalar * @tparam T_F type of design matrix * @tparam T_G type of transition matrix * @tparam T_V type of observation covariance matrix * @tparam T_W type of state covariance matrix * @tparam T_m0 type of initial state mean vector * @tparam T_C0 type of initial state covariance matrix * * @param y A r x T matrix of observations. Rows are variables, * columns are observations. * @param F A n x r matrix. The design matrix. * @param G A n x n matrix. The transition matrix. * @param V A r x r matrix. The observation covariance matrix. * @param W A n x n matrix. The state covariance matrix. * @param m0 A n x 1 matrix. The mean vector of the distribution * of the initial state. * @param C0 A n x n matrix. The covariance matrix of the * distribution of the initial state. * @return The log of the joint density of the GDLM. * @throw std::domain_error if a matrix in the Kalman filter is * not positive semi-definite. 
*/ template * = nullptr, require_eigen_col_vector_t* = nullptr> inline return_type_t gaussian_dlm_obs_lpdf( const T_y& y, const T_F& F, const T_G& G, const T_V& V, const T_W& W, const T_m0& m0, const T_C0& C0) { using T_lp = return_type_t; using std::pow; static const char* function = "gaussian_dlm_obs_lpdf"; check_size_match(function, "columns of F", F.cols(), "rows of y", y.rows()); check_size_match(function, "rows of F", F.rows(), "rows of G", G.rows()); check_size_match(function, "rows of V", V.rows(), "rows of y", y.rows()); check_size_match(function, "rows of W", W.rows(), "rows of G", G.rows()); check_size_match(function, "size of m0", m0.size(), "rows of G", G.rows()); check_size_match(function, "rows of C0", C0.rows(), "rows of G", G.rows()); check_square(function, "G", G); const auto& y_ref = to_ref(y); const auto& F_ref = to_ref(F); const auto& G_ref = to_ref(G); const auto& V_ref = to_ref(V); const auto& W_ref = to_ref(W); const auto& m0_ref = to_ref(m0); const auto& C0_ref = to_ref(C0); check_finite(function, "y", y_ref); check_finite(function, "F", F_ref); check_finite(function, "G", G_ref); // TODO(anyone): incorporate support for infinite V check_finite(function, "V", V_ref); check_pos_semidefinite(function, "V", V_ref); // TODO(anyone): incorporate support for infinite W check_finite(function, "W", W_ref); check_pos_semidefinite(function, "W", W_ref); check_finite(function, "m0", m0_ref); check_pos_semidefinite(function, "C0", C0_ref); check_finite(function, "C0", C0_ref); if (size_zero(y)) { return 0; } int r = y.rows(); // number of variables int n = G.rows(); // number of states T_lp lp(0); if (include_summand::value) { lp -= HALF_LOG_TWO_PI * r * y.cols(); } if (include_summand::value) { Eigen::Matrix m{m0_ref}; Eigen::Matrix C{C0_ref}; Eigen::Matrix a(n); Eigen::Matrix R(n, n); Eigen::Matrix f(r); Eigen::Matrix Q(r, r); Eigen::Matrix Q_inv(r, r); Eigen::Matrix e(r); Eigen::Matrix A(n, r); for (int i = 0; i < y.cols(); i++) { // // Predict 
state // a_t = G_t m_{t-1} a = multiply(G_ref, m); // R_t = G_t C_{t-1} G_t' + W_t R = quad_form_sym(C, transpose(G_ref)) + W_ref; // // predict observation // f_t = F_t' a_t f = multiply(transpose(F_ref), a); // Q_t = F'_t R_t F_t + V_t Q = quad_form_sym(R, F_ref) + V_ref; Q_inv = inverse_spd(Q); // // filtered state // e_t = y_t - f_t e = y_ref.col(i) - f; // A_t = R_t F_t Q^{-1}_t A = multiply(multiply(R, F_ref), Q_inv); // m_t = a_t + A_t e_t m = a + multiply(A, e); // C = R_t - A_t Q_t A_t' C = R - quad_form_sym(Q, transpose(A)); lp -= 0.5 * (log_determinant_spd(Q) + trace_quad_form(Q_inv, e)); } } return lp; } /** \ingroup multivar_dists * The log of a Gaussian dynamic linear model (GDLM) with * uncorrelated observation disturbances. * This distribution is equivalent to, for \f$t = 1:T\f$, * \f{eqnarray*}{ * y_t & \sim N(F' \theta_t, diag(V)) \\ * \theta_t & \sim N(G \theta_{t-1}, W) \\ * \theta_0 & \sim N(m_0, C_0) * \f} * * If V is a vector, then the Kalman filter is applied * sequentially. * * @param y A r x T matrix of observations. Rows are variables, * columns are observations. * @param F A n x r matrix. The design matrix. * @param G A n x n matrix. The transition matrix. * @param V A size r vector. The diagonal of the observation * covariance matrix. * @param W A n x n matrix. The state covariance matrix. * @param m0 A n x 1 matrix. The mean vector of the distribution * of the initial state. * @param C0 A n x n matrix. The covariance matrix of the * distribution of the initial state. * @return The log of the joint density of the GDLM. * @throw std::domain_error if a matrix in the Kalman filter is * not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_F Type of design matrix. * @tparam T_G Type of transition matrix. * @tparam T_V Type of observation variances * @tparam T_W Type of state covariance matrix. * @tparam T_m0 Type of initial state mean vector. * @tparam T_C0 Type of initial state covariance matrix. 
*/ template < bool propto, typename T_y, typename T_F, typename T_G, typename T_V, typename T_W, typename T_m0, typename T_C0, require_all_eigen_matrix_dynamic_t* = nullptr, require_all_eigen_col_vector_t* = nullptr> inline return_type_t gaussian_dlm_obs_lpdf( const T_y& y, const T_F& F, const T_G& G, const T_V& V, const T_W& W, const T_m0& m0, const T_C0& C0) { using T_lp = return_type_t; using std::log; static const char* function = "gaussian_dlm_obs_lpdf"; check_size_match(function, "columns of F", F.cols(), "rows of y", y.rows()); check_size_match(function, "rows of F", F.rows(), "rows of G", G.rows()); check_size_match(function, "rows of G", G.rows(), "columns of G", G.cols()); check_size_match(function, "size of V", V.size(), "rows of y", y.rows()); check_size_match(function, "rows of W", W.rows(), "rows of G", G.rows()); check_size_match(function, "size of m0", m0.size(), "rows of G", G.rows()); check_size_match(function, "rows of C0", C0.rows(), "rows of G", G.rows()); const auto& y_ref = to_ref(y); const auto& F_ref = to_ref(F); const auto& G_ref = to_ref(G); const auto& V_ref = to_ref(V); const auto& W_ref = to_ref(W); const auto& m0_ref = to_ref(m0); const auto& C0_ref = to_ref(C0); check_finite(function, "y", y_ref); check_finite(function, "F", F_ref); check_finite(function, "G", G_ref); check_nonnegative(function, "V", V_ref); // TODO(anyone): support infinite V check_finite(function, "V", V_ref); check_pos_semidefinite(function, "W", W_ref); // TODO(anyone): support infinite W check_finite(function, "W", W_ref); check_finite(function, "m0", m0_ref); check_pos_semidefinite(function, "C0", C0_ref); check_finite(function, "C0", C0_ref); if (y.cols() == 0 || y.rows() == 0) { return 0; } int r = y.rows(); // number of variables int n = G.rows(); // number of states T_lp lp(0); if (include_summand::value) { lp -= HALF_LOG_TWO_PI * r * y.cols(); } if (include_summand::value) { T_lp f; T_lp Q; T_lp Q_inv; T_lp e; Eigen::Matrix A(n); Eigen::Matrix Fj(n); 
Eigen::Matrix m{m0_ref}; Eigen::Matrix C{C0_ref}; for (int i = 0; i < y.cols(); i++) { // Predict state // reuse m and C instead of using a and R m = multiply(G_ref, m); C = quad_form_sym(C, transpose(G_ref)) + W_ref; for (int j = 0; j < y.rows(); ++j) { // predict observation // dim Fj = (n, 1) const auto& Fj = F_ref.col(j); // f_{t, i} = F_{t, i}' m_{t, i-1} f = dot_product(Fj, m); Q = trace_quad_form(C, Fj) + V_ref.coeff(j); if (i == 0) check_positive(function, "Q0", Q); Q_inv = 1.0 / Q; // filtered observation // e_{t, i} = y_{t, i} - f_{t, i} e = y_ref.coeff(j, i) - f; // A_{t, i} = C_{t, i-1} F_{t, i} Q_{t, i}^{-1} A = multiply(multiply(C, Fj), Q_inv); // m_{t, i} = m_{t, i-1} + A_{t, i} e_{t, i} m += multiply(A, e); // c_{t, i} = C_{t, i-1} - Q_{t, i} A_{t, i} A_{t, i}' // tcrossprod throws an error (ambiguous) // C = subtract(C, multiply(Q, tcrossprod(A))); C -= multiply(Q, multiply(A, transpose(A))); C = 0.5 * (C + transpose(C)); lp -= 0.5 * (log(Q) + square(e) * Q_inv); } } } return lp; } template inline return_type_t gaussian_dlm_obs_lpdf( const T_y& y, const T_F& F, const T_G& G, const T_V& V, const T_W& W, const T_m0& m0, const T_C0& C0) { return gaussian_dlm_obs_lpdf(y, F, G, V, W, m0, C0); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_ccdf_log.hpp0000644000176200001440000000142114645137106030131 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use skew_double_exponential_lccdf */ template inline return_type_t skew_double_exponential_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { return skew_double_exponential_lccdf( y, mu, sigma, tau); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/bernoulli_cdf.hpp0000644000176200001440000000532514645137106024216 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_CDF_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_CDF_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the CDF of the Bernoulli distribution. If containers are * supplied, returns the product of the probabilities. * * @tparam T_n type of integer parameter * @tparam T_prob type of chance of success parameter * @param n integer parameter * @param theta chance of success parameter * @return probability or product of probabilities * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr> return_type_t bernoulli_cdf(const T_n& n, const T_prob& theta) { using T_partials_return = partials_return_t; using T_theta_ref = ref_type_t; static const char* function = "bernoulli_cdf"; check_consistent_sizes(function, "Random variable", n, "Probability parameter", theta); T_theta_ref theta_ref = theta; check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); if (size_zero(n, theta)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n); scalar_seq_view theta_vec(theta_ref); size_t max_size_seq_view = max_size(n, theta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(0.0); } } for (size_t i = 0; i < max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_vec.val(i) >= 1) { continue; } const T_partials_return Pi = 1 - theta_vec.val(i); P *= Pi; if (!is_constant_all::value) { partials<0>(ops_partials)[i] += 
-1 / Pi; } } if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(theta); ++i) { partials<0>(ops_partials)[i] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_prec_lpdf.hpp0000644000176200001440000000770314645137106026131 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_PREC_LPDF_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_PREC_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t multi_normal_prec_lpdf( const T_y& y, const T_loc& mu, const T_covar& Sigma) { using T_covar_elem = typename scalar_type::type; using lp_type = return_type_t; using Eigen::Matrix; using std::vector; static const char* function = "multi_normal_prec_lpdf"; check_positive(function, "Precision matrix rows", Sigma.rows()); size_t number_of_y = size_mvt(y); size_t number_of_mu = size_mvt(mu); if (number_of_y == 0 || number_of_mu == 0) { return 0; } check_consistent_sizes_mvt(function, "y", y, "mu", mu); lp_type lp(0); vector_seq_view y_vec(y); vector_seq_view mu_vec(mu); size_t size_vec = max_size_mvt(y, mu); int size_y = y_vec[0].size(); int size_mu = mu_vec[0].size(); if (size_vec > 1) { for (size_t i = 1, size_mvt_y = size_mvt(y); i < size_mvt_y; i++) { check_size_match(function, "Size of one of the vectors " "of the random variable", y_vec[i].size(), "Size of the first vector of " "the random variable", size_y); } for (size_t i = 1, size_mvt_mu = size_mvt(mu); i < size_mvt_mu; i++) { check_size_match(function, "Size of one of the vectors " "of the location variable", mu_vec[i].size(), "Size of the first vector of " "the location variable", size_mu); } } check_size_match(function, "Size of random variable", size_y, "size of location parameter", size_mu); check_size_match(function, "Size of random variable", size_y, "rows of covariance parameter", 
Sigma.rows()); check_size_match(function, "Size of random variable", size_y, "columns of covariance parameter", Sigma.cols()); for (size_t i = 0; i < size_vec; i++) { check_finite(function, "Location parameter", mu_vec[i]); check_not_nan(function, "Random variable", y_vec[i]); } const auto& Sigma_ref = to_ref(Sigma); check_symmetric(function, "Precision matrix", Sigma_ref); auto ldlt_Sigma = make_ldlt_factor(Sigma_ref); check_ldlt_factor(function, "LDLT_Factor of precision parameter", ldlt_Sigma); if (size_y == 0) { return lp; } if (include_summand::value) { lp += 0.5 * log_determinant_ldlt(ldlt_Sigma) * size_vec; } if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * size_y * size_vec; } if (include_summand::value) { lp_type sum_lp_vec(0.0); for (size_t i = 0; i < size_vec; i++) { const auto& y_col = as_column_vector_or_scalar(y_vec[i]); const auto& mu_col = as_column_vector_or_scalar(mu_vec[i]); sum_lp_vec += trace_quad_form(Sigma_ref, y_col - mu_col); } lp -= 0.5 * sum_lp_vec; } return lp; } template inline return_type_t multi_normal_prec_lpdf( const T_y& y, const T_loc& mu, const T_covar& Sigma) { return multi_normal_prec_lpdf(y, mu, Sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_log.hpp0000644000176200001440000000361714645137106023334 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_LOG_HPP #define STAN_MATH_PRIM_PROB_GAMMA_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a gamma density for y with the specified * shape and inverse scale parameters. * Shape and inverse scale parameters must be greater than 0. * y must be greater than or equal to 0. 
* \f{eqnarray*}{ y &\sim& \mbox{\sf{Gamma}}(\alpha, \beta) \\ \log (p (y \, |\, \alpha, \beta) ) &=& \log \left( \frac{\beta^\alpha}{\Gamma(\alpha)} y^{\alpha - 1} \exp^{- \beta y} \right) \\ &=& \alpha \log(\beta) - \log(\Gamma(\alpha)) + (\alpha - 1) \log(y) - \beta y\\ & & \mathrm{where} \; y > 0 \f} * * @deprecated use gamma_lpdf * * @param y A scalar variable. * @param alpha Shape parameter. * @param beta Inverse scale parameter. * @throw std::domain_error if alpha is not greater than 0. * @throw std::domain_error if beta is not greater than 0. * @throw std::domain_error if y is not greater than or equal to 0. * @tparam T_y Type of scalar. * @tparam T_shape Type of shape. * @tparam T_inv_scale Type of inverse scale. */ template return_type_t gamma_log(const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { return gamma_lpdf(y, alpha, beta); } /** \ingroup prob_dists * @deprecated use gamma_lpdf */ template inline return_type_t gamma_log( const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { return gamma_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_ccdf_log.hpp0000644000176200001440000000112414645137106026217 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use inv_chi_square_lccdf */ template return_type_t inv_chi_square_ccdf_log(const T_y& y, const T_dof& nu) { return inv_chi_square_lccdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/discrete_range_lccdf.hpp0000644000176200001440000000467114645137106025523 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LCCDF_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LCCDF_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace 
math { /** \ingroup prob_dists * Return the log CCDF of a discrete range distribution for the given y, * lower and upper bounds (all integers). * * `y`, `lower` and `upper` can each be a scalar or a one-dimensional container. * Any container arguments must be the same size. * * @tparam T_y type of scalar, either int or std::vector * @tparam T_lower type of lower bound, either int or std::vector * @tparam T_upper type of upper bound, either int or std::vector * * @param y integer random variable * @param lower integer lower bound * @param upper integer upper bound * @return The log CCDF evaluated at the specified arguments. If containers are * supplied, returns the sum of the log CCDFs. * @throw std::domain_error if upper is smaller than lower. * @throw std::invalid_argument if non-scalar arguments are of different * sizes. */ template double discrete_range_lccdf(const T_y& y, const T_lower& lower, const T_upper& upper) { static const char* function = "discrete_range_lccdf"; check_consistent_sizes(function, "Lower bound parameter", lower, "Upper bound parameter", upper); check_greater_or_equal(function, "Upper bound parameter", upper, lower); if (size_zero(y, lower, upper)) { return 0; } scalar_seq_view y_vec(y); scalar_seq_view lower_vec(lower); scalar_seq_view upper_vec(upper); size_t N = max_size(y, lower, upper); for (size_t n = 0; n < N; ++n) { const int y_dbl = y_vec[n]; if (y_dbl >= upper_vec[n]) { return LOG_ZERO; } } double ccdf(0.0); for (size_t n = 0; n < N; n++) { const int y_dbl = y_vec[n]; if (y_dbl >= lower_vec[n]) { const int lower_dbl = lower_vec[n]; const int upper_dbl = upper_vec[n]; ccdf += log(upper_dbl - y_dbl) - log(upper_dbl - lower_dbl + 1); } } return ccdf; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_cdf_log.hpp0000644000176200001440000000124314645137106024351 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_PARETO_CDF_LOG_HPP #include 
#include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use pareto_lcdf */ template return_type_t pareto_cdf_log(const T_y& y, const T_scale& y_min, const T_shape& alpha) { return pareto_lcdf(y, y_min, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_lpdf.hpp0000644000176200001440000001120214645137106025157 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LPDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // pareto_type_2(y|lambda, alpha) [y >= 0; lambda > 0; alpha > 0] template * = nullptr> return_type_t pareto_type_2_lpdf( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_lambda_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; static const char* function = "pareto_type_2_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", lambda, "Shape parameter", alpha); if (size_zero(y, mu, lambda, alpha)) { return 0.0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_lambda_ref lambda_ref = lambda; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_greater_or_equal(function, "Random variable", y_val, mu_val); check_positive_finite(function, "Scale parameter", lambda_val); check_positive_finite(function, "Shape parameter", alpha_val); if (!include_summand::value) { 
return 0.0; } const auto& log1p_scaled_diff = to_ref_if::value>( log1p((y_val - mu_val) / lambda_val)); size_t N = max_size(y, mu, lambda, alpha); T_partials_return logp(0.0); if (include_summand::value) { logp += sum(log(alpha_val)) * N / math::size(alpha); } if (include_summand::value) { logp -= sum(log(lambda_val)) * N / math::size(lambda); } if (include_summand::value) { logp -= sum((alpha_val + 1.0) * log1p_scaled_diff); } auto ops_partials = make_partials_propagator(y_ref, mu_ref, lambda_ref, alpha_ref); if (!is_constant_all::value) { const auto& inv_sum = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( inv(lambda_val + y_val - mu_val)); const auto& alpha_div_sum = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(alpha_val * inv_sum); if (!is_constant_all::value) { auto deriv_1_2 = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( inv_sum + alpha_div_sum); if (!is_constant_all::value) { partials<0>(ops_partials) = -deriv_1_2; } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(deriv_1_2); } } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = alpha_div_sum * (y_val - mu_val) / lambda_val - inv_sum; } } if (!is_constant_all::value) { partials<3>(ops_partials) = inv(alpha_val) - log1p_scaled_diff; } return ops_partials.build(logp); } template inline return_type_t pareto_type_2_lpdf( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { return pareto_type_2_lpdf(y, mu, lambda, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lognormal_lpdf.hpp0000644000176200001440000001026314645137106024403 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // LogNormal(y|mu, sigma) [y >= 
0; sigma > 0] template * = nullptr> return_type_t lognormal_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "lognormal_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, mu, sigma)) { return 0; } if (!include_summand::value) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); if (sum(promote_scalar(y_val == 0))) { return ops_partials.build(LOG_ZERO); } const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& inv_sigma_sq = to_ref_if::value>( square(inv_sigma)); const auto& log_y = to_ref_if::value>(log(y_val)); const auto& logy_m_mu = to_ref(log_y - mu_val); size_t N = max_size(y, mu, sigma); T_partials_return logp = N * NEG_LOG_SQRT_TWO_PI - 0.5 * sum(square(logy_m_mu) * inv_sigma_sq); if (include_summand::value) { logp -= sum(log(sigma_val)) * N / math::size(sigma); } if (include_summand::value) { logp -= sum(log_y) * N / math::size(y); } if (!is_constant_all::value) { const auto& logy_m_mu_div_sigma = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(logy_m_mu * inv_sigma_sq); if (!is_constant_all::value) { partials<0>(ops_partials) = -(1 + logy_m_mu_div_sigma) / y_val; } if (!is_constant_all::value) { partials<1>(ops_partials) 
= logy_m_mu_div_sigma; } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = (logy_m_mu_div_sigma * logy_m_mu - 1) * inv_sigma; } } return ops_partials.build(logp); } template inline return_type_t lognormal_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { return lognormal_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_log.hpp0000644000176200001440000000325214645137106023724 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_LOG_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a uniform density for the given * y, lower, and upper bound. * \f{eqnarray*}{ y &\sim& \mbox{\sf{U}}(\alpha, \beta) \\ \log (p (y \, |\, \alpha, \beta)) &=& \log \left( \frac{1}{\beta-\alpha} \right) \\ &=& \log (1) - \log (\beta - \alpha) \\ &=& -\log (\beta - \alpha) \\ & & \mathrm{ where } \; y \in [\alpha, \beta], \log(0) \; \mathrm{otherwise} \f} * * @deprecated use uniform_lpdf * * @param y A scalar variable. * @param alpha Lower bound. * @param beta Upper bound. * @throw std::invalid_argument if the lower bound is greater than * or equal to the lower bound * @tparam T_y Type of scalar. * @tparam T_low Type of lower bound. * @tparam T_high Type of upper bound. 
*/ template return_type_t uniform_log(const T_y& y, const T_low& alpha, const T_high& beta) { return uniform_lpdf(y, alpha, beta); } /** \ingroup prob_dists * @deprecated use uniform_lpdf */ template inline return_type_t uniform_log(const T_y& y, const T_low& alpha, const T_high& beta) { return uniform_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/hmm_latent_rng.hpp0000644000176200001440000000775214645137106024413 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_HMM_LATENT_RNG_HPP #define STAN_MATH_PRIM_PROB_HMM_LATENT_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** * For a hidden Markov model with observation y, hidden state x, * and parameters theta, generate samples from the posterior distribution * of the hidden states, x. * In this setting, the hidden states are discrete * and takes values over the finite space {1, ..., K}. * log_omegas is a matrix of observational densities, where * the (i, j)th entry corresponds to the density of the ith observation, y_i, * given x_i = j. * The transition matrix Gamma is such that the (i, j)th entry is the * probability that x_n = j given x_{n - 1} = i. The rows of Gamma are * simplexes. * * @tparam T_omega type of the log likelihood matrix * @tparam T_Gamma type of the transition matrix * @tparam T_rho type of the initial guess vector * @param[in] log_omegas log matrix of observational densities. * @param[in] Gamma transition density between hidden states. * @param[in] rho initial state * @param[in] rng random number generator * @return sample from the posterior distribution of the hidden states. * @throw `std::invalid_argument` if Gamma is not square, when we have * at least one transition, or if the size of rho is not the * number of rows of log_omegas. * @throw `std::domain_error` if rho is not a simplex and of the rows * of Gamma are not a simplex (when there is at least one transition). 
*/ template * = nullptr, require_eigen_col_vector_t* = nullptr> inline std::vector hmm_latent_rng(const T_omega& log_omegas, const T_Gamma& Gamma, const T_rho& rho, RNG& rng) { int n_states = log_omegas.rows(); int n_transitions = log_omegas.cols() - 1; Eigen::MatrixXd omegas = value_of(log_omegas).array().exp(); ref_type_t rho_dbl = value_of(rho); ref_type_t Gamma_dbl = value_of(Gamma); hmm_check(log_omegas, Gamma_dbl, rho_dbl, "hmm_latent_rng"); Eigen::MatrixXd alphas(n_states, n_transitions + 1); alphas.col(0) = omegas.col(0).cwiseProduct(rho_dbl); alphas.col(0) /= alphas.col(0).maxCoeff(); for (int n = 0; n < n_transitions; ++n) { alphas.col(n + 1) = omegas.col(n + 1).cwiseProduct(Gamma_dbl.transpose() * alphas.col(n)); alphas.col(n + 1) /= alphas.col(n + 1).maxCoeff(); } Eigen::VectorXd beta = Eigen::VectorXd::Ones(n_states); // sample the last hidden state std::vector hidden_states(n_transitions + 1); std::vector probs(n_states); Eigen::Map probs_vec(probs.data(), n_states); probs_vec = alphas.col(n_transitions) / alphas.col(n_transitions).sum(); boost::random::discrete_distribution<> cat_hidden(probs); hidden_states[n_transitions] = cat_hidden(rng) + stan::error_index::value; for (int n = n_transitions; n-- > 0;) { // Sample the nth hidden state conditional on (n + 1)st hidden state. // Subtract error_index in order to use C++ index. int last_hs = hidden_states[n + 1] - stan::error_index::value; probs_vec = alphas.col(n).cwiseProduct(Gamma_dbl.col(last_hs)) * beta(last_hs) * omegas(last_hs, n + 1); probs_vec /= probs_vec.sum(); // discrete_distribution produces samples in [0, K), so // we need to add 1 to generate over [1, K). 
boost::random::discrete_distribution<> cat_hidden(probs); hidden_states[n] = cat_hidden(rng) + stan::error_index::value; // update backwards state beta = Gamma_dbl * (omegas.col(n + 1).cwiseProduct(beta)); beta /= beta.maxCoeff(); } return hidden_states; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/categorical_logit_glm_lpmf.hpp0000644000176200001440000001762414645137106026744 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_GLM_LPMF_HPP #define STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_GLM_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the log PMF of the Generalized Linear Model (GLM) * with categorical distribution and logit (softmax) link function. * * @tparam T_y type of classes. It can be either `std::vector` or `int`. * @tparam T_x_scalar type of the matrix of independent variables (features) * @tparam T_alpha type of the intercept vector * @tparam T_beta type of the matrix of weights * @param y a scalar or vector of classes. If it is a scalar it will be * broadcast - used for all instances. Values should be between 1 and number * of classes, including endpoints. * @param x design matrix or row vector. If it is a row vector it will be * broadcast - used for all instances. * @param alpha intercept vector (in log odds) * @param beta weight matrix * @return log probability or log sum of probabilities * @throw std::domain_error x, beta or alpha is infinite or y is not within * bounds * @throw std::invalid_argument if container sizes mismatch. 
*/ template * = nullptr, require_col_vector_t* = nullptr, require_matrix_t* = nullptr> return_type_t categorical_logit_glm_lpmf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta) { using T_partials_return = partials_return_t; using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; using std::exp; using std::isfinite; using std::log; using T_y_ref = ref_type_t; using T_x_ref = ref_type_if_t::value, T_x>; using T_alpha_ref = ref_type_if_t::value, T_alpha>; using T_beta_ref = ref_type_if_t::value, T_beta>; using T_beta_partials = partials_type_t>; constexpr int T_x_rows = T_x::RowsAtCompileTime; const size_t N_instances = T_x_rows == 1 ? stan::math::size(y) : x.rows(); const size_t N_attributes = x.cols(); const size_t N_classes = beta.cols(); static const char* function = "categorical_logit_glm_lpmf"; check_consistent_size(function, "Vector of dependent variables", y, N_instances); check_consistent_size(function, "Intercept vector", alpha, N_classes); check_size_match(function, "x.cols()", N_attributes, "beta.rows()", beta.rows()); if (size_zero(y) || N_classes == 1) { return 0; } T_y_ref y_ref = y; check_bounded(function, "categorical outcome out of support", y_ref, 1, N_classes); if (!include_summand::value) { return 0; } T_x_ref x_ref = x; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; const auto& x_val = to_ref_if::value>(value_of(x_ref)); const auto& alpha_val = value_of(alpha_ref); const auto& beta_val = to_ref_if::value>(value_of(beta_ref)); const auto& alpha_val_vec = as_column_vector_or_scalar(alpha_val).transpose(); Array lin = (x_val * beta_val).rowwise() + alpha_val_vec; Array lin_max = lin.rowwise().maxCoeff(); // This is used to prevent overflow when // calculating softmax/log_sum_exp and // similar expressions Array exp_lin = exp(lin.colwise() - lin_max); Array inv_sum_exp_lin = 1 / exp_lin.rowwise().sum(); T_partials_return logp = log(inv_sum_exp_lin).sum() - lin_max.sum(); if (T_x_rows == 1) { logp *= 
N_instances; } scalar_seq_view y_seq(y_ref); for (int i = 0; i < N_instances; i++) { if (T_x_rows == 1) { logp += lin(0, y_seq[i] - 1); } else { logp += lin(i, y_seq[i] - 1); } } // TODO(Tadej) maybe we can replace previous block with the following line // when we have newer Eigen T_partials_return logp = // lin(Eigen::all,y-1).sum() + log(inv_sum_exp_lin).sum() - lin_max.sum(); if (!isfinite(logp)) { check_finite(function, "Weight vector", beta_ref); check_finite(function, "Intercept", alpha_ref); check_finite(function, "Matrix of independent variables", x_ref); } // Compute the derivatives. auto ops_partials = make_partials_propagator(x_ref, alpha_ref, beta_ref); if (!is_constant_all::value) { if (T_x_rows == 1) { Array beta_y = beta_val.col(y_seq[0] - 1); for (int i = 1; i < N_instances; i++) { beta_y += beta_val.col(y_seq[i] - 1).array(); } edge<0>(ops_partials).partials_ = beta_y - (exp_lin.matrix() * beta_val.transpose()).array().colwise() * inv_sum_exp_lin * N_instances; } else { Array beta_y(N_instances, N_attributes); for (int i = 0; i < N_instances; i++) { beta_y.row(i) = beta_val.col(y_seq[i] - 1); } edge<0>(ops_partials).partials_ = beta_y - (exp_lin.matrix() * beta_val.transpose()).array().colwise() * inv_sum_exp_lin; // TODO(Tadej) maybe we can replace previous block with the following // line when we have newer Eigen partials<0>(ops_partials) = beta_val(y // - 1, all) - (exp_lin.matrix() * beta.transpose()).colwise() * // inv_sum_exp_lin; } } if (!is_constant_all::value) { Array neg_softmax_lin = exp_lin.colwise() * -inv_sum_exp_lin; if (!is_constant_all::value) { if (T_x_rows == 1) { edge<1>(ops_partials).partials_ = neg_softmax_lin.colwise().sum() * N_instances; } else { partials<1>(ops_partials) = neg_softmax_lin.colwise().sum(); } for (int i = 0; i < N_instances; i++) { partials<1>(ops_partials)[y_seq[i] - 1] += 1; } } if (!is_constant_all::value) { Matrix beta_derivative = x_val.transpose().template cast() * neg_softmax_lin.matrix(); if 
(T_x_rows == 1) { beta_derivative *= N_instances; } for (int i = 0; i < N_instances; i++) { if (T_x_rows == 1) { beta_derivative.col(y_seq[i] - 1) += x_val; } else { beta_derivative.col(y_seq[i] - 1) += x_val.row(i); } } // TODO(Tadej) maybe we can replace previous loop with the following // line when we have newer Eigen partials<2>(ops_partials)(Eigen::all, // y // - 1) += x_val.colwise.sum().transpose(); partials<2>(ops_partials) = std::move(beta_derivative); } } return ops_partials.build(logp); } template return_type_t categorical_logit_glm_lpmf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta) { return categorical_logit_glm_lpmf(y, x, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/ordered_logistic_lpmf.hpp0000644000176200001440000001752014645137106025746 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_LPMF_HPP #define STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the (natural) log probability of the specified array * of integers given the vector of continuous locations and * specified cutpoints in an ordered logistic model. * *

Typically the continuous location * will be the dot product of a vector of regression coefficients * and a vector of predictors for the outcome * \f[ \frac{\partial }{\partial \lambda} = \begin{cases}\\ -\mathrm{logit}^{-1}(\lambda - c_1) & \mbox{if } k = 1,\\ -(((1-e^{c_{k-1}-c_{k-2}})^{-1} - \mathrm{logit}^{-1}(c_{k-2}-\lambda)) + ((1-e^{c_{k-2}-c_{k-1}})^{-1} - \mathrm{logit}^{-1}(c_{k-1}-\lambda))) & \mathrm{if } 1 < k < K, \mathrm{and}\\ \mathrm{logit}^{-1}(c_{K-2}-\lambda) & \mathrm{if } k = K. \end{cases} \f] \f[ \frac{\partial }{\partial \lambda} = \begin{cases} -\mathrm{logit}^{-1}(\lambda - c_1) & \text{if } k = 1,\\ -(((1-e^{c_{k-1}-c_{k-2}})^{-1} - \mathrm{logit}^{-1}(c_{k-2}-\lambda)) + ((1-e^{c_{k-2}-c_{k-1}})^{-1} - \mathrm{logit}^{-1}(c_{k-1}-\lambda))) & \text{if } 1 < k < K, \text{ and}\\ \mathrm{logit}^{-1}(c_{K-2}-\lambda) & \text{if } k = K. \end{cases} \f] * * @tparam propto True if calculating up to a proportion. * @tparam T_y Y variable type (integer or array of integers). * @tparam T_loc Location type. * @tparam T_cut Cut-point type. * @param y Array of integers * @param lambda Vector of continuous location variables. * @param c Positive increasing vector of cutpoints. * @return Log probability of outcome given location and * cutpoints. * @throw std::domain_error If the outcome is not between 1 and * the number of cutpoints plus 2; if the cutpoint vector is * empty; if the cutpoint vector contains a non-positive, * non-finite value; or if the cutpoint vector is not sorted in * ascending order. * @throw std::invalid_argument If y and lambda are different * lengths. 
*/ template * = nullptr> return_type_t ordered_logistic_lpmf(const T_y& y, const T_loc& lambda, const T_cut& c) { using T_partials_return = partials_return_t; using T_cuts_val = partials_return_t; using T_y_ref = ref_type_t; using T_lambda_ref = ref_type_if_t::value, T_loc>; using T_cut_ref = ref_type_if_t::value, T_cut>; using Eigen::Array; using Eigen::Dynamic; static const char* function = "ordered_logistic"; T_cut_ref c_ref = c; vector_seq_view c_vec(c_ref); int N = math::size(lambda); int C_l = size_mvt(c); check_consistent_sizes(function, "Integers", y, "Locations", lambda); if (C_l > 1) { check_size_match(function, "Length of location variables ", N, "Number of cutpoint vectors ", C_l); } T_y_ref y_ref = y; T_lambda_ref lambda_ref = lambda; scalar_seq_view y_seq(y_ref); decltype(auto) lambda_val = to_ref(as_value_array_or_scalar(lambda_ref)); check_finite(function, "Location parameter", lambda_val); if (C_l == 0 || N == 0) { return 0; } int K = c_vec[0].size() + 1; check_bounded(function, "Random variable", y_ref, 1, K); for (int i = 0; i < C_l; i++) { check_size_match(function, "One cutpoint set", c_vec[i].size(), "First cutpoint set", K - 1); check_ordered(function, "Cut-points", c_vec[i]); if (K > 1) { if (K > 2) { check_finite(function, "Final cut-point", c_vec[i].coeff(K - 2)); } check_finite(function, "First cut-point", c_vec[i].coeff(0)); } } if (!include_summand::value) { return 0.0; } scalar_seq_view lam_vec(lambda_val); T_partials_return logp(0.0); Array cuts_y1(N), cuts_y2(N); for (int i = 0; i < N; i++) { int c = y_seq[i]; if (c != K) { cuts_y1.coeffRef(i) = value_of(c_vec[i].coeff(c - 1)); } else { cuts_y1.coeffRef(i) = INFINITY; } if (c != 1) { cuts_y2.coeffRef(i) = value_of(c_vec[i].coeff(c - 2)); } else { cuts_y2.coeffRef(i) = -INFINITY; } } Array cut2 = lambda_val - cuts_y2; Array cut1 = lambda_val - cuts_y1; // Not immediately evaluating next two expressions benefits performance auto m_log_1p_exp_cut1 = (cut1 > 0.0).select(-cut1, 0) - 
(-cut1.abs()).exp().log1p(); auto m_log_1p_exp_m_cut2 = (cut2 <= 0.0).select(cut2, 0) - (-cut2.abs()).exp().log1p(); if (is_vector::value) { Eigen::Map, Eigen::Dynamic, 1>> y_vec( y_seq.data(), y_seq.size()); auto log1m_exp_cuts_diff = log1m_exp(cut1 - cut2); logp = y_vec.cwiseEqual(1) .select(m_log_1p_exp_cut1, y_vec.cwiseEqual(K).select(m_log_1p_exp_m_cut2, m_log_1p_exp_m_cut2 + log1m_exp_cuts_diff + m_log_1p_exp_cut1)) .sum(); } else { if (y_seq[0] == 1) { logp = m_log_1p_exp_cut1.sum(); } else if (y_seq[0] == K) { logp = m_log_1p_exp_m_cut2.sum(); } else { logp = (m_log_1p_exp_m_cut2 + log1m_exp(cut1 - cut2).array() + m_log_1p_exp_cut1) .sum(); } } auto ops_partials = make_partials_propagator(lambda_ref, c_ref); if (!is_constant_all::value) { Array exp_m_cut1 = exp(-cut1); Array exp_m_cut2 = exp(-cut2); Array exp_cuts_diff = exp(cuts_y2 - cuts_y1); Array d1 = (cut2 > 0).select(exp_m_cut2 / (1 + exp_m_cut2), 1 / (1 + exp(cut2))) - exp_cuts_diff / (exp_cuts_diff - 1); Array d2 = 1 / (1 - exp_cuts_diff) - (cut1 > 0).select(exp_m_cut1 / (1 + exp_m_cut1), 1 / (1 + exp(cut1))); if (!is_constant_all::value) { partials<0>(ops_partials) = d1 - d2; } if (!is_constant_all::value) { for (int i = 0; i < N; i++) { int c = y_seq[i]; if (c != K) { partials_vec<1>(ops_partials)[i][c - 1] += d2.coeff(i); } if (c != 1) { partials_vec<1>(ops_partials)[i][c - 2] -= d1.coeff(i); } } } } return ops_partials.build(logp); } template return_type_t ordered_logistic_lpmf(const T_y& y, const T_loc& lambda, const T_cut& c) { return ordered_logistic_lpmf(y, lambda, c); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_lcdf.hpp0000644000176200001440000000700514645137106024053 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_LCDF_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { 
template * = nullptr> return_type_t uniform_lcdf(const T_y& y, const T_low& alpha, const T_high& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_low>; using T_beta_ref = ref_type_if_t::value, T_high>; static const char* function = "uniform_lcdf"; check_consistent_sizes(function, "Random variable", y, "Lower bound parameter", alpha, "Upper bound parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Lower bound parameter", alpha_val); check_finite(function, "Upper bound parameter", beta_val); check_greater(function, "Upper bound parameter", beta_val, alpha_val); if (size_zero(y, alpha, beta)) { return 0; } if (sum(promote_scalar(y_val < alpha_val)) || sum(promote_scalar(beta_val < y_val))) { return negative_infinity(); } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); const auto& b_minus_a = to_ref_if::value>(beta_val - alpha_val); const auto& y_minus_alpha = to_ref_if::value>(y_val - alpha_val); const auto& cdf_log_n = y_minus_alpha / b_minus_a; T_partials_return cdf_log = sum(log(cdf_log_n)); if (!is_constant_all::value) { if (!is_vector::value && is_vector::value && !is_vector::value) { partials<0>(ops_partials) = math::size(beta) * inv(y_minus_alpha); } else { partials<0>(ops_partials) = inv(y_minus_alpha); } } if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = (y_val - beta_val) / (b_minus_a * y_minus_alpha); } if (!is_constant_all::value) { if (is_vector::value && !is_vector::value && !is_vector::value) { partials<2>(ops_partials) = inv(-b_minus_a) * math::size(y); } else { 
partials<2>(ops_partials) = inv(-b_minus_a); } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_lpdf.hpp0000644000176200001440000001462614645137106024431 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_LPDF_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the Student-t density for the given y, nu, mean, and * scale parameter. The scale parameter must be greater * than 0. * * \f{eqnarray*}{ y &\sim& t_{\nu} (\mu, \sigma^2) \\ \log (p (y \, |\, \nu, \mu, \sigma) ) &=& \log \left( \frac{\Gamma((\nu + 1) /2)} {\Gamma(\nu/2)\sqrt{\nu \pi} \sigma} \left( 1 + \frac{1}{\nu} (\frac{y - \mu}{\sigma})^2 \right)^{-(\nu + 1)/2} \right) \\ &=& \log( \Gamma( (\nu+1)/2 )) - \log (\Gamma (\nu/2) - \frac{1}{2} \log(\nu \pi) - \log(\sigma) -\frac{\nu + 1}{2} \log (1 + \frac{1}{\nu} (\frac{y - \mu}{\sigma})^2) \f} * * @tparam T_y type of scalar * @tparam T_dof type of degrees of freedom * @tparam T_loc type of location * @tparam T_scale type of scale * * @param y A scalar variable. * @param nu Degrees of freedom. * @param mu The mean of the Student-t distribution. * @param sigma The scale parameter of the Student-t distribution. * @return The log of the Student-t density at y. * @throw std::domain_error if sigma is not greater than 0. * @throw std::domain_error if nu is not greater than 0. 
*/ template * = nullptr> return_type_t student_t_lpdf(const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_nu_ref = ref_type_if_t::value, T_dof>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "student_t_lpdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_nu_ref nu_ref = nu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) nu_val = to_ref(as_value_column_array_or_scalar(nu_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_not_nan(function, "Random variable", y_val); check_positive_finite(function, "Degrees of freedom parameter", nu_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, nu, mu, sigma)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, nu_ref, mu_ref, sigma_ref); const auto& half_nu = to_ref_if::value>(0.5 * nu_val); const auto& square_y_scaled = square((y_val - mu_val) / sigma_val); const auto& square_y_scaled_over_nu = to_ref_if::value>( square_y_scaled / nu_val); const auto& log1p_val = to_ref_if::value>( log1p(square_y_scaled_over_nu)); size_t N = max_size(y, nu, mu, sigma); T_partials_return logp = -sum((half_nu + 0.5) * log1p_val); if (include_summand::value) { logp -= LOG_SQRT_PI * N; } if (include_summand::value) { logp += (sum(lgamma(half_nu + 0.5)) - sum(lgamma(half_nu)) - 0.5 * sum(log(nu_val))) * N / math::size(nu); } if (include_summand::value) { logp -= sum(log(sigma_val)) * N / 
math::size(sigma); } if (!is_constant_all::value) { const auto& square_sigma = square(sigma_val); auto deriv_y_mu = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( (nu_val + 1) * (y_val - mu_val) / ((1 + square_y_scaled_over_nu) * square_sigma * nu_val)); if (!is_constant_all::value) { partials<0>(ops_partials) = -deriv_y_mu; } if (!is_constant_all::value) { partials<2>(ops_partials) = std::move(deriv_y_mu); } } if (!is_constant_all::value) { const auto& rep_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( (nu_val + 1) * square_y_scaled_over_nu / (1 + square_y_scaled_over_nu) - 1); if (!is_constant_all::value) { const auto& digamma_half_nu_plus_half = digamma(half_nu + 0.5); const auto& digamma_half_nu = digamma(half_nu); edge<1>(ops_partials).partials_ = 0.5 * (digamma_half_nu_plus_half - digamma_half_nu - log1p_val + rep_deriv / nu_val); } if (!is_constant_all::value) { partials<3>(ops_partials) = rep_deriv / sigma_val; } } return ops_partials.build(logp); } template inline return_type_t student_t_lpdf( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { return student_t_lpdf(y, nu, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_logit_glm_lpmf.hpp0000644000176200001440000001602514645137106026454 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_GLM_LPMF_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_GLM_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the log PMF of the Generalized Linear Model (GLM) * with Bernoulli distribution and logit link function. * The idea is that bernoulli_logit_glm_lpmf(y, x, alpha, beta) should * compute a more efficient version of bernoulli_logit_lpmf(y, alpha + x * beta) * by using analytically simplified gradients. 
* If containers are supplied, returns the log sum of the probabilities. * * @tparam T_y type of binary vector of dependent variables (labels); * this can also be a single binary value; * @tparam T_x type of the matrix of independent variables (features) * @tparam T_alpha type of the intercept(s); * this can be a vector (of the same length as y) of intercepts or a single * value (for models with constant intercept); * @tparam T_beta type of the weight vector * * @param y binary scalar or vector parameter. If it is a scalar it will be * broadcast - used for all instances. * @param x design matrix or row vector. If it is a row vector it will be * broadcast - used for all instances. * @param alpha intercept (in log odds) * @param beta weight vector * @return log probability or log sum of probabilities * @throw std::domain_error if x, beta or alpha is infinite. * @throw std::domain_error if y is not binary. * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr> return_type_t bernoulli_logit_glm_lpmf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta) { using Eigen::Array; using Eigen::Dynamic; using Eigen::log1p; using Eigen::Matrix; using std::exp; using std::isfinite; constexpr int T_x_rows = T_x::RowsAtCompileTime; using T_xbeta_partials = partials_return_t; using T_partials_return = partials_return_t; using T_ytheta_tmp = typename std::conditional_t>; using T_xbeta_tmp = typename std::conditional_t>; using T_x_ref = ref_type_if_t::value, T_x>; using T_alpha_ref = ref_type_if_t::value, T_alpha>; using T_beta_ref = ref_type_if_t::value, T_beta>; const size_t N_instances = T_x_rows == 1 ? 
stan::math::size(y) : x.rows(); const size_t N_attributes = x.cols(); static const char* function = "bernoulli_logit_glm_lpmf"; check_consistent_size(function, "Vector of dependent variables", y, N_instances); check_consistent_size(function, "Weight vector", beta, N_attributes); check_consistent_size(function, "Vector of intercepts", alpha, N_instances); if (size_zero(y)) { return 0; } const auto& y_ref = to_ref(y); check_bounded(function, "Vector of dependent variables", y_ref, 0, 1); if (!include_summand::value) { return 0; } T_x_ref x_ref = x; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; const auto& y_val = value_of(y_ref); const auto& x_val = to_ref_if::value>(value_of(x_ref)); const auto& alpha_val = value_of(alpha_ref); const auto& beta_val = value_of(beta_ref); const auto& y_val_vec = as_column_vector_or_scalar(y_val); const auto& alpha_val_vec = as_column_vector_or_scalar(alpha_val); const auto& beta_val_vec = to_ref_if::value>( as_column_vector_or_scalar(beta_val)); auto signs = to_ref_if::value>( 2 * as_array_or_scalar(y_val_vec) - 1); Array ytheta(N_instances); if (T_x_rows == 1) { T_ytheta_tmp ytheta_tmp = forward_as((x_val * beta_val_vec)(0, 0)); ytheta = signs * (ytheta_tmp + as_array_or_scalar(alpha_val_vec)); } else { ytheta = (x_val * beta_val_vec).array(); ytheta = signs * (ytheta + as_array_or_scalar(alpha_val_vec)); } // Compute the log-density and handle extreme values gracefully // using Taylor approximations. // And compute the derivatives wrt theta. 
static const double cutoff = 20.0; Eigen::Array exp_m_ytheta = exp(-ytheta); T_partials_return logp = sum( (ytheta > cutoff) .select(-exp_m_ytheta, (ytheta < -cutoff).select(ytheta, -log1p(exp_m_ytheta)))); if (!isfinite(logp)) { check_finite(function, "Weight vector", beta); check_finite(function, "Intercept", alpha); check_finite(function, "Matrix of independent variables", ytheta); } auto ops_partials = make_partials_propagator(x_ref, alpha_ref, beta_ref); // Compute the necessary derivatives. if (!is_constant_all::value) { Matrix theta_derivative = (ytheta > cutoff) .select(-exp_m_ytheta, (ytheta < -cutoff) .select(signs * T_partials_return(1.0), signs * exp_m_ytheta / (exp_m_ytheta + 1))); if (!is_constant_all::value) { if (T_x_rows == 1) { edge<2>(ops_partials).partials_ = forward_as>( theta_derivative.sum() * x_val); } else { partials<2>(ops_partials) = x_val.transpose() * theta_derivative; } } if (!is_constant_all::value) { if (T_x_rows == 1) { edge<0>(ops_partials).partials_ = forward_as>( beta_val_vec * theta_derivative.sum()); } else { edge<0>(ops_partials).partials_ = (beta_val_vec * theta_derivative.transpose()).transpose(); } } if (!is_constant_all::value) { partials<1>(ops_partials) = theta_derivative; } } return ops_partials.build(logp); } template inline return_type_t bernoulli_logit_glm_lpmf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta) { return bernoulli_logit_glm_lpmf(y, x, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_student_t_cholesky_rng.hpp0000644000176200001440000000664214645137106027404 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_CHOLESKY_RNG_HPP #define STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_CHOLESKY_RNG_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a multivariate student-t random variate with the given degrees of 
* freedom location and Cholesky factor the scale matrix * using the specified random number generator. * * mu can be either an Eigen::VectorXd, an Eigen::RowVectorXd, or a std::vector * of either of those types. * * @tparam t_loc Type of location parameter * @tparam rng type of pseudo-random number generator * @param nu a scalar indicating the degrees of freedom parameter * @param mu An Eigen::VectorXd, Eigen::RowVectorXd, or std::vector * of location values for the multivariate student t * @param L cholesky factor of the scale matrix * @param rng random number generator * @return eigen vector of multivariate student t random variates * with the given nu, mu, L * @throw std::domain_error if L is not a Cholesky factor, any value in mu is * not finite, nu is not positive, or nu is NaN * @throw std::invalid_argument if the length of (each) mu is not equal to the * number of rows and columns in L */ template inline typename StdVectorBuilder::type multi_student_t_cholesky_rng(double nu, const T_loc& mu, const Eigen::MatrixXd& L, RNG& rng) { using boost::normal_distribution; using boost::variate_generator; using boost::random::gamma_distribution; static const char* function = "multi_student_t_cholesky_rng"; check_not_nan(function, "Degrees of freedom parameter", nu); check_positive(function, "Degrees of freedom parameter", nu); check_positive(function, "Scale matrix rows", L.rows()); vector_seq_view mu_vec(mu); size_t N = size_mvt(mu); for (size_t i = 1; i < N; i++) { check_size_match(function, "Size of one of the vectors of " "the location variable", mu_vec[i].size(), "Size of the first vector of the " "location variable", mu_vec[i - 1].size()); } check_size_match(function, "Size of random variable", mu_vec[0].size(), "rows of scale parameter", L.rows()); for (size_t i = 0; i < N; i++) { check_finite(function, "Location parameter", mu_vec[i]); } const auto& L_ref = to_ref(L); check_cholesky_factor(function, "L matrix", L_ref); StdVectorBuilder output(N); 
variate_generator > std_normal_rng( rng, normal_distribution<>(0, 1)); double w = inv_gamma_rng(nu / 2, nu / 2, rng); for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(L.cols()); for (int i = 0; i < L.cols(); i++) { z(i) = std_normal_rng(); } z *= std::sqrt(w); output[n] = as_column_vector_or_scalar(mu_vec[n]) + L * z; } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_cdf_log.hpp0000644000176200001440000000113514645137106024332 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use gumbel_lcdf */ template return_type_t gumbel_cdf_log(const T_y& y, const T_loc& mu, const T_scale& beta) { return gumbel_lcdf(y, mu, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_cdf_log.hpp0000644000176200001440000000120414645137106027366 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use scaled_inv_chi_square_lcdf */ template return_type_t scaled_inv_chi_square_cdf_log( const T_y& y, const T_dof& nu, const T_scale& s) { return scaled_inv_chi_square_lcdf(y, nu, s); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_lpdf.hpp0000644000176200001440000001113014645137106023673 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_NORMAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the normal density for the specified scalar(s) given * the specified mean(s) and 
deviation(s). y, mu, or sigma can * each be either a scalar or a vector. Any vector inputs * must be the same length. * *

The result log probability is defined to be the sum of the * log probabilities for each observation/mean/deviation triple. * * @tparam T_y type of scalar * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y (Sequence of) scalar(s). * @param mu (Sequence of) location parameter(s) * for the normal distribution. * @param sigma (Sequence of) scale parameters for the normal distribution. * @return The log of the product of the densities. * @throw std::domain_error if the scale is not positive. */ template * = nullptr> inline return_type_t normal_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "normal_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", sigma_val); if (size_zero(y, mu, sigma)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_scaled = to_ref((y_val - mu_val) * inv_sigma); const auto& y_scaled_sq = to_ref_if::value>(y_scaled * y_scaled); size_t N = max_size(y, mu, sigma); T_partials_return logp = -0.5 * sum(y_scaled_sq); if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * N; } if (include_summand::value) { logp -= 
sum(log(sigma_val)) * N / math::size(sigma); } if (!is_constant_all::value) { auto scaled_diff = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(inv_sigma * y_scaled); if (!is_constant_all::value) { partials<0>(ops_partials) = -scaled_diff; } if (!is_constant_all::value) { partials<2>(ops_partials) = inv_sigma * y_scaled_sq - inv_sigma; } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(scaled_diff); } } return ops_partials.build(logp); } template inline return_type_t normal_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { return normal_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_rng.hpp0000644000176200001440000000430614645137106023527 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_RNG_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Cauchy random variate for the given location and scale * using the specified random number generator. * * mu and sigma can each be a scalar or a vector. Any non-scalar * inputs must be the same length. 
* * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) scale parameter(s) * @param rng random number generator * @return (Sequence of) Cauchy random variate(s) * @throw std::domain_error if mu is infinite or sigma is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type cauchy_rng( const T_loc& mu, const T_scale& sigma, RNG& rng) { using boost::variate_generator; using boost::random::cauchy_distribution; static const char* function = "cauchy_rng"; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma); T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(mu, sigma); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > cauchy_rng( rng, cauchy_distribution<>(mu_vec[n], sigma_vec[n])); output[n] = cauchy_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_lccdf.hpp0000644000176200001440000001403614645137106024552 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_LCCDF_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t student_t_lccdf( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_mu_ref = ref_type_t; 
using T_sigma_ref = ref_type_t; using std::exp; using std::log; using std::pow; static const char* function = "student_t_lccdf"; T_y_ref y_ref = y; T_nu_ref nu_ref = nu; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, nu, mu, sigma)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, nu, mu, sigma); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == NEGATIVE_INFTY) { return ops_partials.build(0.0); } } T_partials_return digammaHalf = 0; VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digammaNu_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digammaNuPlusHalf_vec(math::size(nu)); if (!is_constant_all::value) { digammaHalf = digamma(0.5); for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return nu_dbl = nu_vec.val(i); digammaNu_vec[i] = digamma(0.5 * nu_dbl); digammaNuPlusHalf_vec[i] = digamma(0.5 + 0.5 * nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(negative_infinity()); } const T_partials_return sigma_inv = 1.0 / sigma_vec.val(n); const T_partials_return t = (y_vec.val(n) - mu_vec.val(n)) * sigma_inv; const T_partials_return nu_dbl = nu_vec.val(n); const T_partials_return q = nu_dbl / (t * t); 
const T_partials_return r = 1.0 / (1.0 + q); const T_partials_return J = 2 * r * r * q / t; const T_partials_return betaNuHalf = beta(0.5, 0.5 * nu_dbl); T_partials_return zJacobian = t > 0 ? -0.5 : 0.5; if (q < 2) { T_partials_return z = inc_beta(0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r); const T_partials_return Pn = t > 0 ? 0.5 * z : 1.0 - 0.5 * z; const T_partials_return d_ibeta = pow(r, -0.5) * pow(1.0 - r, 0.5 * nu_dbl - 1) / betaNuHalf; P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, 0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r, digammaNu_vec[n], digammaHalf, digammaNuPlusHalf_vec[n], betaNuHalf); partials<1>(ops_partials)[n] -= zJacobian * (d_ibeta * (r / t) * (r / t) + 0.5 * g1) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { partials<3>(ops_partials)[n] -= zJacobian * d_ibeta * J * sigma_inv * t / Pn; } } else { T_partials_return z = 1.0 - inc_beta((T_partials_return)0.5, 0.5 * nu_dbl, r); zJacobian *= -1; const T_partials_return Pn = t > 0 ? 
0.5 * z : 1.0 - 0.5 * z; T_partials_return d_ibeta = pow(1.0 - r, 0.5 * nu_dbl - 1) * pow(r, -0.5) / betaNuHalf; P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, (T_partials_return)0.5, 0.5 * nu_dbl, r, digammaHalf, digammaNu_vec[n], digammaNuPlusHalf_vec[n], betaNuHalf); partials<1>(ops_partials)[n] -= zJacobian * (-d_ibeta * (r / t) * (r / t) + 0.5 * g2) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { partials<3>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv * t / Pn; } } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_cdf_log.hpp0000644000176200001440000000144414645137106025647 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_binomial_lcdf */ template return_type_t beta_binomial_cdf_log(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { return beta_binomial_lcdf(n, N, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_rng.hpp0000644000176200001440000000430314645137106023540 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_RNG_HPP #define STAN_MATH_PRIM_PROB_NORMAL_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Normal random variate for the given location and scale * using the specified random number generator. * * mu and sigma can each be a scalar or a vector. Any non-scalar * inputs must be the same length. 
* * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) positive scale parameter(s) * @param rng random number generator * @return (Sequence of) normal random variate(s) * @throw std::domain_error if mu is infinite or sigma is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type normal_rng( const T_loc& mu, const T_scale& sigma, RNG& rng) { using boost::normal_distribution; using boost::variate_generator; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "normal_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma); T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(mu, sigma); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > norm_rng( rng, normal_distribution<>(mu_vec[n], sigma_vec[n])); output[n] = norm_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/rayleigh_cdf_log.hpp0000644000176200001440000000106314645137106024663 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use rayleigh_lcdf */ template return_type_t rayleigh_cdf_log(const T_y& y, const T_scale& sigma) { return rayleigh_lcdf(y, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/dirichlet_lpdf.hpp0000644000176200001440000001131014645137106024352 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_PROB_DIRICHLET_LPDF_HPP #define STAN_MATH_PRIM_PROB_DIRICHLET_LPDF_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the Dirichlet density for the given theta and * a vector of prior sample sizes, alpha. * Each element of alpha must be greater than 0. * Each element of theta must be greater than or 0. * Theta sums to 1. * * \f[ * \theta\sim\mbox{Dirichlet}(\alpha_1,\ldots,\alpha_k)\\ * \log(p(\theta\,|\,\alpha_1,\ldots,\alpha_k))=\log\left( * \frac{\Gamma(\alpha_1+\cdots+\alpha_k)}{\Gamma(\alpha_1)+ * \cdots+\Gamma(\alpha_k)}* \left(\theta_1^{\alpha_1-1}+ * \cdots+\theta_k^{\alpha_k-1}\right)\right)\\ * =\log(\Gamma(\alpha_1+\cdots+\alpha_k))-\left( * \log(\Gamma(\alpha_1))+\cdots+\log(\Gamma(\alpha_k))\right)+ * (\alpha_1-1)\log(\theta_1)+\cdots+(\alpha_k-1)\log(\theta_k) * \f] * * \f[ * \frac{\partial }{\partial * \theta_x}\log(p(\theta\,|\,\alpha_1,\ldots,\alpha_k))= * \frac{\alpha_x-1}{\theta_x} * \f] * * \f[ * \frac{\partial}{\partial\alpha_x}\log(p(\theta\,|\,\alpha_1,\ldots,\alpha_k)) * =\psi_{(0)}(\sum\alpha)-\psi_{(0)}(\alpha_x)+\log\theta_x * \f] * * @tparam T_prob type of scalar * @tparam T_prior_size type of prior sample sizes * @param theta A scalar vector. * @param alpha Prior sample sizes. * @return The log of the Dirichlet density. * @throw std::domain_error if any element of alpha is less than * or equal to 0. * @throw std::domain_error if any element of theta is less than 0. * @throw std::domain_error if the sum of theta is not 1. 
*/ template * = nullptr> return_type_t dirichlet_lpdf(const T_prob& theta, const T_prior_size& alpha) { using T_partials_return = partials_return_t; using T_partials_array = typename Eigen::Array; using T_theta_ref = ref_type_t; using T_alpha_ref = ref_type_t; static const char* function = "dirichlet_lpdf"; T_theta_ref theta_ref = theta; T_alpha_ref alpha_ref = alpha; vector_seq_view theta_vec(theta_ref); vector_seq_view alpha_vec(alpha_ref); const size_t t_length = max_size_mvt(theta, alpha); check_consistent_sizes(function, "probabilities", theta_vec[0], "prior sample sizes", alpha_vec[0]); for (size_t t = 0; t < t_length; t++) { check_positive(function, "prior sample sizes", alpha_vec[t]); check_simplex(function, "probabilities", theta_vec[t]); } const size_t t_size = theta_vec[0].size(); T_partials_array theta_dbl(t_size, t_length); for (size_t t = 0; t < t_length; t++) { theta_dbl.col(t) = theta_vec.val(t); } T_partials_array alpha_dbl(t_size, t_length); for (size_t t = 0; t < t_length; t++) { alpha_dbl.col(t) = alpha_vec.val(t); } T_partials_return lp(0.0); if (include_summand::value) { lp += (lgamma(alpha_dbl.colwise().sum()) - lgamma(alpha_dbl).colwise().sum()) .sum(); } const auto& alpha_m_1 = to_ref_if::value>(alpha_dbl - 1.0); const auto& theta_log = to_ref_if::value>(theta_dbl.log()); if (include_summand::value) { lp += (theta_log * alpha_m_1).sum(); } auto ops_partials = make_partials_propagator(theta_ref, alpha_ref); if (!is_constant_all::value) { for (size_t t = 0; t < t_length; t++) { partials_vec<0>(ops_partials)[t] += (alpha_m_1.col(t) / theta_dbl.col(t)).matrix(); } } if (!is_constant_all::value) { for (size_t t = 0; t < t_length; t++) { partials_vec<1>(ops_partials)[t] += (digamma(alpha_dbl.col(t).sum()) - digamma(alpha_dbl.col(t)) + theta_log.col(t)) .matrix(); } } return ops_partials.build(lp); } template return_type_t dirichlet_lpdf(const T_prob& theta, const T_prior_size& alpha) { return dirichlet_lpdf(theta, alpha); } } // namespace math } 
// namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_cdf_log.hpp0000644000176200001440000000117114645137106026737 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use double_exponential_lcdf */ template return_type_t double_exponential_cdf_log( const T_y& y, const T_loc& mu, const T_scale& sigma) { return double_exponential_lcdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_sufficient_lpdf.hpp0000644000176200001440000001553314645137106026125 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_SUFFICIENT_LPDF_HPP #define STAN_MATH_PRIM_PROB_NORMAL_SUFFICIENT_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the normal density for the specified scalar(s) given * the specified mean(s) and deviation(s). * y, s_squared, mu, or sigma can each be either * a scalar, a std vector or Eigen vector. * n can be either a single int or an std vector of ints. * Any vector inputs must be the same length. * *

The result log probability is defined to be the sum of the * log probabilities for each observation/mean/deviation triple. * * @tparam T_y type of sample average parameter * @tparam T_s type of sample squared errors parameter * @tparam T_n type of sample size parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * * @param y_bar (Sequence of) scalar(s) (sample average(s)). * @param s_squared (Sequence of) sum(s) of sample squared errors * @param n_obs (Sequence of) sample size(s) * @param mu (Sequence of) location parameter(s) * for the normal distribution. * @param sigma (Sequence of) scale parameters for the normal * distribution. * @return The log of the product of the densities. * @throw std::domain_error if either n or sigma are not positive, * if s_squared is negative or if any parameter is not finite. */ template return_type_t normal_sufficient_lpdf( const T_y& y_bar, const T_s& s_squared, const T_n& n_obs, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_s_ref = ref_type_if_t::value, T_s>; using T_n_ref = ref_type_if_t::value, T_n>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "normal_sufficient_lpdf"; check_consistent_sizes(function, "Location parameter sufficient statistic", y_bar, "Scale parameter sufficient statistic", s_squared, "Number of observations", n_obs, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y_bar; T_s_ref s_squared_ref = s_squared; T_n_ref n_obs_ref = n_obs; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) s_squared_val = to_ref(as_value_column_array_or_scalar(s_squared_ref)); decltype(auto) n_obs_val_int = to_ref(as_value_column_array_or_scalar(n_obs_ref)); decltype(auto) n_obs_val = to_ref( 
promote_scalar(as_value_column_array_or_scalar(n_obs_ref))); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_finite(function, "Location parameter sufficient statistic", y_val); check_finite(function, "Scale parameter sufficient statistic", s_squared_val); check_nonnegative(function, "Scale parameter sufficient statistic", s_squared_val); check_positive_finite(function, "Number of observations", n_obs_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y_bar, s_squared, n_obs, mu, sigma)) { return 0.0; } if (!include_summand::value) { return 0.0; } const auto& sigma_squared = to_ref_if::value>( square(sigma_val)); const auto& diff = to_ref(mu_val - y_val); const auto& cons_expr = to_ref_if::value>( s_squared_val + n_obs_val * diff * diff); size_t N = max_size(y_bar, s_squared, n_obs, mu, sigma); T_partials_return logp = -sum(cons_expr / (2 * sigma_squared)); if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * sum(n_obs_val) * N / math::size(n_obs); } if (include_summand::value) { logp -= sum(n_obs_val * log(sigma_val)) * N / max_size(n_obs, sigma); } auto ops_partials = make_partials_propagator(y_ref, s_squared_ref, mu_ref, sigma_ref); if (!is_constant_all::value) { auto common_derivative = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( N / max_size(y_bar, mu, n_obs, sigma) * n_obs_val / sigma_squared * diff); if (!is_constant_all::value) { partials<2>(ops_partials) = -common_derivative; } if (!is_constant_all::value) { partials<0>(ops_partials) = std::move(common_derivative); } } if (!is_constant_all::value) { using T_sigma_value_scalar = scalar_type_t; using T_sigma_value_vector = Eigen::Array; if (is_vector::value) { edge<1>(ops_partials).partials_ = -0.5 / forward_as(sigma_squared); } else { if (is_vector::value) { partials<1>(ops_partials) = 
T_sigma_value_vector::Constant( N, -0.5 / forward_as(sigma_squared)); } else { forward_as>( partials<1>(ops_partials)) = -0.5 / sigma_squared * N / math::size(sigma); } } } if (!is_constant_all::value) { edge<3>(ops_partials).partials_ = (cons_expr / sigma_squared - n_obs_val) / sigma_val; } return ops_partials.build(logp); } template inline return_type_t normal_sufficient_lpdf( const T_y& y_bar, const T_s& s_squared, const T_n& n_obs, const T_loc& mu, const T_scale& sigma) { return normal_sufficient_lpdf(y_bar, s_squared, n_obs, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_cdf_log.hpp0000644000176200001440000000113214645137106023767 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_lcdf */ template return_type_t beta_cdf_log( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { return beta_lcdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_ccdf_log.hpp0000644000176200001440000000121714645137106025225 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use von_mises_lccdf */ template inline return_type_t von_mises_ccdf_log(const T_x& x, const T_mu& mu, const T_k& k) { return von_mises_lccdf(x, mu, k); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_lccdf.hpp0000644000176200001440000001010114645137106025525 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LCCDF_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the inverse chi square log complementary cumulative distribution * function for the given variate and degrees of freedom. If given * containers of matching sizes, returns the log sum of probabilities. * * @tparam T_y type of scalar parameter * @tparam T_dof type of degrees of freedom parameter * @param y scalar parameter * @param nu degrees of freedom parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is negative or nu is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t inv_chi_square_lccdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; static const char* function = "inv_chi_square_lccdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, nu)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); size_t N = max_size(y, nu); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return nu_dbl = nu_vec.val(i); gamma_vec[i] = tgamma(0.5 * nu_dbl); digamma_vec[i] = digamma(0.5 * nu_dbl); } } for (size_t n = 0; n < 
N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(negative_infinity()); } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return nu_dbl = nu_vec.val(n); const T_partials_return Pn = gamma_p(0.5 * nu_dbl, 0.5 * y_inv_dbl); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= 0.5 * y_inv_dbl * y_inv_dbl * exp(-0.5 * y_inv_dbl) * pow(0.5 * y_inv_dbl, 0.5 * nu_dbl - 1) / tgamma(0.5 * nu_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= 0.5 * grad_reg_inc_gamma(0.5 * nu_dbl, 0.5 * y_inv_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/ordered_probit_log.hpp0000644000176200001440000000430014645137106025243 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_ORDERED_PROBIT_LOG_HPP #define STAN_MATH_PRIM_PROB_ORDERED_PROBIT_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the (natural) log probability of the integer/s * given the vector of continuous location/s and * specified cutpoints in an ordered probit model. * *

Typically the continuous location * will be the dot product of a vector of regression coefficients * and a vector of predictors for the outcome. * * @tparam propto True if calculating up to a proportion. * @tparam T_y y variable type (int or array of integers). * @tparam T_loc Location type (double or vector). * @tparam T_cut Cut-point type (vector or array of vectors). * @param y Integers * @param lambda Continuous location variables. * @param c Positive increasing cutpoints. * @return Log probability of outcome given location and * cutpoints. * @throw std::domain_error If the outcome is not between 1 and * the number of cutpoints plus 2; if the cutpoint vector is * empty; if the cutpoint vector contains a non-positive, * non-finite value; or if the cutpoint vector is not sorted in * ascending order. * @throw std::invalid_argument If array y and vector lambda * are different lengths. * @throw std::invalid_argument if array y and array of vectors * c are different lengths. * * @deprecated use ordered_probit_lpmf */ template return_type_t ordered_probit_log(const T_y& y, const T_loc& lambda, const T_cut& c) { return ordered_probit_lpmf(y, lambda, c); } /** \ingroup multivar_dists * @deprecated use ordered_probit_lpmf */ template return_type_t ordered_probit_log(const T_y& y, const T_loc& lambda, const T_cut& c) { return ordered_probit_lpmf(y, lambda, c); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_cdf_log.hpp0000644000176200001440000000127114645137106025016 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use inv_gamma_lcdf */ template return_type_t inv_gamma_cdf_log(const T_y& y, const T_shape& alpha, const T_scale& beta) { return inv_gamma_lcdf(y, alpha, beta); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/rayleigh_lcdf.hpp0000644000176200001440000000513714645137106024204 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_LCDF_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t rayleigh_lcdf(const T_y& y, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "rayleigh_lcdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", sigma); T_y_ref y_ref = y; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_positive(function, "Scale parameter", sigma_val); if (size_zero(y, sigma)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, sigma_ref); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_div_sigma_square = to_ref_if::value>(y_val * inv_sigma * inv_sigma); const auto& exp_val = to_ref_if::value>( exp(-0.5 * y_val * y_div_sigma_square)); T_partials_return cdf_log = sum(log1m(exp_val)); if (!is_constant_all::value) { auto common_deriv = y_div_sigma_square * exp_val / (1 - exp_val); if (!is_constant_all::value) { partials<1>(ops_partials) = -y_val * inv_sigma * common_deriv; } if (!is_constant_all::value) { partials<0>(ops_partials) = std::move(common_deriv); } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_rng.hpp0000644000176200001440000000535714645137106024603 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_RNG_HPP #define 
STAN_MATH_PRIM_PROB_SKEW_NORMAL_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Skew-normal random variate for the given location, scale, * and shape using the specified random number generator. * * mu, sigma, and alpha can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam T_shape type of shape parameter * @tparam RNG type of random number generator * * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) scale parameter(s) * @param alpha (Sequence of) shape parameter(s) * @param rng random number generator * @return (Sequence of) Skew-normal random variate(s) * @throw std::domain_error if mu is infinite, sigma is nonpositive, or * alpha is infinite * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type skew_normal_rng(const T_loc& mu, const T_scale& sigma, const T_shape& alpha, RNG& rng) { using boost::variate_generator; using boost::random::normal_distribution; static const char* function = "skew_normal_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma, "Shape Parameter", alpha); const auto& mu_ref = to_ref(mu); const auto& sigma_ref = to_ref(sigma); const auto& alpha_ref = to_ref(alpha); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); check_finite(function, "Shape parameter", alpha_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); scalar_seq_view alpha_vec(alpha_ref); size_t N = max_size(mu, sigma, alpha); VectorBuilder output(N); variate_generator > norm_rng( rng, normal_distribution<>(0, 1)); for (size_t n = 0; n < N; ++n) { double r1 = norm_rng(); double r2 = norm_rng(); if (r2 > alpha_vec[n] * 
r1) { r1 = -r1; } output[n] = mu_vec[n] + sigma_vec[n] * r1; } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/rayleigh_lccdf.hpp0000644000176200001440000000457414645137106024353 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_LCCDF_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t rayleigh_lccdf(const T_y& y, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "rayleigh_lccdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", sigma); T_y_ref y_ref = y; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_positive(function, "Scale parameter", sigma_val); if (size_zero(y, sigma)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, sigma_ref); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_div_sigma_square = to_ref_if::value>(y_val * inv_sigma * inv_sigma); const auto& y_square_div_sigma_square = to_ref_if::value>(y_val * y_div_sigma_square); T_partials_return ccdf_log = -0.5 * sum(y_square_div_sigma_square); if (!is_constant_all::value) { partials<0>(ops_partials) = -y_div_sigma_square; } if (!is_constant_all::value) { partials<1>(ops_partials) = y_square_div_sigma_square * inv_sigma; } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_proportion_cdf_log.hpp0000644000176200001440000000132314645137106026264 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_PROB_BETA_PROPORTION_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BETA_PROPORTION_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use beta_proportion_lcdf */ template return_type_t beta_proportion_cdf_log(const T_y& y, const T_loc& mu, const T_prec& kappa) { return beta_proportion_lcdf(y, mu, kappa); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_lpmf.hpp0000644000176200001440000000460614645137106025772 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LPMF_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LPMF_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log PMF for the Poisson-binomial distribution evaluated at an * specified array of numbers of successes and probabilities of successes. * * @tparam T_y type of number of successes parameter * @tparam T_theta type of chance of success parameters * @param y input scalar, vector, or nested vector of numbers of successes * @param theta array of chances of success parameters * @return sum of log probabilities * @throw std::domain_error if y is out of bounds * @throw std::domain_error if theta is not a valid vector of probabilities * @throw std::invalid_argument If y and theta are different lengths */ template return_type_t poisson_binomial_lpmf(const T_y& y, const T_theta& theta) { static const char* function = "poisson_binomial_lpmf"; size_t size_theta = size_mvt(theta); if (size_theta > 1) { check_consistent_sizes(function, "Successes variables", y, "Probability parameters", theta); } size_t max_sz = std::max(stan::math::size(y), size_theta); scalar_seq_view y_vec(y); vector_seq_view theta_vec(theta); for (size_t i = 0; i < max_sz; ++i) { check_bounded(function, "Successes variable", y_vec[i], 0, theta_vec[i].size()); check_finite(function, "Probability parameters", theta_vec.val(i)); 
check_bounded(function, "Probability parameters", theta_vec.val(i), 0.0, 1.0); } return_type_t log_prob = 0.0; for (size_t i = 0; i < max_sz; ++i) { auto x = poisson_binomial_log_probs(y_vec[i], theta_vec[i]); log_prob += x(y_vec[i]); } return log_prob; } template return_type_t poisson_binomial_lpmf(const T_y& y, const T_theta& theta) { return poisson_binomial_lpmf(y, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/loglogistic_cdf.hpp0000644000176200001440000001174414645137106024544 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGLOGISTIC_CDF_HPP #define STAN_MATH_PRIM_PROB_LOGLOGISTIC_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The loglogistic cumulative distribution function for the specified * scalar(s) given the specified scales(s) and shape(s). y, alpha, or * beta can each be either a scalar or a vector. Any vector inputs * must be the same length. * * * @tparam T_y type of scalar. * @tparam T_scale type of scale parameter. * @tparam T_shape type of shape parameter. * @param y (Sequence of) scalar(s). * @param alpha (Sequence of) scale parameter(s) * for the loglogistic distribution. * @param beta (Sequence of) shape parameter(s) for the * loglogistic distribution. * @return The loglogistic cdf evaluated at the specified arguments. * @throw std::domain_error if any of the inputs are not positive or * if and of the parameters are not finite. 
*/ template * = nullptr> return_type_t loglogistic_cdf(const T_y& y, const T_scale& alpha, const T_shape& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; using std::pow; static const char* function = "loglogistic_cdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", alpha, "Shape parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", alpha_val); check_positive_finite(function, "Shape parameter", beta_val); if (size_zero(y, alpha, beta)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); if (sum(promote_scalar(y_val == 0))) { return ops_partials.build(0.0); } const auto& alpha_div_y = to_ref_if::value>(alpha_val / y_val); const auto& alpha_div_y_pow_beta = to_ref_if::value>( pow(alpha_div_y, beta_val)); const auto& prod_all = to_ref_if::value>( 1 / (1 + alpha_div_y_pow_beta)); T_partials_return cdf = prod(prod_all); if (!is_constant_all::value) { const auto& prod_all_sq = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(square(prod_all)); const auto& cdf_div_elt = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(cdf / prod_all); if (!is_constant_all::value) { const auto& alpha_div_times_beta = to_ref_if< !is_constant_all::value + !is_constant_all::value == 2>( alpha_div_y_pow_beta * beta_val); if (!is_constant_all::value) { const auto& y_deriv = alpha_div_times_beta / y_val * prod_all_sq; partials<0>(ops_partials) = y_deriv * cdf_div_elt; } if (!is_constant_all::value) { const auto& 
alpha_deriv = -alpha_div_times_beta / alpha_val * prod_all_sq; partials<1>(ops_partials) = alpha_deriv * cdf_div_elt; } } if (!is_constant_all::value) { const auto& beta_deriv = -multiply_log(alpha_div_y_pow_beta, alpha_div_y) * prod_all_sq; partials<2>(ops_partials) = beta_deriv * cdf_div_elt; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_rng.hpp0000644000176200001440000000426214645137106023527 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_RNG_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_RNG_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Gumbel random variate with the given location and scale * using the specified random number generator. * * mu and beta can each be a scalar or a vector. Any non-scalar inputs * must be the same length. * * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param mu (Sequence of) location parameter(s) * @param beta (Sequence of) scale parameter(s) * @param rng random number generator * @return (Sequence of) Gumbel random variate(s) * @throw std::domain_error if mu is infinite or beta is nonpositive. 
* @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type gumbel_rng( const T_loc& mu, const T_scale& beta, RNG& rng) { using boost::uniform_01; using boost::variate_generator; using T_mu_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "gumbel_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", beta); T_mu_ref mu_ref = mu; T_beta_ref beta_ref = beta; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", beta_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(mu, beta); VectorBuilder output(N); variate_generator > uniform01_rng(rng, uniform_01<>()); for (size_t n = 0; n < N; ++n) { output[n] = mu_vec[n] - beta_vec[n] * std::log(-std::log(uniform01_rng())); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_cdf_log.hpp0000644000176200001440000000125314645137106024523 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use weibull_lcdf */ template return_type_t weibull_cdf_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return weibull_lcdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/hypergeometric_rng.hpp0000644000176200001440000000215514645137106025301 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_HYPERGEOMETRIC_RNG_HPP #define STAN_MATH_PRIM_PROB_HYPERGEOMETRIC_RNG_HPP #include #include #include #include namespace stan { namespace math { template inline int hypergeometric_rng(int N, int a, int b, RNG& rng) { using boost::variate_generator; using boost::math::hypergeometric_distribution; static const char* function = "hypergeometric_rng"; 
check_bounded(function, "Draws parameter", value_of(N), 0, a + b); check_positive(function, "Draws parameter", N); check_positive(function, "Successes in population parameter", a); check_positive(function, "Failures in population parameter", b); hypergeometric_distribution<> dist(b, N, a + b); double u = uniform_rng(0.0, 1.0, rng); int min = 0; int max = a - 1; while (min < max) { int mid = (min + max) / 2; if (cdf(dist, mid + 1) > u) { max = mid; } else { min = mid + 1; } } return min + 1; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_ccdf_log.hpp0000644000176200001440000000123114645137106024473 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use cauchy_lccdf */ template return_type_t cauchy_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return cauchy_lccdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_lpmf.hpp0000644000176200001440000000600114645137106024107 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LPMF_HPP #define STAN_MATH_PRIM_PROB_POISSON_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // Poisson(n|lambda) [lambda > 0; n >= 0] template * = nullptr> return_type_t poisson_lpmf(const T_n& n, const T_rate& lambda) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_if_t::value, T_n>; using T_lambda_ref = ref_type_if_t::value, T_rate>; using std::isinf; static const char* function = "poisson_lpmf"; check_consistent_sizes(function, "Random variable", n, "Rate parameter", lambda); T_n_ref n_ref = n; T_lambda_ref lambda_ref = lambda; decltype(auto) n_val = 
to_ref(as_value_column_array_or_scalar(n_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_nonnegative(function, "Random variable", n_val); check_nonnegative(function, "Rate parameter", lambda_val); if (size_zero(n, lambda)) { return 0.0; } if (!include_summand::value) { return 0.0; } if (sum(promote_scalar(isinf(lambda_val)))) { return LOG_ZERO; } size_t N = max_size(n, lambda); scalar_seq_view n_vec(n_val); scalar_seq_view lambda_vec(lambda_val); for (size_t i = 0; i < N; i++) { if (lambda_vec[i] == 0 && n_vec[i] != 0) { return LOG_ZERO; } } auto ops_partials = make_partials_propagator(lambda_ref); T_partials_return logp = stan::math::sum(multiply_log(n_val, lambda_val)); if (include_summand::value) { logp -= sum(lambda_val) * N / math::size(lambda); } if (include_summand::value) { logp -= sum(lgamma(n_val + 1.0)) * N / math::size(n); } if (!is_constant_all::value) { partials<0>(ops_partials) = n_val / lambda_val - 1.0; } return ops_partials.build(logp); } template inline return_type_t poisson_lpmf(const T_n& n, const T_rate& lambda) { return poisson_lpmf(n, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/frechet_cdf.hpp0000644000176200001440000000642214645137106023642 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_CDF_HPP #define STAN_MATH_PRIM_PROB_FRECHET_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t frechet_cdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_sigma_ref = ref_type_t; using std::pow; static const char* function = "frechet_cdf"; T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; check_positive(function, 
"Random variable", y_ref); check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y_ref, alpha_ref, sigma_ref)) { return 1.0; } T_partials_return cdf(1.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view sigma_vec(sigma_ref); scalar_seq_view alpha_vec(alpha_ref); size_t N = max_size(y_ref, sigma_ref, alpha_ref); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return pow_n = pow(sigma_dbl / y_dbl, alpha_dbl); const T_partials_return cdf_n = exp(-pow_n); cdf *= cdf_n; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += pow_n * alpha_dbl / y_dbl; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += pow_n * log(y_dbl / sigma_dbl); } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= pow_n * alpha_dbl / sigma_dbl; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(alpha); ++n) { partials<1>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(sigma); ++n) { partials<2>(ops_partials)[n] *= cdf; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_rng.hpp0000644000176200001440000000554414645137106024762 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_RNG_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a multivariate normal random variate with the given location and * covariance using the 
specified random number generator. * * mu can be either an Eigen::VectorXd, an Eigen::RowVectorXd, or a std::vector * of either of those types. * * @tparam T_loc Type of location parameter * @tparam RNG Type of pseudo-random number generator * @param mu (Sequence of) location parameter(s) * @param S Covariance matrix * @param rng random number generator * @throw std::domain_error if S is not positive definite, or * std::invalid_argument if the length of (each) mu is not equal to the number * of rows and columns in S */ template inline typename StdVectorBuilder::type multi_normal_rng(const T_loc& mu, const Eigen::Matrix& S, RNG& rng) { using boost::normal_distribution; using boost::variate_generator; static const char* function = "multi_normal_rng"; check_positive(function, "Covariance matrix rows", S.rows()); vector_seq_view mu_vec(mu); size_t size_mu = mu_vec[0].size(); size_t N = size_mvt(mu); for (size_t i = 1; i < N; i++) { check_size_match(function, "Size of one of the vectors of " "the location variable", mu_vec[i].size(), "Size of the first vector of the " "location variable", size_mu); } for (size_t i = 0; i < N; i++) { check_finite(function, "Location parameter", mu_vec[i]); } const auto& S_ref = to_ref(S); check_not_nan(function, "Covariance matrix", S_ref); check_symmetric(function, "Covariance matrix", S_ref); Eigen::LLT llt_of_S = S_ref.llt(); check_pos_definite("multi_normal_rng", "covariance matrix argument", llt_of_S); StdVectorBuilder output(N); variate_generator > std_normal_rng( rng, normal_distribution<>(0, 1)); for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(S.cols()); for (int i = 0; i < S.cols(); i++) { z(i) = std_normal_rng(); } output[n] = as_column_vector_or_scalar(mu_vec[n]) + llt_of_S.matrixL() * z; } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_log_log.hpp0000644000176200001440000000162014645137106024575 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_PROB_POISSON_LOG_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_LOG_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use poisson_log_lpmf */ template return_type_t poisson_log_log(const T_n& n, const T_log_rate& alpha) { return poisson_log_lpmf(n, alpha); } /** \ingroup prob_dists * @deprecated use poisson_log_lpmf */ template inline return_type_t poisson_log_log(const T_n& n, const T_log_rate& alpha) { return poisson_log_lpmf(n, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_log_rng.hpp0000644000176200001440000000661214645137106025762 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_RNG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_RNG_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a negative binomial random variate with the specified log-location * and inverse dispersion parameters using the given random number generator. * * eta and phi can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_loc Type of log-location parameter * @tparam T_inv Type of inverse overdispersion parameter * @tparam RNG type of random number generator * @param eta (Sequence of) positive log-location parameter(s) * @param phi (Sequence of) positive inverse dispersion parameter(s) * @param rng random number generator * @return (Sequence of) negative binomial random variate(s) * @throw std::domain_error if eta is non-finite or phi is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type neg_binomial_2_log_rng(const T_loc& eta, const T_inv& phi, RNG& rng) { using boost::gamma_distribution; using boost::variate_generator; using boost::random::poisson_distribution; using T_eta_ref = ref_type_t; using T_phi_ref = ref_type_t; static const char* function = "neg_binomial_2_log_rng"; check_consistent_sizes(function, "Log-location parameter", eta, "Inverse dispersion parameter", phi); T_eta_ref eta_ref = eta; T_phi_ref phi_ref = phi; check_finite(function, "Log-location parameter", eta_ref); check_positive_finite(function, "Inverse dispersion parameter", phi_ref); scalar_seq_view eta_vec(eta_ref); scalar_seq_view phi_vec(phi_ref); size_t N = max_size(eta, phi); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { double exp_eta_div_phi = std::exp(static_cast(eta_vec[n])) / phi_vec[n]; // gamma_rng params must be positive and finite check_positive_finite(function, "Exponential of the log-location parameter " "divided by the precision parameter", exp_eta_div_phi); double rng_from_gamma = variate_generator >( rng, gamma_distribution<>(phi_vec[n], exp_eta_div_phi))(); // same as the constraints for poisson_rng check_less(function, "Random number that came from gamma distribution", rng_from_gamma, POISSON_MAX_RATE); check_not_nan(function, "Random number that came from gamma distribution", rng_from_gamma); check_nonnegative(function, "Random number that came from gamma distribution", 
rng_from_gamma); output[n] = variate_generator >( rng, poisson_distribution<>(rng_from_gamma))(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_ccdf_log.hpp0000644000176200001440000000130114645137106025153 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use inv_gamma_lccdf */ template return_type_t inv_gamma_ccdf_log(const T_y& y, const T_shape& alpha, const T_scale& beta) { return inv_gamma_lccdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_lcdf.hpp0000644000176200001440000001075514645137106024723 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_SKEW_NORMAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t skew_normal_lcdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; static const char* function = "skew_normal_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Shape parameter", alpha); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = 
to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", sigma_val); check_finite(function, "Shape parameter", alpha_val); if (size_zero(y, mu, sigma, alpha)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, alpha_ref); const auto& diff = to_ref((y_val - mu_val) / sigma_val); const auto& scaled_diff = to_ref_if::value>(diff / SQRT_TWO); const auto& erfc_m_scaled_diff = erfc(-scaled_diff); const auto& owens_t_diff_alpha = owens_t(diff, alpha_val); const auto& cdf_log_ = to_ref_if::value>( 0.5 * erfc_m_scaled_diff - 2 * owens_t_diff_alpha); T_partials_return cdf_log = sum(log(cdf_log_)); if (!is_constant_all::value) { const auto& diff_square = to_ref_if < !is_constant_all::value && !is_constant_all::value > (square(diff)); if (!is_constant_all::value) { const auto& erf_alpha_scaled_diff = erf(alpha_val * scaled_diff); const auto& exp_m_scaled_diff_square = exp(-0.5 * diff_square); auto rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>( (erf_alpha_scaled_diff + 1) * INV_SQRT_TWO_PI * exp_m_scaled_diff_square / (sigma_val * cdf_log_)); if (!is_constant_all::value) { partials<1>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = -rep_deriv * diff; } if (!is_constant_all::value) { partials<0>(ops_partials) = std::move(rep_deriv); } } if (!is_constant_all::value) { const auto& alpha_square = square(alpha_val); edge<3>(ops_partials).partials_ = -exp(-0.5 * diff_square * (1.0 + alpha_square)) / ((1 + alpha_square) * pi()) / cdf_log_; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_lpdf.hpp0000644000176200001440000000777614645137106025132 0ustar 
liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t multi_normal_lpdf(const T_y& y, const T_loc& mu, const T_covar& Sigma) { using T_covar_elem = typename scalar_type::type; using lp_type = return_type_t; using Eigen::Dynamic; static const char* function = "multi_normal_lpdf"; check_positive(function, "Covariance matrix rows", Sigma.rows()); check_consistent_sizes_mvt(function, "y", y, "mu", mu); size_t number_of_y = size_mvt(y); size_t number_of_mu = size_mvt(mu); if (number_of_y == 0 || number_of_mu == 0) { return 0.0; } lp_type lp(0.0); vector_seq_view y_vec(y); vector_seq_view mu_vec(mu); size_t size_vec = max_size_mvt(y, mu); int size_y = y_vec[0].size(); int size_mu = mu_vec[0].size(); if (size_vec > 1) { for (size_t i = 1, size_mvt_y = size_mvt(y); i < size_mvt_y; i++) { check_size_match(function, "Size of one of the vectors of " "the random variable", y_vec[i].size(), "Size of the first vector of the " "random variable", size_y); } for (size_t i = 1, size_mvt_mu = size_mvt(mu); i < size_mvt_mu; i++) { check_size_match(function, "Size of one of the vectors of " "the location variable", mu_vec[i].size(), "Size of the first vector of the " "location variable", size_mu); } } check_size_match(function, "Size of random variable", size_y, "size of location parameter", size_mu); check_size_match(function, "Size of random variable", size_y, "rows of covariance parameter", Sigma.rows()); check_size_match(function, "Size of random variable", size_y, "columns of covariance parameter", Sigma.cols()); for (size_t i = 0; i < size_vec; i++) { check_finite(function, "Location parameter", mu_vec[i]); check_not_nan(function, "Random variable", y_vec[i]); } const auto& Sigma_ref = to_ref(Sigma); check_symmetric(function, "Covariance matrix", Sigma_ref); auto ldlt_Sigma = 
make_ldlt_factor(Sigma_ref); check_ldlt_factor(function, "LDLT_Factor of covariance parameter", ldlt_Sigma); if (size_y == 0) { return lp; } if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * size_y * size_vec; } if (include_summand::value) { lp -= 0.5 * log_determinant_ldlt(ldlt_Sigma) * size_vec; } if (include_summand::value) { lp_type sum_lp_vec(0.0); for (size_t i = 0; i < size_vec; i++) { const auto& y_col = as_column_vector_or_scalar(y_vec[i]); const auto& mu_col = as_column_vector_or_scalar(mu_vec[i]); sum_lp_vec += trace_inv_quad_form_ldlt(ldlt_Sigma, y_col - mu_col); } lp -= 0.5 * sum_lp_vec; } return lp; } template inline return_type_t multi_normal_lpdf( const T_y& y, const T_loc& mu, const T_covar& Sigma) { return multi_normal_lpdf(y, mu, Sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_lcdf.hpp0000644000176200001440000001033414645137106026705 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LCDF_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t scaled_inv_chi_square_lcdf( const T_y& y, const T_dof& nu, const T_scale& s) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_s_ref = ref_type_t; static const char* function = "scaled_inv_chi_square_lcdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu, "Scale parameter", s); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; T_s_ref s_ref = s; check_nonnegative(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_positive_finite(function, "Scale parameter", s_ref); if (size_zero(y, nu, s)) { return 
0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref, s_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view s_vec(s_ref); size_t N = max_size(y, nu, s); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(negative_infinity()); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return half_nu_dbl = 0.5 * nu_vec.val(i); gamma_vec[i] = tgamma(half_nu_dbl); digamma_vec[i] = digamma(half_nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return half_nu_dbl = 0.5 * nu_vec.val(n); const T_partials_return s_dbl = s_vec.val(n); const T_partials_return half_s2_overx_dbl = 0.5 * s_dbl * s_dbl * y_inv_dbl; const T_partials_return half_nu_s2_overx_dbl = 2.0 * half_nu_dbl * half_s2_overx_dbl; const T_partials_return Pn = gamma_q(half_nu_dbl, half_nu_s2_overx_dbl); const T_partials_return gamma_p_deriv = exp(-half_nu_s2_overx_dbl) * pow(half_nu_s2_overx_dbl, half_nu_dbl - 1) / tgamma(half_nu_dbl); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += half_nu_s2_overx_dbl * y_inv_dbl * gamma_p_deriv / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += (0.5 * grad_reg_inc_gamma(half_nu_dbl, half_nu_s2_overx_dbl, gamma_vec[n], digamma_vec[n]) - half_s2_overx_dbl * gamma_p_deriv) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -2.0 * half_nu_dbl * s_dbl 
* y_inv_dbl * gamma_p_deriv / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_lpdf.hpp0000644000176200001440000000771514645137106025420 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LPDF_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of an inverse chi-squared density for y with the specified * degrees of freedom parameter. * The degrees of freedom parameter must be greater than 0. * y must be greater than 0. * \f{eqnarray*}{ y &\sim& \mbox{\sf{Inv-}}\chi^2_\nu \\ \log (p (y \, |\, \nu)) &=& \log \left( \frac{2^{-\nu / 2}}{\Gamma (\nu / 2)} y^{- (\nu / 2 + 1)} \exp^{-1 / (2y)} \right) \\ &=& - \frac{\nu}{2} \log(2) - \log (\Gamma (\nu / 2)) - (\frac{\nu}{2} + 1) \log(y) - \frac{1}{2y} \\ & & \mathrm{ where } \; y > 0 \f} * * @tparam T_y type of scalar * @tparam T_dof type of degrees of freedom * @param y A scalar variable. * @param nu Degrees of freedom. * @throw std::domain_error if nu is not greater than or equal to 0 * @throw std::domain_error if y is not greater than or equal to 0. 
*/ template * = nullptr> return_type_t inv_chi_square_lpdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_nu_ref = ref_type_if_t::value, T_dof>; static const char* function = "inv_chi_square_lpdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) nu_val = to_ref(as_value_column_array_or_scalar(nu_ref)); check_positive_finite(function, "Degrees of freedom parameter", nu_val); check_not_nan(function, "Random variable", y_val); if (size_zero(y, nu)) { return 0; } if (!include_summand::value) { return 0; } if (sum(promote_scalar(y_val <= 0))) { return LOG_ZERO; } auto ops_partials = make_partials_propagator(y_ref, nu_ref); const auto& log_y = to_ref_if::value>(log(y_val)); const auto& half_nu = to_ref(0.5 * nu_val); size_t N = max_size(y, nu); T_partials_return logp = -sum((half_nu + 1.0) * log_y); if (include_summand::value) { logp -= (sum(nu_val) * HALF_LOG_TWO + sum(lgamma(half_nu))) * N / math::size(nu); } if (include_summand::value) { const auto& inv_y = to_ref_if::value>(inv(y_val)); logp -= 0.5 * sum(inv_y) * N / math::size(y); if (!is_constant_all::value) { partials<0>(ops_partials) = (0.5 * inv_y - half_nu - 1.0) * inv_y; } } if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = -HALF_LOG_TWO - (digamma(half_nu) + log_y) * 0.5; } return ops_partials.build(logp); } template inline return_type_t inv_chi_square_lpdf(const T_y& y, const T_dof& nu) { return inv_chi_square_lpdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_rng.hpp0000644000176200001440000000527314645137106024270 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_RNG_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_RNG_HPP #include #include #include #include #include #include 
namespace stan { namespace math { /** \ingroup prob_dists * Return a student-t random variate for the given degrees of freedom, * location, and scale using the specified random number generator. * * nu, mu, and sigma can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_deg type of degrees of freedom parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * * @param nu (Sequence of) degrees of freedom parameter(s) * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) scale parameter(s) * @param rng random number generator * @return Student-t random variate * @throw std::domain_error if nu is nonpositive, mu is infinite, or sigma * is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type student_t_rng(const T_deg& nu, const T_loc& mu, const T_scale& sigma, RNG& rng) { using T_nu_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; using boost::variate_generator; using boost::random::student_t_distribution; static const char* function = "student_t_rng"; check_consistent_sizes(function, "Degrees of freedom parameter", nu, "Location parameter", mu, "Scale Parameter", sigma); T_nu_ref nu_ref = nu; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(nu, mu, sigma); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > rng_unit_student_t( rng, student_t_distribution<>(nu_vec[n])); output[n] = mu_vec[n] + sigma_vec[n] * rng_unit_student_t(); } return output.data(); } 
} // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_logit_rng.hpp0000644000176200001440000000336114645137106025444 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_RNG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_RNG_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Bernoulli random variate with logit-parameterized chance of success * using the specified random number generator. * * t can be a scalar or a one-dimensional container. * * @tparam T_t type of logit-parameterized chance of success parameter * @tparam RNG type of random number generator * @param t (Sequence of) logit-parameterized chance of success parameter(s) * @param rng random number generator * @return (Sequence of) Bernoulli random variate(s) * @throw std::domain_error if logit-parameterized chance of success parameter * is not finite */ template inline typename VectorBuilder::type bernoulli_logit_rng( const T_t& t, RNG& rng) { using boost::bernoulli_distribution; using boost::variate_generator; ref_type_t t_ref = t; check_finite("bernoulli_logit_rng", "Logit transformed probability parameter", t_ref); scalar_seq_view t_vec(t_ref); size_t N = stan::math::size(t); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > bernoulli_rng( rng, bernoulli_distribution<>(inv_logit(t_vec[n]))); output[n] = bernoulli_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/categorical_logit_rng.hpp0000644000176200001440000000271514645137106025730 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_RNG_HPP #define STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_RNG_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a draw from a Categorical distribution given a * a vector of 
unnormalized log probabilities and a psuedo-random * number generator. * * This is a convenience wrapper around * categorical_rng(softmax(beta), rng). * * @tparam RNG Type of pseudo-random number generator. * @param beta Vector of unnormalized log probabilities. * @param rng Pseudo-random number generator. * @return Categorical random variate */ template inline int categorical_logit_rng(const Eigen::VectorXd& beta, RNG& rng) { using boost::uniform_01; using boost::variate_generator; static const char* function = "categorical_logit_rng"; check_finite(function, "Log odds parameter", beta); variate_generator > uniform01_rng(rng, uniform_01<>()); Eigen::VectorXd theta = softmax(beta); Eigen::VectorXd index = cumulative_sum(theta); double c = uniform01_rng(); int b = 0; while (c > index(b)) { b++; } return b + 1; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_log.hpp0000644000176200001440000000175014645137106025112 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use neg_binomial_2_lpmf */ template return_type_t neg_binomial_2_log( const T_n& n, const T_location& mu, const T_precision& phi) { return neg_binomial_2_lpmf(n, mu, phi); } /** \ingroup prob_dists * @deprecated use neg_binomial_2_lpmf */ template inline return_type_t neg_binomial_2_log( const T_n& n, const T_location& mu, const T_precision& phi) { return neg_binomial_2_lpmf(n, mu, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_log.hpp0000644000176200001440000000157014645137106024574 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use exponential_lpdf */ template 
return_type_t exponential_log(const T_y& y, const T_inv_scale& beta) { return exponential_lpdf(y, beta); } /** \ingroup prob_dists * @deprecated use exponential_lpdf */ template inline return_type_t exponential_log( const T_y& y, const T_inv_scale& beta) { return exponential_lpdf(y, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_log_glm_lpmf.hpp0000644000176200001440000002446614645137106027000 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_GLM_LPMF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LOG_GLM_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the log PMF of the Generalized Linear Model (GLM) * with Negative-Binomial-2 distribution and log link function. * The idea is that neg_binomial_2_log_glm_lpmf(y, x, alpha, beta, phi) should * compute a more efficient version of * neg_binomial_2_log_lpmf(y, alpha + x * beta, phi) by using analytically * simplified gradients. * If containers are supplied, returns the log sum of the probabilities. * * @tparam T_y type of positive int vector of variates (labels); * this can also be a single positive integer value; * @tparam T_x_scalar type of a scalar in the matrix of independent variables * (features) * @tparam T_x_rows compile-time number of rows of `x`. It can be either * `Eigen::Dynamic` or 1. * @tparam T_alpha type of the intercept(s); * this can be a vector (of the same length as y) of intercepts or a single * value (for models with constant intercept); * @tparam T_beta type of the weight vector; * this can also be a scalar; * @tparam T_precision type of the (positive) precision(s); * this can be a vector (of the same length as y, for heteroskedasticity) * or a scalar. * * @param y failures count scalar or vector parameter. 
If it is a scalar it * will be broadcast - used for all instances. * @param x design matrix or row vector. If it is a row vector it will be * broadcast - used for all instances. * @param alpha intercept (in log odds) * @param beta weight vector * @param phi (vector of) precision parameter(s) * @return log probability or log sum of probabilities * @throw std::invalid_argument if container sizes mismatch. * @throw std::domain_error if x, beta or alpha is infinite. * @throw std::domain_error if phi is infinite or non-positive. * @throw std::domain_error if y is negative. */ template * = nullptr> return_type_t neg_binomial_2_log_glm_lpmf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta, const T_precision& phi) { using Eigen::Array; using Eigen::Dynamic; using Eigen::exp; using Eigen::log1p; using Eigen::Matrix; constexpr int T_x_rows = T_x::RowsAtCompileTime; using T_xbeta_partials = partials_return_t; using T_partials_return = partials_return_t; using T_precision_val = typename std::conditional_t< is_vector::value, Eigen::Array, -1, 1>, partials_return_t>; using T_sum_val = typename std::conditional_t< is_vector::value || is_vector::value, Eigen::Array, -1, 1>, partials_return_t>; using T_theta_tmp = typename std::conditional_t>; using T_xbeta_tmp = typename std::conditional_t>; using T_x_ref = ref_type_if_t::value, T_x>; using T_alpha_ref = ref_type_if_t::value, T_alpha>; using T_beta_ref = ref_type_if_t::value, T_beta>; using T_phi_ref = ref_type_if_t::value, T_precision>; const size_t N_instances = T_x_rows == 1 ? 
stan::math::size(y) : x.rows(); const size_t N_attributes = x.cols(); static const char* function = "neg_binomial_2_log_glm_lpmf"; check_consistent_size(function, "Vector of dependent variables", y, N_instances); check_consistent_size(function, "Weight vector", beta, N_attributes); check_consistent_size(function, "Vector of precision parameters", phi, N_instances); check_consistent_size(function, "Vector of intercepts", alpha, N_instances); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; const auto& beta_val = value_of(beta_ref); const auto& alpha_val = value_of(alpha_ref); const auto& beta_val_vec = to_ref(as_column_vector_or_scalar(beta_val)); const auto& alpha_val_vec = to_ref(as_column_vector_or_scalar(alpha_val)); check_finite(function, "Weight vector", beta_val_vec); check_finite(function, "Intercept", alpha_val_vec); if (size_zero(y, phi)) { return 0; } const auto& y_ref = to_ref(y); T_phi_ref phi_ref = phi; const auto& y_val = value_of(y_ref); const auto& phi_val = value_of(phi_ref); const auto& y_val_vec = to_ref(as_column_vector_or_scalar(y_val)); const auto& phi_val_vec = to_ref(as_column_vector_or_scalar(phi_val)); check_nonnegative(function, "Failures variables", y_val_vec); check_positive_finite(function, "Precision parameter", phi_val_vec); if (!include_summand::value) { return 0; } T_x_ref x_ref = x; const auto& x_val = to_ref_if::value>(value_of(x_ref)); const auto& y_arr = as_array_or_scalar(y_val_vec); const auto& phi_arr = as_array_or_scalar(phi_val_vec); Array theta(N_instances); if (T_x_rows == 1) { T_theta_tmp theta_tmp = forward_as((x_val * beta_val_vec)(0, 0)); theta = theta_tmp + as_array_or_scalar(alpha_val_vec); } else { theta = (x_val * beta_val_vec).array(); theta += as_array_or_scalar(alpha_val_vec); } check_finite(function, "Matrix of independent variables", theta); T_precision_val log_phi = log(phi_arr); Array logsumexp_theta_logphi = (theta > log_phi) .select(theta + log1p(exp(log_phi - theta)), log_phi + log1p(exp(theta 
- log_phi))); T_sum_val y_plus_phi = y_arr + phi_arr; // Compute the log-density. T_partials_return logp(0); if (include_summand::value) { if (is_vector::value) { logp -= sum(lgamma(y_arr + 1.0)); } else { logp -= sum(lgamma(y_arr + 1.0)) * N_instances; } } if (include_summand::value) { if (is_vector::value) { scalar_seq_view phi_vec(phi_val_vec); for (size_t n = 0; n < N_instances; ++n) { logp += multiply_log(phi_vec[n], phi_vec[n]) - lgamma(phi_vec[n]); } } else { using T_phi_scalar = scalar_type_t; logp += N_instances * (multiply_log(forward_as(phi_val), forward_as(phi_val)) - lgamma(forward_as(phi_val))); } } logp -= sum(y_plus_phi * logsumexp_theta_logphi); if (include_summand::value) { logp += sum(y_arr * theta); } if (include_summand::value) { if (is_vector::value || is_vector::value) { logp += sum(lgamma(y_plus_phi)); } else { logp += sum(lgamma(y_plus_phi)) * N_instances; } } // Compute the necessary derivatives. auto ops_partials = make_partials_propagator(x_ref, alpha_ref, beta_ref, phi_ref); if (!is_constant_all::value) { Array theta_exp = theta.exp(); if (!is_constant_all::value) { Matrix theta_derivative = y_arr - theta_exp * y_plus_phi / (theta_exp + phi_arr); if (!is_constant_all::value) { if (T_x_rows == 1) { edge<2>(ops_partials).partials_ = forward_as>( theta_derivative.sum() * x_val); } else { edge<2>(ops_partials).partials_ = x_val.transpose() * theta_derivative; } } if (!is_constant_all::value) { if (T_x_rows == 1) { edge<0>(ops_partials).partials_ = forward_as>( beta_val_vec * theta_derivative.sum()); } else { edge<0>(ops_partials).partials_ = (beta_val_vec * theta_derivative.transpose()).transpose(); } } if (!is_constant_all::value) { if (is_vector::value) { partials<1>(ops_partials) = std::move(theta_derivative); } else { partials<1>(ops_partials)[0] = sum(theta_derivative); } } } if (!is_constant_all::value) { if (is_vector::value) { edge<3>(ops_partials).partials_ = 1 - y_plus_phi / (theta_exp + phi_arr) + log_phi - logsumexp_theta_logphi 
+ digamma(y_plus_phi) - digamma(phi_arr); } else { partials<3>(ops_partials)[0] = N_instances + sum(-y_plus_phi / (theta_exp + phi_arr) + log_phi - logsumexp_theta_logphi + digamma(y_plus_phi) - digamma(phi_arr)); } } } return ops_partials.build(logp); } template inline return_type_t neg_binomial_2_log_glm_lpmf(const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta, const T_precision& phi) { return neg_binomial_2_log_glm_lpmf(y, x, alpha, beta, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_ccdf_log.hpp0000644000176200001440000000101014645137106025205 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use bernoulli_lccdf */ template return_type_t bernoulli_ccdf_log(const T_n& n, const T_prob& theta) { return bernoulli_lccdf(n, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lognormal_ccdf_log.hpp0000644000176200001440000000126114645137106025214 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use lognormal_lccdf */ template return_type_t lognormal_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return lognormal_lccdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_log.hpp0000644000176200001440000000226014645137106025246 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LOG_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use exp_mod_normal_lpdf */ template return_type_t exp_mod_normal_log( const T_y& y, const T_loc& mu, const 
T_scale& sigma, const T_inv_scale& lambda) { return exp_mod_normal_lpdf( y, mu, sigma, lambda); } /** \ingroup prob_dists * @deprecated use exp_mod_normal_lpdf */ template inline return_type_t exp_mod_normal_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { return exp_mod_normal_lpdf(y, mu, sigma, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_lpmf.hpp0000644000176200001440000001101614645137106025263 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LPMF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // NegBinomial(n|mu, phi) [mu >= 0; phi > 0; n >= 0] template * = nullptr> return_type_t neg_binomial_2_lpmf( const T_n& n, const T_location& mu, const T_precision& phi) { using T_partials_return = partials_return_t; using std::log; using T_n_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_phi_ref = ref_type_t; static const char* function = "neg_binomial_2_lpmf"; check_consistent_sizes(function, "Failures variable", n, "Location parameter", mu, "Precision parameter", phi); T_n_ref n_ref = n; T_mu_ref mu_ref = mu; T_phi_ref phi_ref = phi; check_nonnegative(function, "Failures variable", n_ref); check_positive_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Precision parameter", phi_ref); if (size_zero(n, mu, phi)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = make_partials_propagator(mu_ref, phi_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view phi_vec(phi_ref); size_t size_mu = stan::math::size(mu); size_t size_phi = stan::math::size(phi); size_t size_mu_phi = max_size(mu, phi); size_t size_n_phi = max_size(n, phi); size_t size_all = max_size(n, mu, phi); 
VectorBuilder mu_val(size_mu); for (size_t i = 0; i < size_mu; ++i) { mu_val[i] = mu_vec.val(i); } VectorBuilder phi_val(size_phi); VectorBuilder log_phi(size_phi); for (size_t i = 0; i < size_phi; ++i) { phi_val[i] = phi_vec.val(i); log_phi[i] = log(phi_val[i]); } VectorBuilder mu_plus_phi( size_mu_phi); VectorBuilder log_mu_plus_phi(size_mu_phi); for (size_t i = 0; i < size_mu_phi; ++i) { mu_plus_phi[i] = mu_val[i] + phi_val[i]; log_mu_plus_phi[i] = log(mu_plus_phi[i]); } VectorBuilder n_plus_phi( size_n_phi); for (size_t i = 0; i < size_n_phi; ++i) { n_plus_phi[i] = n_vec[i] + phi_val[i]; } for (size_t i = 0; i < size_all; i++) { if (include_summand::value) { logp += binomial_coefficient_log(n_plus_phi[i] - 1, n_vec[i]); } if (include_summand::value) { logp += multiply_log(n_vec[i], mu_val[i]); } logp += -phi_val[i] * (log1p(mu_val[i] / phi_val[i])) - n_vec[i] * log_mu_plus_phi[i]; if (!is_constant_all::value) { partials<0>(ops_partials)[i] += n_vec[i] / mu_val[i] - (n_vec[i] + phi_val[i]) / mu_plus_phi[i]; } if (!is_constant_all::value) { T_partials_return log_term; if (mu_val[i] < phi_val[i]) { log_term = log1p(-mu_val[i] / mu_plus_phi[i]); } else { log_term = log_phi[i] - log_mu_plus_phi[i]; } partials<1>(ops_partials)[i] += (mu_val[i] - n_vec[i]) / mu_plus_phi[i] + log_term - digamma(phi_val[i]) + digamma(n_plus_phi[i]); } } return ops_partials.build(logp); } template inline return_type_t neg_binomial_2_lpmf( const T_n& n, const T_location& mu, const T_precision& phi) { return neg_binomial_2_lpmf(n, mu, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/logistic_lccdf.hpp0000644000176200001440000000650714645137106024362 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGISTIC_LCCDF_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> 
return_type_t logistic_lccdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "logistic_lccdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == NEGATIVE_INFTY) { return ops_partials.build(0.0); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(negative_infinity()); } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return sigma_inv_vec = 1.0 / sigma_vec.val(n); const T_partials_return Pn = 1.0 - 1.0 / (1.0 + exp(-(y_dbl - mu_dbl) * sigma_inv_vec)); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= -exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= -(y_dbl - mu_dbl) * 
sigma_inv_vec * exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_prec_rng.hpp0000644000176200001440000000575114645137106025773 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_PREC_RNG_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_PREC_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a multivariate normal random variate with the given location * and precision using the specified random number generator. * * mu can be either an Eigen::VectorXd, an Eigen::RowVectorXd, or a * std::vector of either of those types. * * @tparam T_loc Type of location parameter * @tparam RNG Type of pseudo-random number generator * @param mu (Sequence of) location parameter(s) * @param S Precision matrix * @param rng random number generator * @throw std::domain_error if S is not positive definite, or * std::invalid_argument if the length of (each) mu is not equal to * the number of rows and columns in S */ template inline typename StdVectorBuilder::type multi_normal_prec_rng(const T_loc &mu, const Eigen::MatrixXd &S, RNG &rng) { using boost::normal_distribution; using boost::variate_generator; static const char *function = "multi_normal_prec_rng"; check_positive(function, "Precision matrix rows", S.rows()); vector_seq_view mu_vec(mu); check_positive(function, "number of location parameter vectors", mu_vec.size()); size_t size_mu = mu_vec[0].size(); size_t N = mu_vec.size(); for (size_t i = 1; i < N; i++) { check_size_match(function, "Size of one of the vectors of " "the location variable", mu_vec[i].size(), "Size of the first vector of the " "location variable", size_mu); } check_size_match(function, "Rows of location parameter", size_mu, "Rows of S", S.rows()); for (size_t i = 0; i < N; i++) { check_finite(function, "Location parameter", 
mu_vec[i]); } const auto &S_ref = to_ref(S); check_finite(function, "Precision matrix", S_ref); check_symmetric(function, "Precision matrix", S_ref); Eigen::LLT llt_of_S = S_ref.llt(); check_pos_definite(function, "precision matrix argument", llt_of_S); StdVectorBuilder output(N); variate_generator> std_normal_rng( rng, normal_distribution<>(0, 1)); for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(S.cols()); for (int i = 0; i < S.cols(); i++) { z(i) = std_normal_rng(); } output[n] = as_column_vector_or_scalar(mu_vec[n]) + llt_of_S.matrixU().solve(z); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_cdf.hpp0000644000176200001440000000526614645137106025573 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_CDF_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the CDF for the Poisson-binomial distribution evaluated at the * specified number of successes and probabilities of successes. 
* * @tparam T_y type of number of successes parameter * @tparam T_theta type of chance of success parameters * @param y input scalar, vector, or nested vector of numbers of successes * @param theta array of chances of success parameters * @return sum of log probabilities * @throw std::domain_error if y is out of bounds * @throw std::domain_error if theta is not a valid vector of probabilities * @throw std::invalid_argument If y and theta are different lengths */ template return_type_t poisson_binomial_cdf(const T_y& y, const T_theta& theta) { static const char* function = "poisson_binomial_cdf"; size_t size_theta = size_mvt(theta); if (size_theta > 1) { check_consistent_sizes(function, "Successes variables", y, "Probability parameters", theta); } size_t max_sz = std::max(stan::math::size(y), size_theta); scalar_seq_view y_vec(y); vector_seq_view theta_vec(theta); for (size_t i = 0; i < max_sz; ++i) { check_bounded(function, "Successes variable", y_vec[i], 0, theta_vec[i].size()); check_finite(function, "Probability parameters", theta_vec[i]); check_bounded(function, "Probability parameters", theta_vec[i], 0.0, 1.0); } return_type_t lcdf = 0.0; for (size_t i = 0; i < max_sz; ++i) { auto x = log_sum_exp(poisson_binomial_log_probs(y_vec[i], theta_vec[i])); lcdf += x; } return exp(lcdf); } template return_type_t poisson_binomial_cdf(const T_y& y, const T_theta& theta) { return poisson_binomial_cdf(y, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_log_lpmf.hpp0000644000176200001440000000633214645137106024757 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LOG_LPMF_HPP #define STAN_MATH_PRIM_PROB_POISSON_LOG_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // PoissonLog(n|alpha) [n >= 0] = Poisson(n|exp(alpha)) template * = nullptr> return_type_t 
poisson_log_lpmf(const T_n& n, const T_log_rate& alpha) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_if_t::value, T_n>; using T_alpha_ref = ref_type_if_t::value, T_log_rate>; using std::isinf; static const char* function = "poisson_log_lpmf"; check_consistent_sizes(function, "Random variable", n, "Log rate parameter", alpha); T_n_ref n_ref = n; T_alpha_ref alpha_ref = alpha; decltype(auto) n_val = to_ref(as_value_column_array_or_scalar(n_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_nonnegative(function, "Random variable", n_val); check_not_nan(function, "Log rate parameter", alpha_val); if (size_zero(n, alpha)) { return 0.0; } if (!include_summand::value) { return 0.0; } if (sum(promote_scalar(INFTY == alpha_val))) { return LOG_ZERO; } size_t N = max_size(n, alpha); scalar_seq_view n_vec(n_val); scalar_seq_view alpha_vec(alpha_val); for (size_t i = 0; i < N; i++) { if (NEGATIVE_INFTY == alpha_vec[i] && n_vec[i] != 0) { return LOG_ZERO; } } auto ops_partials = make_partials_propagator(alpha_ref); const auto& exp_alpha = to_ref_if::value>(exp(alpha_val)); T_partials_return logp = sum(n_val * alpha_val); if (include_summand::value) { logp -= sum(exp_alpha) * N / math::size(alpha); } if (include_summand::value) { logp -= sum(lgamma(n_val + 1.0)) * N / math::size(n); } if (!is_constant_all::value) { partials<0>(ops_partials) = n_val - exp_alpha; } return ops_partials.build(logp); } template inline return_type_t poisson_log_lpmf(const T_n& n, const T_log_rate& alpha) { return poisson_log_lpmf(n, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_ccdf_log.hpp0000644000176200001440000000121214645137106027530 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * 
@deprecated use scaled_inv_chi_square_lccdf */ template return_type_t scaled_inv_chi_square_ccdf_log( const T_y& y, const T_dof& nu, const T_scale& s) { return scaled_inv_chi_square_lccdf(y, nu, s); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_student_t_rng.hpp0000644000176200001440000000712614645137106025501 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_RNG_HPP #define STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_RNG_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a multivariate student-t random variate with the given degrees of * freedom location and Cholesky factor the scale matrix * using the specified random number generator. * * mu can be either an Eigen::VectorXd, an Eigen::RowVectorXd, or a std::vector * of either of those types. * * @tparam t_loc Type of location parameter * @tparam rng type of pseudo-random number generator * @param nu a scalar indicating the degrees of freedom parameter * @param mu An Eigen::VectorXd, Eigen::RowVectorXd, or std::vector * of location values for the multivariate student t * @param S scale matrix * @param rng random number generator * @return eigen vector of multivariate student t random variates * with the given nu, mu, S * @throw std::domain_error if S is not positive definite, any value in mu is * not finite, nu is not positive, or nu is NaN * @throw std::invalid_argument if the length of (each) mu is not equal to the * number of rows and columns in S */ template inline typename StdVectorBuilder::type multi_student_t_rng( double nu, const T_loc& mu, const Eigen::Matrix& S, RNG& rng) { using boost::normal_distribution; using boost::variate_generator; using boost::random::gamma_distribution; static const char* function = "multi_student_t_rng"; check_not_nan(function, "Degrees of freedom parameter", nu); check_positive(function, "Degrees of freedom 
parameter", nu); check_positive(function, "Covariance matrix rows", S.rows()); vector_seq_view mu_vec(mu); size_t size_mu = mu_vec[0].size(); size_t N = size_mvt(mu); for (size_t i = 1; i < N; i++) { check_size_match(function, "Size of one of the vectors of " "the location variable", mu_vec[i].size(), "Size of the first vector of the " "location variable", size_mu); } check_size_match(function, "Size of random variable", size_mu, "rows of scale parameter", S.rows()); for (size_t i = 0; i < N; i++) { check_finite(function, "Location parameter", mu_vec[i]); } const auto& S_ref = to_ref(S); check_not_nan(function, "Covariance matrix", S_ref); check_symmetric(function, "Covariance matrix", S_ref); Eigen::LLT llt_of_S = S_ref.llt(); check_pos_definite(function, "covariance matrix argument", llt_of_S); StdVectorBuilder output(N); variate_generator > std_normal_rng( rng, normal_distribution<>(0, 1)); double w = inv_gamma_rng(nu / 2, nu / 2, rng); for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(S.cols()); for (int i = 0; i < S.cols(); i++) { z(i) = std_normal_rng(); } z *= std::sqrt(w); output[n] = as_column_vector_or_scalar(mu_vec[n]) + llt_of_S.matrixL() * z; } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_lccdf.hpp0000644000176200001440000000670714645137106024551 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_LCCDF_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_LCCDF_HPP #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Calculates the log of the complement of the cumulative distribution * function of the von Mises distribution: * * \f$VonMisesLCCDF(x, \mu, \kappa) = \log ( 1.0 - \frac{1}{2\pi I_0(\kappa)} * \int_{-\pi}^x\f$ \f$e^{\kappa cos(t - \mu)} dt )\f$ * * where * * \f$x \in [-\pi, \pi]\f$, \f$\mu \in \mathbb{R}\f$, * and \f$\kappa \in \mathbb{R}^+\f$. 
* * @param x Variate on the interval \f$[-pi, pi]\f$ * @param mu The mean of the distribution * @param k The inverse scale of the distriubtion * @return The log of the von Mises cdf evaluated at the specified arguments * @tparam T_x Type of x * @tparam T_mu Type of mean parameter * @tparam T_k Type of inverse scale parameter */ template inline return_type_t von_mises_lccdf(const T_x& x, const T_mu& mu, const T_k& k) { using internal::von_mises_cdf_centered; const double pi = stan::math::pi(); using T_return = return_type_t; using T_x_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_k_ref = ref_type_t; static char const* function = "von_mises_lccdf"; check_consistent_sizes(function, "Random variable", x, "Location parameter", mu, "Scale parameter", k); T_x_ref x_ref = x; T_mu_ref mu_ref = mu; T_k_ref k_ref = k; check_bounded(function, "Random variable", value_of(x_ref), -pi, pi); check_finite(function, "Location parameter", value_of(mu_ref)); check_positive(function, "Scale parameter", value_of(k_ref)); if (size_zero(x, mu, k)) { return 0.0; } T_return res(0.0); scalar_seq_view x_vec(x_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view k_vec(k_ref); size_t N = max_size(x, mu, k); for (size_t n = 0; n < N; ++n) { auto x_n = x_vec[n]; auto mu_n = mu_vec[n]; auto k_n = k_vec[n]; if (x_n == -pi) { res += 0.0; } else if (x_n == pi) { res += NEGATIVE_INFTY; } else { // shift x so that mean is 0 T_return x2 = x_n - mu_n; // x2 is on an interval (2*n*pi, (2*n + 1)*pi), move it to (-pi, pi) x2 += pi; const auto x_floor = floor(x2 / TWO_PI); const auto x_modded = x2 - x_floor * TWO_PI; x2 = x_modded - pi; // mu is on an interval (2*n*pi, (2*n + 1)*pi), move it to (-pi, pi) T_return mu2 = mu_n + pi; const auto mu_floor = floor(mu2 / TWO_PI); const auto mu_modded = mu2 - mu_floor * TWO_PI; mu2 = mu_modded - pi; // find cut T_return cut, bound_val; if (mu2 < 0) { cut = mu2 + pi; bound_val = -pi - mu2; } if (mu2 >= 0) { cut = mu2 - pi; bound_val = pi - mu2; } T_return 
f_bound_val = von_mises_cdf_centered(bound_val, k_n); T_return cdf_n; if (x_n <= cut) { cdf_n = von_mises_cdf_centered(x2, k_n) - f_bound_val; } else { cdf_n = von_mises_cdf_centered(x2, k_n) + 1 - f_bound_val; } if (cdf_n < 0.0) cdf_n = 0.0; res += log1m(cdf_n); } } return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_cdf_log.hpp0000644000176200001440000000120714645137106025061 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use von_mises_lcdf */ template inline return_type_t von_mises_cdf_log(const T_x& x, const T_mu& mu, const T_k& k) { return von_mises_lcdf(x, mu, k); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_gp_cholesky_lpdf.hpp0000644000176200001440000000662414645137106026140 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_GP_CHOLESKY_LPDF_HPP #define STAN_MATH_PRIM_PROB_MULTI_GP_CHOLESKY_LPDF_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of a multivariate Gaussian Process for the given y, w, and * a Cholesky factor L of the kernel matrix Sigma. * Sigma = LL', a square, semi-positive definite matrix. * y is a dxN matrix, where each column is a different observation and each * row is a different output dimension. The Gaussian Process is assumed to * have a scaled kernel matrix with a different scale for each output dimension. * This distribution is equivalent to: * for (i in 1:d) row(y, i) ~ multi_normal(0, (1/w[i])*LL'). 
* * @tparam T_y type of scalar * @tparam T_covar type of kernel * @tparam T_w type of weight * * @param y A dxN matrix * @param L The Cholesky decomposition of a kernel matrix * @param w A d-dimensional vector of positive inverse scale parameters for each * output. * @return The log of the multivariate GP density. * @throw std::domain_error if Sigma is not square, not symmetric, * or not semi-positive definite. */ template * = nullptr, require_eigen_col_vector_t* = nullptr> return_type_t multi_gp_cholesky_lpdf(const T_y& y, const T_covar& L, const T_w& w) { using T_lp = return_type_t; static const char* function = "multi_gp_cholesky_lpdf"; check_size_match(function, "Size of random variable (rows y)", y.rows(), "Size of kernel scales (w)", w.size()); check_size_match(function, "Size of random variable", y.cols(), "rows of covariance parameter", L.rows()); const auto& y_ref = to_ref(y); const auto& L_ref = to_ref(L); const auto& w_ref = to_ref(w); check_positive_finite(function, "Kernel scales", w_ref); check_finite(function, "Random variable", y_ref); if (y.rows() == 0) { return 0; } T_lp lp(0); if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * y.size(); } if (include_summand::value) { lp -= sum(log(L_ref.diagonal())) * y.rows(); } if (include_summand::value) { lp += 0.5 * y.cols() * sum(log(w_ref)); } if (include_summand::value) { T_lp sum_lp_vec(0); for (int i = 0; i < y.rows(); i++) { sum_lp_vec += w_ref.coeff(i) * dot_self(mdivide_left_tri_low(L_ref, y_ref.row(i).transpose())); } lp -= 0.5 * sum_lp_vec; } return lp; } template inline return_type_t multi_gp_cholesky_lpdf(const T_y& y, const T_covar& L, const T_w& w) { return multi_gp_cholesky_lpdf(y, L, w); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_cdf.hpp0000644000176200001440000001034314645137106023507 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_CDF_HPP #define STAN_MATH_PRIM_PROB_NORMAL_CDF_HPP #include #include #include #include 
#include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Calculates the normal cumulative distribution function for the given * variate, location, and scale. * * \f$\Phi(x) = \frac{1}{\sqrt{2 \pi}} \int_{-\inf}^x e^{-t^2/2} dt\f$. * * @tparam T_y type of y * @tparam T_loc type of mean parameter * @tparam T_scale type of standard deviation parameter * @param y A scalar variate. * @param mu The location of the normal distribution. * @param sigma The scale of the normal distribution * @return The unit normal cdf evaluated at the specified arguments. */ template * = nullptr> inline return_type_t normal_cdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "normal_cdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 1.0; } T_partials_return cdf(1.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return scaled_diff = (y_dbl - mu_dbl) / (sigma_dbl * SQRT_TWO); T_partials_return cdf_n; if (scaled_diff < -37.5 * INV_SQRT_TWO) { cdf_n = 0.0; } else if (scaled_diff < -5.0 * INV_SQRT_TWO) { cdf_n = 0.5 * erfc(-scaled_diff); } else if 
(scaled_diff > 8.25 * INV_SQRT_TWO) { cdf_n = 1; } else { cdf_n = 0.5 * (1.0 + erf(scaled_diff)); } cdf *= cdf_n; if (!is_constant_all::value) { const T_partials_return rep_deriv = (scaled_diff < -37.5 * INV_SQRT_TWO) ? 0.0 : INV_SQRT_TWO_PI * exp(-scaled_diff * scaled_diff) / (cdf_n * sigma_dbl); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= rep_deriv * scaled_diff * SQRT_TWO; } } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(mu); ++n) { partials<1>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(sigma); ++n) { partials<2>(ops_partials)[n] *= cdf; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_log_glm_lpmf.hpp0000644000176200001440000001552114645137106025616 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LOG_GLM_LPMF_HPP #define STAN_MATH_PRIM_PROB_POISSON_LOG_GLM_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the log PMF of the Generalized Linear Model (GLM) * with Poisson distribution and log link function. * The idea is that poisson_log_glm_lpmf(y, x, alpha, beta) should * compute a more efficient version of poisson_log_lpmf(y, alpha + x * beta) * by using analytically simplified gradients. * If containers are supplied, returns the log sum of the probabilities. 
* * @tparam T_y type of vector of variates (labels), integers >=0; * this can also be a single positive integer; * @tparam T_x type the matrix of independent variables (features) * @tparam T_x_rows compile-time number of rows of `x`. It can be either * `Eigen::Dynamic` or 1. * @tparam T_alpha type of the intercept(s); * this can be a vector (of the same length as y) of intercepts or a single * value (for models with constant intercept); * @tparam T_beta type of the weight vector; * this can also be a single value; * @param y positive integer scalar or vector parameter. If it is a scalar it * will be broadcast - used for all instances. * @param x design matrix or row vector. If it is a row vector it will be * broadcast - used for all instances. * @param alpha intercept (in log odds) * @param beta weight vector * @return log probability or log sum of probabilities * @throw std::domain_error if x, beta or alpha is infinite. * @throw std::domain_error if y is negative. * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr> return_type_t poisson_log_glm_lpmf(const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta) { using Eigen::Array; using Eigen::Dynamic; using Eigen::Matrix; using std::exp; using std::isfinite; constexpr int T_x_rows = T_x::RowsAtCompileTime; using T_xbeta_partials = partials_return_t; using T_partials_return = partials_return_t; using T_theta_tmp = typename std::conditional_t>; using T_xbeta_tmp = typename std::conditional_t>; using T_x_ref = ref_type_if_t::value, T_x>; using T_alpha_ref = ref_type_if_t::value, T_alpha>; using T_beta_ref = ref_type_if_t::value, T_beta>; const size_t N_instances = T_x_rows == 1 ? 
stan::math::size(y) : x.rows(); const size_t N_attributes = x.cols(); static const char* function = "poisson_log_glm_lpmf"; check_consistent_size(function, "Vector of dependent variables", y, N_instances); check_consistent_size(function, "Weight vector", beta, N_attributes); check_consistent_size(function, "Vector of intercepts", alpha, N_instances); const auto& y_ref = to_ref(y); check_nonnegative(function, "Vector of dependent variables", y_ref); if (size_zero(y)) { return 0; } if (!include_summand::value) { return 0; } T_x_ref x_ref = x; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; const auto& y_val = value_of(y_ref); const auto& x_val = to_ref_if::value>(value_of(x_ref)); const auto& alpha_val = value_of(alpha_ref); const auto& beta_val = value_of(beta_ref); const auto& y_val_vec = to_ref(as_column_vector_or_scalar(y_val)); const auto& alpha_val_vec = as_column_vector_or_scalar(alpha_val); const auto& beta_val_vec = to_ref_if::value>( as_column_vector_or_scalar(beta_val)); Array theta(N_instances); if (T_x_rows == 1) { T_theta_tmp theta_tmp = forward_as((x_val * beta_val_vec).coeff(0, 0)); theta = theta_tmp + as_array_or_scalar(alpha_val_vec); } else { theta = x_val * beta_val_vec; theta += as_array_or_scalar(alpha_val_vec); } Matrix theta_derivative = as_array_or_scalar(y_val_vec) - exp(theta.array()); T_partials_return theta_derivative_sum = sum(theta_derivative); if (!isfinite(theta_derivative_sum)) { check_finite(function, "Weight vector", beta); check_finite(function, "Intercept", alpha); check_finite(function, "Matrix of independent variables", theta); } T_partials_return logp(0); if (include_summand::value) { logp -= sum(lgamma(as_array_or_scalar(y_val_vec) + 1)); } logp += sum(as_array_or_scalar(y_val_vec) * theta.array() - exp(theta.array())); auto ops_partials = make_partials_propagator(x_ref, alpha_ref, beta_ref); // Compute the necessary derivatives. 
if (!is_constant_all::value) { if (T_x_rows == 1) { edge<2>(ops_partials).partials_ = forward_as>( theta_derivative.sum() * x_val); } else { partials<2>(ops_partials) = x_val.transpose() * theta_derivative; } } if (!is_constant_all::value) { if (T_x_rows == 1) { edge<0>(ops_partials).partials_ = forward_as>( beta_val_vec * theta_derivative.sum()); } else { edge<0>(ops_partials).partials_ = (beta_val_vec * theta_derivative.transpose()).transpose(); } } if (!is_constant_all::value) { if (is_vector::value) { partials<1>(ops_partials) = theta_derivative; } else { partials<1>(ops_partials)[0] = theta_derivative_sum; } } return ops_partials.build(logp); } template inline return_type_t poisson_log_glm_lpmf( const T_y& y, const T_x& x, const T_alpha& alpha, const T_beta& beta) { return poisson_log_glm_lpmf(y, x, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_lpmf.hpp0000644000176200001440000000751114645137106024417 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LPMF_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log PMF of the Bernoulli distribution. If containers are * supplied, returns the log sum of the probabilities. * * @tparam T_n type of integer parameters * @tparam T_prob type of chance of success parameters * @param n integer parameter * @param theta chance of success parameter * @return log probability or log sum of probabilities * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch. 
*/ template * = nullptr> return_type_t bernoulli_lpmf(const T_n& n, const T_prob& theta) { using T_partials_return = partials_return_t; using T_theta_ref = ref_type_t; using T_n_ref = ref_type_t; using std::log; static const char* function = "bernoulli_lpmf"; check_consistent_sizes(function, "Random variable", n, "Probability parameter", theta); const T_n_ref n_ref = to_ref(n); const T_theta_ref theta_ref = to_ref(theta); check_bounded(function, "n", n_ref, 0, 1); check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); if (size_zero(n, theta)) { return 0.0; } if (!include_summand::value) { return 0.0; } T_partials_return logp(0.0); auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view theta_vec(theta_ref); size_t N = max_size(n, theta); if (math::size(theta) == 1) { size_t sum = 0; for (size_t n = 0; n < N; n++) { sum += n_vec.val(n); } const T_partials_return theta_dbl = theta_vec.val(0); // avoid nans when sum == N or sum == 0 if (sum == N) { logp += N * log(theta_dbl); if (!is_constant_all::value) { partials<0>(ops_partials)[0] += N / theta_dbl; } } else if (sum == 0) { logp += N * log1m(theta_dbl); if (!is_constant_all::value) { partials<0>(ops_partials)[0] += N / (theta_dbl - 1); } } else { const T_partials_return log_theta = log(theta_dbl); const T_partials_return log1m_theta = log1m(theta_dbl); logp += sum * log_theta; logp += (N - sum) * log1m_theta; if (!is_constant_all::value) { partials<0>(ops_partials)[0] += sum * inv(theta_dbl); partials<0>(ops_partials)[0] += (N - sum) * inv(theta_dbl - 1); } } } else { for (size_t n = 0; n < N; n++) { const int n_int = n_vec.val(n); const T_partials_return theta_dbl = theta_vec.val(n); if (n_int == 1) { logp += log(theta_dbl); } else { logp += log1m(theta_dbl); } if (!is_constant_all::value) { if (n_int == 1) { partials<0>(ops_partials)[n] += inv(theta_dbl); } else { partials<0>(ops_partials)[n] += inv(theta_dbl - 1); } } } } return 
ops_partials.build(logp); } template inline return_type_t bernoulli_lpmf(const T_y& n, const T_prob& theta) { return bernoulli_lpmf(n, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_lcdf.hpp0000644000176200001440000000532714645137106025745 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CDF for the Poisson-binomial distribution evaluated at the * specified number of successes and probabilities of successes. * * @tparam T_y type of number of successes parameter * @tparam T_theta type of chance of success parameters * @param y input scalar, vector, or nested vector of numbers of successes * @param theta array of chances of success parameters * @return sum of log probabilities * @throw std::domain_error if y is out of bounds * @throw std::domain_error if theta is not a valid vector of probabilities * @throw std::invalid_argument If y and theta are different lengths */ template return_type_t poisson_binomial_lcdf(const T_y& y, const T_theta& theta) { static const char* function = "poisson_binomial_lcdf"; size_t size_theta = size_mvt(theta); if (size_theta > 1) { check_consistent_sizes(function, "Successes variables", y, "Probability parameters", theta); } size_t max_sz = std::max(stan::math::size(y), size_theta); scalar_seq_view y_vec(y); vector_seq_view theta_vec(theta); for (size_t i = 0; i < max_sz; ++i) { check_bounded(function, "Successes variable", y_vec[i], 0, theta_vec[i].size()); check_finite(function, "Probability parameters", theta_vec.val(i)); check_bounded(function, "Probability parameters", theta_vec.val(i), 0.0, 1.0); } return_type_t lcdf = 0.0; for (size_t i = 0; i < max_sz; ++i) { auto x = 
log_sum_exp(poisson_binomial_log_probs(y_vec[i], theta_vec[i])); lcdf += x; } return lcdf; } template return_type_t poisson_binomial_lcdf(const T_y& y, const T_theta& theta) { return poisson_binomial_lcdf(y, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/categorical_logit_log.hpp0000644000176200001440000000174014645137106025720 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_LOG_HPP #define STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_LOG_HPP #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use categorical_logit_lpmf */ template inline return_type_t categorical_logit_log(const T_n& ns, const T_prob& beta) { return categorical_logit_lpmf(ns, beta); } /** \ingroup multivar_dists * @deprecated use categorical_logit_lpmf */ template inline return_type_t categorical_logit_log(const T_n& ns, const T_prob& beta) { return categorical_logit_lpmf(ns, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_binomial_lcdf.hpp0000644000176200001440000001227314645137106025164 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_BETA_BINOMIAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CDF of the Beta-Binomial distribution with given population * size, prior success, and prior failure parameters. Given containers of * matching sizes, returns the log sum of probabilities. 
* * @tparam T_n type of success parameter * @tparam T_N type of population size parameter * @tparam T_size1 type of prior success parameter * @tparam T_size2 type of prior failure parameter * * @param n success parameter * @param N population size parameter * @param alpha prior success parameter * @param beta prior failure parameter * @return log probability or log sum of probabilities * @throw std::domain_error if N, alpha, or beta fails to be positive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t beta_binomial_lcdf(const T_n& n, const T_N& N, const T_size1& alpha, const T_size2& beta) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_N_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "beta_binomial_lcdf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "First prior sample size parameter", alpha, "Second prior sample size parameter", beta); if (size_zero(n, N, alpha, beta)) { return 0; } T_N_ref N_ref = N; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_nonnegative(function, "Population size parameter", N_ref); check_positive_finite(function, "First prior sample size parameter", alpha_ref); check_positive_finite(function, "Second prior sample size parameter", beta_ref); T_partials_return P(0.0); auto ops_partials = make_partials_propagator(alpha_ref, beta_ref); scalar_seq_view n_vec(n); scalar_seq_view N_vec(N_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t max_size_seq_view = max_size(n, N, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as neg infinity for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(negative_infinity()); } } for (size_t i = 0; i < max_size_seq_view; i++) { const T_partials_return n_dbl = n_vec.val(i); 
const T_partials_return N_dbl = N_vec.val(i); // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_dbl >= N_dbl) { continue; } const T_partials_return alpha_dbl = alpha_vec.val(i); const T_partials_return beta_dbl = beta_vec.val(i); const T_partials_return N_minus_n = N_dbl - n_dbl; const T_partials_return mu = alpha_dbl + n_dbl + 1; const T_partials_return nu = beta_dbl + N_minus_n - 1; const T_partials_return one = 1; const T_partials_return F = hypergeometric_3F2({one, mu, 1 - N_minus_n}, {n_dbl + 2, 1 - nu}, one); T_partials_return C = lbeta(nu, mu) - lbeta(alpha_dbl, beta_dbl) - lbeta(N_minus_n, n_dbl + 2); C = F * exp(C) / (N_dbl + 1); const T_partials_return Pi = 1 - C; P += log(Pi); T_partials_return digammaDiff = is_constant_all::value ? 0 : digamma(alpha_dbl + beta_dbl) - digamma(mu + nu); T_partials_return dF[6]; if (!is_constant_all::value) { grad_F32(dF, one, mu, 1 - N_minus_n, n_dbl + 2, 1 - nu, one); } if (!is_constant_all::value) { const T_partials_return g = -C * (digamma(mu) - digamma(alpha_dbl) + digammaDiff + dF[1] / F); partials<0>(ops_partials)[i] += g / Pi; } if (!is_constant_all::value) { const T_partials_return g = -C * (digamma(nu) - digamma(beta_dbl) + digammaDiff - dF[4] / F); partials<1>(ops_partials)[i] += g / Pi; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_cdf.hpp0000644000176200001440000000774614645137106025010 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_CDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t pareto_type_2_cdf( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = 
ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_lambda_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using std::pow; static const char* function = "pareto_type_2_cdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", lambda, "Shape parameter", alpha); if (size_zero(y, mu, lambda, alpha)) { return 1.0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_lambda_ref lambda_ref = lambda; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", lambda_val); check_positive_finite(function, "Shape parameter", alpha_val); check_greater_or_equal(function, "Random variable", y_val, mu_val); const auto& summed = to_ref_if::value>( lambda_val + y_val - mu_val); const auto& temp = to_ref_if::value>(summed / lambda_val); const auto& p1_pow_alpha = to_ref_if::value>( pow(temp, -alpha_val)); T_partials_return P = prod(1.0 - p1_pow_alpha); auto ops_partials = make_partials_propagator(y_ref, mu_ref, lambda_ref, alpha_ref); if (!is_constant_all::value) { const auto& P_div_Pn = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( P / (1.0 - p1_pow_alpha)); if (!is_constant_all::value) { auto grad_1_2 = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(p1_pow_alpha / summed * alpha_val * P_div_Pn); if (!is_constant_all::value) { partials<1>(ops_partials) = -grad_1_2; } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = (mu_val - y_val) / lambda_val * grad_1_2; } if (!is_constant_all::value) { partials<0>(ops_partials) = 
std::move(grad_1_2); } } if (!is_constant_all::value) { partials<3>(ops_partials) = log(temp) * p1_pow_alpha * P_div_Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_log.hpp0000644000176200001440000000220314645137106025014 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LOG_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use pareto_type_2_lpdf */ template return_type_t pareto_type_2_log( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { return pareto_type_2_lpdf(y, mu, lambda, alpha); } /** \ingroup prob_dists * @deprecated use pareto_type_2_lpdf */ template inline return_type_t pareto_type_2_log( const T_y& y, const T_loc& mu, const T_scale& lambda, const T_shape& alpha) { return pareto_type_2_lpdf(y, mu, lambda, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_type_2_rng.hpp0000644000176200001440000000552614645137106025034 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_TYPE_2_RNG_HPP #define STAN_MATH_PRIM_PROB_PARETO_TYPE_2_RNG_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Pareto type 2 random variate for the given location, * scale, and shape using the specified random number generator. * * mu, lambda, and alpha can each be a scalar or a one-dimensional container. * Any non-scalar inputs must be the same size. 
* * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam T_shape type of shape parameter * @tparam RNG type of random number generator * * @param mu (Sequence of) location parameter(s) * @param lambda (Sequence of) scale parameter(s) * @param alpha (Sequence of) shape parameter(s) * @param rng random number generator * @return (Sequence of) Pareto type 2 random variate(s) * @throw std::domain_error if mu is infinite or lambda or alpha are * nonpositive, * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type pareto_type_2_rng(const T_loc& mu, const T_scale& lambda, const T_shape& alpha, RNG& rng) { using boost::variate_generator; using boost::random::uniform_real_distribution; static const char* function = "pareto_type_2_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", lambda, "Shape Parameter", alpha); const auto& mu_ref = to_ref(mu); const auto& lambda_ref = to_ref(lambda); const auto& alpha_ref = to_ref(alpha); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", lambda_ref); check_positive_finite(function, "Shape parameter", alpha_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view lambda_vec(lambda_ref); scalar_seq_view alpha_vec(alpha_ref); size_t N = max_size(mu, lambda, alpha); VectorBuilder output(N); variate_generator > uniform_rng( rng, uniform_real_distribution<>(0.0, 1.0)); for (size_t n = 0; n < N; ++n) { output[n] = (std::pow(1.0 - uniform_rng(), -1.0 / alpha_vec[n]) - 1.0) * lambda_vec[n] + mu_vec[n]; } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lognormal_cdf.hpp0000644000176200001440000000660414645137106024216 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_CDF_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_CDF_HPP #include #include #include #include #include #include #include 
#include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t lognormal_cdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "lognormal_cdf"; T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, mu, sigma)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); if (sum(promote_scalar(y_val == 0))) { return ops_partials.build(0.0); } const auto& log_y = log(y_val); const auto& scaled_diff = to_ref_if::value>( (log_y - mu_val) / (sigma_val * SQRT_TWO)); const auto& erfc_m_diff = erfc(-scaled_diff); const auto& cdf_n = to_ref_if::value>( 0.5 * erfc_m_diff); T_partials_return cdf = prod(cdf_n); if (!is_constant_all::value) { const auto& exp_m_sq_diff = exp(-scaled_diff * scaled_diff); const auto& rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>( -cdf * INV_SQRT_TWO_PI * exp_m_sq_diff / (sigma_val * cdf_n)); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv / y_val; } if (!is_constant_all::value) { partials<1>(ops_partials) = rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = rep_deriv * scaled_diff * SQRT_TWO; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/std_normal_lccdf.hpp0000644000176200001440000000404614645137106024703 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_STD_NORMAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template < typename T_y, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline return_type_t std_normal_lccdf(const T_y& y) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_y_ref = ref_type_t; static const char* function = "std_normal_lccdf"; T_y_ref y_ref = y; check_not_nan(function, "Random variable", y_ref); if (size_zero(y)) { return 0; } T_partials_return lccdf(0.0); auto ops_partials = make_partials_propagator(y_ref); scalar_seq_view y_vec(y_ref); size_t N = stan::math::size(y); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return scaled_y = y_dbl * INV_SQRT_TWO; T_partials_return one_m_erf; if (y_dbl < -37.5) { one_m_erf = 2.0; } else if (y_dbl < -5.0) { one_m_erf = 2.0 - erfc(-scaled_y); } else if (y_dbl > 8.25) { one_m_erf = 0.0; } else { one_m_erf = 1.0 - erf(scaled_y); } lccdf += LOG_HALF + log(one_m_erf); if (!is_constant_all::value) { const T_partials_return rep_deriv = y_dbl > 8.25 ? 
INFTY : SQRT_TWO_OVER_SQRT_PI * exp(-scaled_y * scaled_y) / one_m_erf; partials<0>(ops_partials)[n] -= rep_deriv; } } return ops_partials.build(lccdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_cdf.hpp0000644000176200001440000000465614645137106024373 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_CDF_HPP #define STAN_MATH_PRIM_PROB_STD_NORMAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Calculates the standard normal cumulative distribution function * for the given variate. * * \f$\Phi(x) = \frac{1}{\sqrt{2 \pi}} \int_{-\inf}^x e^{-t^2/2} dt\f$. * * @tparam T_y type of y * @param y scalar variate * @return The standard normal cdf evaluated at the specified argument. */ template < typename T_y, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr> inline return_type_t std_normal_cdf(const T_y& y) { using T_partials_return = partials_return_t; using std::exp; using T_y_ref = ref_type_t; static const char* function = "std_normal_cdf"; T_y_ref y_ref = y; check_not_nan(function, "Random variable", y_ref); if (size_zero(y)) { return 1.0; } T_partials_return cdf(1.0); auto ops_partials = make_partials_propagator(y_ref); scalar_seq_view y_vec(y_ref); size_t N = stan::math::size(y); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return scaled_y = y_dbl * INV_SQRT_TWO; T_partials_return cdf_n; if (y_dbl < -37.5) { cdf_n = 0.0; } else if (y_dbl < -5.0) { cdf_n = 0.5 * erfc(-scaled_y); } else if (y_dbl > 8.25) { cdf_n = 1; } else { cdf_n = 0.5 * (1.0 + erf(scaled_y)); } cdf *= cdf_n; if (!is_constant_all::value) { const T_partials_return rep_deriv = (y_dbl < -37.5) ? 
0.0 : INV_SQRT_TWO_PI * exp(-scaled_y * scaled_y) / cdf_n; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += rep_deriv; } } } if (!is_constant_all::value) { for (size_t n = 0; n < N; ++n) { partials<0>(ops_partials)[n] *= cdf; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_cdf.hpp0000644000176200001440000001315114645137106024221 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_CDF_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_CDF_HPP #include #include #include namespace stan { namespace math { namespace internal { /** * This implementation of the von Mises cdf * is a copy of scipy's. see: * https://github.com/scipy/scipy/blob/8dba340293fe20e62e173bdf2c10ae208286692f/scipy/stats/vonmises.py * * When k < 50, approximate the von Mises cdf * with the following expansion that comes from * scipy. */ template return_type_t von_mises_cdf_series(const T_x& x, const T_k& k) { const double pi = stan::math::pi(); int p = value_of_rec(28 + 0.5 * k - 100 / (k + 5) + 1); auto s = sin(x); auto c = cos(x); auto sn = sin(p * x); auto cn = cos(p * x); using return_t = return_type_t; return_t R = 0.0; return_t V = 0.0; int n; for (n = 1; n < p; n++) { auto sn_tmp = sn * c - cn * s; cn = cn * c + sn * s; sn = sn_tmp; R = 1 / (2.0 * (p - n) / k + R); V = R * (sn / (p - n) + V); } return 0.5 + x / TWO_PI + V / pi; } /** * conv_mises_cdf_normapprox(x, k) is used to approximate the von * Mises cdf for k > 50. In this regime, the von Mises cdf * is well-approximated by a normal distribution. 
*/ template return_type_t von_mises_cdf_normalapprox(const T_x& x, const T_k& k) { using std::exp; using std::sqrt; const auto b = sqrt(2 / pi()) * exp(k) / modified_bessel_first_kind(0, k); const auto z = b * sin(x / 2); const double mu = 0; const double sigma = 1; return normal_cdf(z, mu, sigma); } /** * This function calculates the cdf of the von Mises distribution in * the case where the distribution has support on (-pi, pi) and * has mean 0. If k is sufficiently small, this function approximates * the cdf with a Gaussian. Otherwise, use the expansion from scipy. */ template return_type_t von_mises_cdf_centered(const T_x& x, const T_k& k) { using return_t = return_type_t; return_t f; if (k < 49) { f = von_mises_cdf_series(x, k); if (f < 0) { f = 0.0; return f; } if (f > 1) { f = 1.0; return f; } return f; } else if (k < 50) { f = (50.0 - k) * von_mises_cdf_series(x, 49.0) + (k - 49.0) * von_mises_cdf_normalapprox(x, 50.0); return f; } else { f = von_mises_cdf_normalapprox(x, k); return f; } } } // namespace internal /** \ingroup prob_dists * Calculates the cumulative distribution function of the von Mises * distribution: * * \f$VonMisesCDF(x, \mu, \kappa) = \frac{1}{2\pi I_0(\kappa)} \int_{-\pi}^x\f$ * \f$e^{\kappa cos(t - \mu)} dt\f$ * * where * * \f$x \in [-\pi, \pi]\f$, \f$\mu \in \mathbb{R}\f$, * and \f$\kappa \in \mathbb{R}^+\f$. 
* * @param x Variate on the interval \f$[-pi, pi]\f$ * @param mu The mean of the distribution * @param k The inverse scale of the distriubtion * @return The von Mises cdf evaluated at the specified arguments * @tparam T_x Type of x * @tparam T_mu Type of mean parameter * @tparam T_k Type of inverse scale parameter */ template inline return_type_t von_mises_cdf(const T_x& x, const T_mu& mu, const T_k& k) { using internal::von_mises_cdf_centered; const double pi = stan::math::pi(); using T_return = return_type_t; using T_x_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_k_ref = ref_type_t; static char const* function = "von_mises_cdf"; check_consistent_sizes(function, "Random variable", x, "Location parameter", mu, "Scale parameter", k); T_x_ref x_ref = x; T_mu_ref mu_ref = mu; T_k_ref k_ref = k; check_bounded(function, "Random variable", value_of(x_ref), -pi, pi); check_finite(function, "Location parameter", value_of(mu_ref)); check_positive(function, "Scale parameter", value_of(k_ref)); if (size_zero(x, mu, k)) { return 1.0; } T_return res(1.0); scalar_seq_view x_vec(x_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view k_vec(k_ref); size_t N = max_size(x, mu, k); for (size_t n = 0; n < N; ++n) { auto x_n = x_vec[n]; auto mu_n = mu_vec[n]; auto k_n = k_vec[n]; if (x_n == -pi) { res *= 0.0; } else if (x_n == pi) { res *= 1.0; } else { // shift x so that mean is 0 T_return x2 = x_n - mu_n; // x2 is on an interval (2*n*pi, (2*n + 1)*pi), move it to (-pi, pi) x2 += pi; const auto x_floor = floor(x2 / TWO_PI); const auto x_modded = x2 - x_floor * TWO_PI; x2 = x_modded - pi; // mu is on an interval (2*n*pi, (2*n + 1)*pi), move it to (-pi, pi) T_return mu2 = mu_n + pi; const auto mu_floor = floor(mu2 / TWO_PI); const auto mu_modded = mu2 - mu_floor * TWO_PI; mu2 = mu_modded - pi; // find cut T_return cut, bound_val; if (mu2 < 0) { cut = mu2 + pi; bound_val = -pi - mu2; } if (mu2 >= 0) { cut = mu2 - pi; bound_val = pi - mu2; } T_return f_bound_val = 
von_mises_cdf_centered(bound_val, k_n); T_return cdf_n; if (x_n <= cut) { cdf_n = von_mises_cdf_centered(x2, k_n) - f_bound_val; } else { cdf_n = von_mises_cdf_centered(x2, k_n) + 1 - f_bound_val; } if (cdf_n < 0.0) cdf_n = 0.0; res *= cdf_n; } } return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_logit_log.hpp0000644000176200001440000000155214645137106025437 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_LOG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LOGIT_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use bernoulli_logit_lpmf */ template return_type_t bernoulli_logit_log(const T_n& n, const T_prob& theta) { return bernoulli_logit_lpmf(n, theta); } /** \ingroup prob_dists * @deprecated use bernoulli_logit_lpmf */ template inline return_type_t bernoulli_logit_log(const T_n& n, const T_prob& theta) { return bernoulli_logit_lpmf(n, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_corr_cholesky_log.hpp0000644000176200001440000000167614645137106025763 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_CORR_CHOLESKY_LOG_HPP #define STAN_MATH_PRIM_PROB_LKJ_CORR_CHOLESKY_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use lkj_corr_cholesky_lpdf */ template return_type_t lkj_corr_cholesky_log(const T_covar& L, const T_shape& eta) { return lkj_corr_cholesky_lpdf(L, eta); } /** \ingroup multivar_dists * @deprecated use lkj_corr_cholesky_lpdf */ template inline return_type_t lkj_corr_cholesky_log( const T_covar& L, const T_shape& eta) { return lkj_corr_cholesky_lpdf<>(L, eta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_log_qf.hpp0000644000176200001440000001327214645137106025100 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_LOG_QF_HPP #define 
STAN_MATH_PRIM_PROB_STD_NORMAL_LOG_QF_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** * The inverse of the unit normal cumulative distribution function evaluated at * the log probability. * * @param log_p argument between -Inf and 0 inclusive * @return Real value of the inverse cdf for the standard normal distribution. */ inline double std_normal_log_qf(double log_p) { check_not_nan("std_normal_log_qf", "Log probability variable", log_p); check_less_or_equal("std_normal_log_qf", "Probability variable", log_p, 0); if (log_p == NEGATIVE_INFTY) { return NEGATIVE_INFTY; } if (log_p == 0) { return INFTY; } static const double log_a[8] = {1.2199838032983212, 4.8914137334471356, 7.5865960847956080, 9.5274618535358388, 10.734698580862359, 11.116406781896242, 10.417226196842595, 7.8276718012189362}; static const double log_b[8] = {0., 3.7451021830139207, 6.5326064640478618, 8.5930788436817044, 9.9624069236663077, 10.579180688621286, 10.265665328832871, 8.5614962136628454}; static const double log_c[8] = {0.3530744474482423, 1.5326298343683388, 1.7525849400614634, 1.2941374937060454, 0.2393776640901312, -1.419724057885092, -3.784340465764968, -7.163234779359426}; static const double log_d[8] = {0., 0.7193954734947205, 0.5166395879845317, -0.371400933927844, -1.909840708457214, -4.186547581055928, -7.509976771225415, -20.67376157385924}; static const double log_e[8] = {1.8958048169567149, 1.6981417567726154, 0.5793212339927351, -1.215503791936417, -3.629396584023968, -6.690500273261249, -10.51540298415323, -15.41979457491781}; static const double log_f[8] = {0., -0.511105318617135, -1.988286302259815, -4.208049039384857, -7.147448611626374, -10.89973190740069, -15.76637472711685, -33.82373901099482}; double val; double log_q = log_p <= LOG_HALF ? log_diff_exp(LOG_HALF, log_p) : log_diff_exp(log_p, LOG_HALF); int log_q_sign = log_p <= LOG_HALF ? 
-1 : 1; if (log_q <= -0.85566611005772) { double log_r = log_diff_exp(-1.71133222011544, 2 * log_q); double log_agg_a = log_sum_exp(log_a[7] + log_r, log_a[6]); double log_agg_b = log_sum_exp(log_b[7] + log_r, log_b[6]); for (int i = 0; i < 6; i++) { log_agg_a = log_sum_exp(log_agg_a + log_r, log_a[5 - i]); log_agg_b = log_sum_exp(log_agg_b + log_r, log_b[5 - i]); } return log_q_sign * exp(log_q + log_agg_a - log_agg_b); } else { double log_r = log_q_sign == -1 ? log_p : log1m_exp(log_p); if (stan::math::is_inf(log_r)) { return 0; } log_r = log(sqrt(-log_r)); if (log_r <= 1.60943791243410) { log_r = log_diff_exp(log_r, 0.47000362924573); double log_agg_c = log_sum_exp(log_c[7] + log_r, log_c[6]); double log_agg_d = log_sum_exp(log_d[7] + log_r, log_d[6]); for (int i = 0; i < 6; i++) { log_agg_c = log_sum_exp(log_agg_c + log_r, log_c[5 - i]); log_agg_d = log_sum_exp(log_agg_d + log_r, log_d[5 - i]); } val = exp(log_agg_c - log_agg_d); } else { log_r = log_diff_exp(log_r, 1.60943791243410); double log_agg_e = log_sum_exp(log_e[7] + log_r, log_e[6]); double log_agg_f = log_sum_exp(log_f[7] + log_r, log_f[6]); for (int i = 0; i < 6; i++) { log_agg_e = log_sum_exp(log_agg_e + log_r, log_e[5 - i]); log_agg_f = log_sum_exp(log_agg_f + log_r, log_f[5 - i]); } val = exp(log_agg_e - log_agg_f); } if (log_q_sign == -1) return -val; } return val; } /** * Structure to wrap std_normal_log_qf() so it can be vectorized. * * @tparam T type of variable * @param x variable in range [-Inf, 0] * @return inverse of the unit normal CDF of x * @throw std::domain_error if x is not less than or equal to 0 */ struct std_normal_log_qf_fun { template static inline auto fun(const T& x) { return std_normal_log_qf(x); } }; /** * A vectorized version of std_normal_log_qf() that accepts std::vectors, Eigen * Matrix/Array objects, or expressions, and containers of these. 
* * @tparam T type of container * @param x container * @return inverse unit normal CDF of each value in x * @throw std::domain_error if x is not less than or equal to 0 */ template < typename T, require_all_not_nonscalar_prim_or_rev_kernel_expression_t* = nullptr, require_not_var_matrix_t* = nullptr> inline auto std_normal_log_qf(const T& x) { return apply_scalar_unary::apply(x); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_lccdf.hpp0000644000176200001440000000772314645137106024040 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_LCCDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t pareto_lccdf(const T_y& y, const T_scale& y_min, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_y_min_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using std::isinf; static const char* function = "pareto_lccdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", y_min, "Shape parameter", alpha); if (size_zero(y, y_min, alpha)) { return 0; } T_y_ref y_ref = y; T_y_min_ref y_min_ref = y_min; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) y_min_val = to_ref(as_value_column_array_or_scalar(y_min_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", y_min_val); check_positive_finite(function, "Shape parameter", alpha_val); auto ops_partials = make_partials_propagator(y_ref, y_min_ref, alpha_ref); if (sum(promote_scalar(y_val < y_min_val))) { return ops_partials.build(0.0); } if 
(sum(promote_scalar(isinf(y_val)))) { return ops_partials.build(negative_infinity()); } auto log_quot = to_ref_if<(!is_constant_all::value || !is_constant_all::value)>( log(y_min_val / y_val)); T_partials_return P = sum(alpha_val * log_quot); size_t N = max_size(y, y_min, alpha); if (!is_constant_all::value) { const auto& alpha_div_y_min = to_ref_if<( !is_constant_all::value && !is_constant_all::value)>( alpha_val / y_min_val); if (!is_constant_all::value) { partials<0>(ops_partials) = -alpha_div_y_min * exp(log_quot); } if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = alpha_div_y_min * N / max_size(y_min, alpha); } } if (!is_constant_all::value) { if (is_vector::value) { using Log_quot_scalar = partials_return_t; using Log_quot_array = Eigen::Array; if (is_vector::value || is_vector::value) { edge<2>(ops_partials).partials_ = forward_as(std::move(log_quot)); } else { partials<2>(ops_partials) = Log_quot_array::Constant( N, 1, forward_as(log_quot)); } } else { forward_as>( partials<2>(ops_partials)) = log_quot * N / max_size(y, y_min); } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/matrix_normal_prec_lpdf.hpp0000644000176200001440000000661114645137106026300 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MATRIX_NORMAL_PREC_LPDF_HPP #define STAN_MATH_PRIM_PROB_MATRIX_NORMAL_PREC_LPDF_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the matrix normal density for the given y, mu, Sigma and D * where Sigma and D are given as precision matrices, not covariance matrices. * * @tparam T_y type of scalar * @tparam T_Mu type of location * @tparam T_Sigma type of Sigma * @tparam T_D type of D * * @param y An mxn matrix. * @param Mu The mean matrix. * @param Sigma The mxm inverse covariance matrix (i.e., the precision matrix) * of the rows of y. 
* @param D The nxn inverse covariance matrix (i.e., the precision matrix) of * the columns of y. * @return The log of the matrix normal density. * @throw std::domain_error if Sigma or D are not square, not symmetric, * or not semi-positive definite. */ template * = nullptr> return_type_t matrix_normal_prec_lpdf( const T_y& y, const T_Mu& Mu, const T_Sigma& Sigma, const T_D& D) { static const char* function = "matrix_normal_prec_lpdf"; check_positive(function, "Sigma rows", Sigma.rows()); check_finite(function, "Sigma", Sigma); check_symmetric(function, "Sigma", Sigma); auto ldlt_Sigma = make_ldlt_factor(Sigma); check_ldlt_factor(function, "LDLT_Factor of Sigma", ldlt_Sigma); check_positive(function, "D rows", D.rows()); check_finite(function, "D", D); check_symmetric(function, "D", D); auto ldlt_D = make_ldlt_factor(D); check_ldlt_factor(function, "LDLT_Factor of D", ldlt_D); check_size_match(function, "Rows of random variable", y.rows(), "Rows of location parameter", Mu.rows()); check_size_match(function, "Columns of random variable", y.cols(), "Columns of location parameter", Mu.cols()); check_size_match(function, "Rows of random variable", y.rows(), "Rows of Sigma", Sigma.rows()); check_size_match(function, "Columns of random variable", y.cols(), "Rows of D", D.rows()); check_finite(function, "Location parameter", Mu); check_finite(function, "Random variable", y); return_type_t lp(0.0); if (include_summand::value) { lp += NEG_LOG_SQRT_TWO_PI * y.cols() * y.rows(); } if (include_summand::value) { lp += log_determinant_ldlt(ldlt_Sigma) * (0.5 * y.rows()); } if (include_summand::value) { lp += log_determinant_ldlt(ldlt_D) * (0.5 * y.cols()); } if (include_summand::value) { lp -= 0.5 * trace_gen_quad_form(D, Sigma, subtract(y, Mu)); } return lp; } template * = nullptr> return_type_t matrix_normal_prec_lpdf( const T_y& y, const T_Mu& Mu, const T_Sigma& Sigma, const T_D& D) { return matrix_normal_prec_lpdf(y, Mu, Sigma, D); } } // namespace math } // namespace stan 
#endif StanHeaders/inst/include/stan/math/prim/prob/poisson_log.hpp0000644000176200001440000000136514645137106023742 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LOG_HPP #define STAN_MATH_PRIM_PROB_POISSON_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use poisson_lpmf */ template return_type_t poisson_log(const T_n& n, const T_rate& lambda) { return poisson_lpmf(n, lambda); } /** \ingroup prob_dists * @deprecated use poisson_lpmf */ template inline return_type_t poisson_log(const T_n& n, const T_rate& lambda) { return poisson_lpmf(n, lambda); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_ccdf_log.hpp0000644000176200001440000000125314645137106024515 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_PARETO_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use pareto_lccdf */ template return_type_t pareto_ccdf_log(const T_y& y, const T_scale& y_min, const T_shape& alpha) { return pareto_lccdf(y, y_min, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_lcdf.hpp0000644000176200001440000000733614645137106024046 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_LCDF_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the Weibull log cumulative distribution function for the given * location and scale. Given containers of matching sizes, returns the * log sum of probabilities. 
* * @tparam T_y type of real parameter * @tparam T_shape type of shape parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param alpha shape parameter * @param sigma scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is negative, alpha sigma is nonpositive */ template * = nullptr> return_type_t weibull_lcdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using std::pow; static const char* function = "weibull_lcdf"; T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Shape parameter", alpha_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, alpha, sigma)) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); constexpr bool any_derivs = !is_constant_all::value; const auto& pow_n = to_ref_if(pow(y_val / sigma_val, alpha_val)); const auto& exp_n = to_ref_if(exp(-pow_n)); T_partials_return cdf_log = sum(log(1 - exp_n)); if (!is_constant_all::value) { const auto& rep_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( pow_n / (1.0 / exp_n - 1.0)); if (!is_constant_all::value) { const auto& deriv_y_sigma = to_ref_if<( !is_constant_all::value && !is_constant_all::value)>( rep_deriv * alpha_val); if (!is_constant_all::value) { partials<0>(ops_partials) = deriv_y_sigma / y_val; } if (!is_constant_all::value) { partials<2>(ops_partials) = -deriv_y_sigma / sigma_val; 
} } if (!is_constant_all::value) { partials<1>(ops_partials) = rep_deriv * log(y_val / sigma_val); } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_rng.hpp0000644000176200001440000000454114645137106024212 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_RNG_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a pseudorandom inverse gamma variate for the given shape * and scale parameters using the specified random number generator. * * alpha and beta can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_shape Type of shape parameter * @tparam T_scale Type of scale parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) positive shape parameter(s) * @param beta (Sequence of) positive scale parameter(s) * @param rng random number generator * @return (Sequence of) inverse gamma random variate(s) * @throw std::domain_error if alpha or beta are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type inv_gamma_rng(const T_shape& alpha, const T_scale& beta, RNG& rng) { using boost::variate_generator; using boost::random::gamma_distribution; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "inv_gamma_rng"; check_consistent_sizes(function, "Shape parameter", alpha, "Scale Parameter", beta); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", beta_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(alpha, beta); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { 
variate_generator > gamma_rng( rng, gamma_distribution<>(alpha_vec[n], 1 / static_cast(beta_vec[n]))); output[n] = 1 / gamma_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/categorical_logit_lpmf.hpp0000644000176200001440000000452014645137106026074 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_LPMF_HPP #define STAN_MATH_PRIM_PROB_CATEGORICAL_LOGIT_LPMF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { // CategoricalLog(n|theta) [0 < n <= N, theta unconstrained], no checking template * = nullptr> return_type_t categorical_logit_lpmf(int n, const T_prob& beta) { static const char* function = "categorical_logit_lpmf"; check_bounded(function, "categorical outcome out of support", n, 1, beta.size()); ref_type_t beta_ref = beta; check_finite(function, "log odds parameter", beta_ref); if (!include_summand::value) { return 0.0; } // FIXME: wasteful vs. 
creating term (n-1) if not vectorized return beta_ref.coeff(n - 1) - log_sum_exp(beta_ref); // == log_softmax(beta)(n-1); } template * = nullptr> return_type_t categorical_logit_lpmf(const std::vector& ns, const T_prob& beta) { static const char* function = "categorical_logit_lpmf"; check_bounded(function, "categorical outcome out of support", ns, 1, beta.size()); ref_type_t beta_ref = beta; check_finite(function, "log odds parameter", beta_ref); if (!include_summand::value) { return 0.0; } if (ns.empty()) { return 0.0; } auto log_softmax_beta = to_ref(log_softmax(beta_ref)); // FIXME: replace with more efficient sum() Eigen::Matrix, Eigen::Dynamic, 1> results(ns.size()); for (size_t i = 0; i < ns.size(); ++i) { results[i] = log_softmax_beta(ns[i] - 1); } return sum(results); } template * = nullptr, require_col_vector_t* = nullptr> inline return_type_t categorical_logit_lpmf(const T_n& ns, const T_prob& beta) { return categorical_logit_lpmf(ns, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/matrix_normal_prec_rng.hpp0000644000176200001440000001020214645137106026130 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MATRIX_NORMAL_PREC_RNG_HPP #define STAN_MATH_PRIM_PROB_MATRIX_NORMAL_PREC_RNG_HPP #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Sample from the the matrix normal distribution for the given Mu, * Sigma and D where Sigma and D are given as precision matrices, not * covariance matrices. * * @param Mu The mean matrix. * @param Sigma The mxm inverse covariance matrix (i.e., the precision * matrix) of the rows of y. * @param D The nxn inverse covariance matrix (i.e., the precision * matrix) of the columns of y. * @param rng Pseudo-random number generator. * @return A sample from the distribution, of type Matrix. * @throw std::invalid_argument if Sigma or D are not square. 
* @throw std::domain_error if Sigma or D are not symmetric, not * semi-positive definite, or if they contain infinities or NaNs. * @tparam RNG Type of pseudo-random number generator. */ template inline Eigen::MatrixXd matrix_normal_prec_rng(const Eigen::MatrixXd &Mu, const Eigen::MatrixXd &Sigma, const Eigen::MatrixXd &D, RNG &rng) { using boost::normal_distribution; using boost::variate_generator; static const char *function = "matrix_normal_prec_rng"; check_positive(function, "Sigma rows", Sigma.rows()); check_finite(function, "Sigma", Sigma); check_symmetric(function, "Sigma", Sigma); check_positive(function, "D rows", D.rows()); check_finite(function, "D", D); check_symmetric(function, "D", D); check_size_match(function, "Rows of location parameter", Mu.rows(), "Rows of Sigma", Sigma.rows()); check_size_match(function, "Columns of location parameter", Mu.cols(), "Rows of D", D.rows()); check_finite(function, "Location parameter", Mu); Eigen::LDLT Sigma_ldlt(Sigma); // Sigma = PS^T LS DS LS^T PS // PS a permutation matrix. // LS lower triangular with unit diagonal. // DS diagonal. 
Eigen::LDLT D_ldlt(D); // D = PD^T LD DD LD^T PD check_pos_semidefinite(function, "Sigma", Sigma_ldlt); check_pos_semidefinite(function, "D", D_ldlt); // If // C ~ N[0, I, I] // Then // A C B ~ N[0, A A^T, B^T B] // So to get // Y - Mu ~ N[0, Sigma^(-1), D^(-1)] // We need to do // Y - Mu = Q^T^(-1) C R^(-1) // Where Q^T^(-1) and R^(-1) are such that // Q^(-1) Q^(-1)^T = Sigma^(-1) // R^(-1)^T R^(-1) = D^(-1) // We choose: // Q^(-1)^T = PS^T LS^T^(-1) sqrt[DS]^(-1) // R^(-1) = sqrt[DD]^(-1) LD^(-1) PD // And therefore // Y - Mu = (PS^T LS^T^(-1) sqrt[DS]^(-1)) C (sqrt[DD]^(-1) LD^(-1) PD) int m = Sigma.rows(); int n = D.rows(); variate_generator> std_normal_rng( rng, normal_distribution<>(0, 1)); // X = sqrt[DS]^(-1) C sqrt[DD]^(-1) // X ~ N[0, DS, DD] Eigen::MatrixXd X(m, n); Eigen::VectorXd row_stddev = Sigma_ldlt.vectorD().array().inverse().sqrt().matrix(); Eigen::VectorXd col_stddev = D_ldlt.vectorD().array().inverse().sqrt().matrix(); for (int col = 0; col < n; ++col) { for (int row = 0; row < m; ++row) { double stddev = row_stddev(row) * col_stddev(col); // C(row, col) = std_normal_rng(); X(row, col) = stddev * std_normal_rng(); } } // Y - Mu = PS^T (LS^T^(-1) X LD^(-1)) PD // Y' = LS^T^(-1) X LD^(-1) // Y' = LS^T.solve(X) LD^(-1) // Y' = (LD^(-1)^T (LS^T.solve(X))^T)^T // Y' = (LD^T.solve((LS^T.solve(X))^T))^T // Y = Mu + PS^T Y' PD Eigen::MatrixXd Y = Mu + (Sigma_ldlt.transpositionsP().transpose() * (D_ldlt.matrixU().solve( (Sigma_ldlt.matrixU().solve(X)).transpose())) .transpose() * D_ldlt.transpositionsP()); return Y; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/rayleigh_rng.hpp0000644000176200001440000000335514645137106024062 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_RNG_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Rayleigh random variate with scale 
parameter sigma * using the specified random number generator. * * sigma can be a scalar or a one-dimensional container. * * @tparam T_scale Type of scale parameter * @tparam RNG class of random number generator * @param sigma (Sequence of) positive scale parameter(s) * @param rng random number generator * @return (Sequence of) Rayleigh random variate(s) * @throw std::domain_error if sigma is nonpositive */ template inline typename VectorBuilder::type rayleigh_rng( const T_scale& sigma, RNG& rng) { using boost::variate_generator; using boost::random::uniform_real_distribution; static const char* function = "rayleigh_rng"; const auto& sigma_ref = to_ref(sigma); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = stan::math::size(sigma); VectorBuilder output(N); variate_generator > uniform_rng( rng, uniform_real_distribution<>(0.0, 1.0)); for (size_t n = 0; n < N; ++n) { output[n] = sigma_vec[n] * std::sqrt(-2.0 * std::log(uniform_rng())); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_log.hpp0000644000176200001440000000316714645137106023526 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_LOG_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the Cauchy density for the specified scalar(s) given * the specified location parameter(s) and scale parameter(s). y, * mu, or sigma can each either be scalar a vector. Any vector * inputs must be the same length. * *

The result log probability is defined to be the sum of * the log probabilities for each observation/mu/sigma triple. * * @deprecated use cauchy_lpdf * * @param y (Sequence of) scalar(s). * @param mu (Sequence of) location(s). * @param sigma (Sequence of) scale(s). * @return The log of the product of densities. * @tparam T_scale Type of scale. * @tparam T_y Type of scalar outcome. * @tparam T_loc Type of location. */ template return_type_t cauchy_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return cauchy_lpdf(y, mu, sigma); } /** \ingroup prob_dists * @deprecated use cauchy_lpdf */ template inline return_type_t cauchy_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return cauchy_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_lccdf.hpp0000644000176200001440000000711014645137106026414 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the double exponential log complementary cumulative density * function. Given containers of matching sizes, returns the log sum of * probabilities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. 
* @param y real parameter * @param mu location parameter * @param sigma scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is nan, mu is infinite, or sigma is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t double_exponential_lccdf( const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using std::log; static const char* function = "double_exponential_lccdf"; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale Parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return ccdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t size_sigma = stan::math::size(sigma); size_t N = max_size(y, mu, sigma); VectorBuilder inv_sigma(size_sigma); for (size_t i = 0; i < size_sigma; i++) { inv_sigma[i] = inv(sigma_vec.val(i)); } for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return scaled_diff = (y_dbl - mu_dbl) * inv_sigma[n]; const T_partials_return rep_deriv = y_dbl < mu_dbl ? 
inv_sigma[n] * inv(2 * exp(-scaled_diff) - 1) : inv_sigma[n]; if (y_dbl < mu_dbl) { ccdf_log += log1m(0.5 * exp(scaled_diff)); } else { ccdf_log += LOG_HALF - scaled_diff; } if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += rep_deriv * scaled_diff; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_ccdf_log.hpp0000644000176200001440000000117514645137106025237 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use student_t_lccdf */ template return_type_t student_t_ccdf_log( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { return student_t_lccdf(y, nu, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multinomial_rng.hpp0000644000176200001440000000212614645137106024603 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTINOMIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_MULTINOMIAL_RNG_HPP #include #include #include #include #include namespace stan { namespace math { template * = nullptr> inline std::vector multinomial_rng(const T_theta& theta, int N, RNG& rng) { static const char* function = "multinomial_rng"; const auto& theta_ref = to_ref(theta); check_simplex(function, "Probabilities parameter", theta_ref); check_positive(function, "number of trials variables", N); std::vector result(theta.size(), 0); double mass_left = 1.0; int n_left = N; for (int k = 0; n_left > 0 && k < theta.size(); ++k) { double p = theta_ref.coeff(k) / mass_left; if (p > 1.0) { p = 1.0; } result[k] = binomial_rng(n_left, p, rng); n_left -= result[k]; mass_left -= theta_ref(k); } return result; } } // namespace 
math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_binomial_lccdf.hpp0000644000176200001440000000575514645137106026115 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_POISSON_BINOMIAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CCDF for the Poisson-binomial distribution evaluated at the * specified number of successes and probabilities of successes. * * @tparam T_y type of number of successes parameter * @tparam T_theta type of chance of success parameters * @param y input scalar, vector, or nested vector of numbers of successes * @param theta array of chances of success parameters * @return sum of log probabilities * @throw std::domain_error if y is out of bounds * @throw std::domain_error if theta is not a valid vector of probabilities * @throw std::invalid_argument If y and theta are different lengths */ template return_type_t poisson_binomial_lccdf(const T_y& y, const T_theta& theta) { static const char* function = "poisson_binomial_lccdf"; size_t size_theta = size_mvt(theta); if (size_theta > 1) { check_consistent_sizes(function, "Successes variables", y, "Probability parameters", theta); } size_t max_sz = std::max(stan::math::size(y), size_mvt(theta)); scalar_seq_view y_vec(y); vector_seq_view theta_vec(theta); for (size_t i = 0; i < max_sz; ++i) { check_bounded(function, "Successes variable", y_vec[i], 0, theta_vec[i].size()); check_finite(function, "Probability parameters", theta_vec.val(i)); check_bounded(function, "Probability parameters", theta_vec.val(i), 0.0, 1.0); } return_type_t lccdf = 0.0; for (size_t i = 0; i < max_sz; ++i) { if (stan::math::size(theta_vec[i]) == 1) { if (y_vec[i] == 0) { lccdf += log(theta_vec[i][0]); } else { lccdf -= stan::math::INFTY; } } else { auto x = log1m_exp( 
log_sum_exp(poisson_binomial_log_probs(y_vec[i], theta_vec[i]))); lccdf += x; } } return lccdf; } template return_type_t poisson_binomial_lccdf(const T_y& y, const T_theta& theta) { return poisson_binomial_lccdf(y, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_lcdf.hpp0000644000176200001440000000453414645137106024726 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t exponential_lcdf(const T_y& y, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_beta_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exponential_lcdf"; T_y_ref y_ref = y; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Inverse scale parameter", beta_val); if (size_zero(y, beta)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, beta_ref); const auto& exp_val = to_ref_if::value>( exp(-beta_val * y_val)); T_partials_return cdf_log = sum(log1m(exp_val)); if (!is_constant_all::value) { const auto& rep_deriv = to_ref_if<( !is_constant_all::value || !is_constant_all::value)>( -exp_val / (1.0 - exp_val)); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv * beta_val; } if (!is_constant_all::value) { partials<1>(ops_partials) = -rep_deriv * y_val; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/ordered_probit_rng.hpp0000644000176200001440000000170714645137106025260 
0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_ORDERED_PROBIT_RNG_HPP #define STAN_MATH_PRIM_PROB_ORDERED_PROBIT_RNG_HPP #include #include #include #include namespace stan { namespace math { template inline int ordered_probit_rng(double eta, const Eigen::VectorXd& c, RNG& rng) { static const char* function = "ordered_probit"; check_finite(function, "Location parameter", eta); check_greater(function, "Size of cut points parameter", c.size(), 0); check_ordered(function, "Cut points vector", c); check_finite(function, "Cut-points", c); Eigen::VectorXd cut(c.rows() + 1); cut(0) = 1 - Phi(eta - c(0)); for (int j = 1; j < c.rows(); j++) { cut(j) = Phi(eta - c(j - 1)) - Phi(eta - c(j)); } cut(c.rows()) = Phi(eta - c(c.rows() - 1)); return categorical_rng(cut, rng); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_lccdf.hpp0000644000176200001440000001006414645137106024661 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_LCCDF_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the chi square log complementary cumulative distribution * function for the given variate and degrees of freedom. If given * containers of matching sizes, returns the log sum of probabilities. 
* * @tparam T_y type of scalar parameter * @tparam T_dof type of degrees of freedom parameter * @param y scalar parameter * @param nu degrees of freedom parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is negative or nu is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t chi_square_lccdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; static const char* function = "chi_square_lccdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; check_not_nan(function, "Random variable", y_ref); check_nonnegative(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); if (size_zero(y, nu)) { return 0; } T_partials_return ccdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); size_t N = max_size(y, nu); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return alpha_dbl = nu_vec.val(i) * 0.5; gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(negative_infinity()); } const T_partials_return y_dbl = y_vec.val(n); 
const T_partials_return alpha_dbl = nu_vec.val(n) * 0.5; const T_partials_return beta_dbl = 0.5; const T_partials_return Pn = gamma_q(alpha_dbl, beta_dbl * y_dbl); ccdf_log += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= beta_dbl * exp(-beta_dbl * y_dbl) * pow(beta_dbl * y_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += 0.5 * grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_ccdf_log.hpp0000644000176200001440000000111714645137106024304 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_GAMMA_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use gamma_lccdf */ template return_type_t gamma_ccdf_log( const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { return gamma_lccdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/loglogistic_lpdf.hpp0000644000176200001440000001416614645137106024736 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGLOGISTIC_LPDF_HPP #define STAN_MATH_PRIM_PROB_LOGLOGISTIC_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the loglogistic density for the specified scalar(s) * given the specified scales(s) and shape(s). y, alpha, or beta * can each be either a scalar or a vector. Any vector inputs * must be the same length. * *

The result log probability is defined to be the sum of the * log probabilities for each observation/scale/shape triple. * * @tparam T_y type of scalar. * @tparam T_scale type of scale parameter. * @tparam T_shape type of shape parameter. * @param y (Sequence of) scalar(s). * @param alpha (Sequence of) scale parameter(s) * for the loglogistic distribution. * @param beta (Sequence of) shape parameter(s) for the * loglogistic distribution. * @return The log of the product of the densities. * @throw std::domain_error if any of the inputs are not positive and finite. */ template * = nullptr> return_type_t loglogistic_lpdf(const T_y& y, const T_scale& alpha, const T_shape& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_scale_ref = ref_type_if_t::value, T_scale>; using T_shape_ref = ref_type_if_t::value, T_shape>; using std::pow; static const char* function = "loglogistic_lpdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", alpha, "Shape parameter", beta); T_y_ref y_ref = y; T_scale_ref alpha_ref = alpha; T_shape_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_positive_finite(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", alpha_val); check_positive_finite(function, "Shape parameter", beta_val); if (size_zero(y, alpha, beta)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); const auto& inv_alpha = to_ref_if::value>(inv(alpha_val)); const auto& y_div_alpha = to_ref_if::value>(y_val * inv_alpha); const auto& y_div_alpha_pow_beta = to_ref_if::value>(pow(y_div_alpha, beta_val)); const auto& log1_arg = to_ref_if::value>( 1 + y_div_alpha_pow_beta); const auto& log_y = 
to_ref_if::value>(log(y_val)); const auto& log_alpha = to_ref_if::value>( log(alpha_val)); const auto& beta_minus_one = to_ref_if<(include_summand::value || !is_constant_all::value)>(beta_val - 1.0); size_t N = max_size(y, alpha, beta); size_t N_alpha_beta = max_size(alpha, beta); T_partials_return logp = sum(beta_minus_one * log_y - 2.0 * log(log1_arg)); if (include_summand::value) { logp += sum(N * (log(beta_val) - log_alpha - beta_minus_one * log_alpha) / N_alpha_beta); } if (!is_constant_all::value) { const auto& two_inv_log1_arg = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(2.0 * inv(log1_arg)); if (!is_constant_all::value) { const auto& y_pow_beta = to_ref_if::value>( pow(y_val, beta_val)); const auto& inv_alpha_pow_beta = to_ref_if < !is_constant_all::value && !is_constant_all::value > (pow(inv_alpha, beta_val)); if (!is_constant_all::value) { const auto& inv_y = inv(y_val); const auto& y_deriv = beta_minus_one * inv_y - two_inv_log1_arg * (beta_val * inv_alpha_pow_beta) * y_pow_beta * inv_y; partials<0>(ops_partials) = y_deriv; } if (!is_constant_all::value) { const auto& alpha_deriv = -beta_val * inv_alpha - two_inv_log1_arg * y_pow_beta * (-beta_val) * inv_alpha_pow_beta * inv_alpha; partials<1>(ops_partials) = alpha_deriv; } } if (!is_constant_all::value) { const auto& beta_deriv = (1.0 * inv(beta_val)) + log_y - log_alpha - two_inv_log1_arg * y_div_alpha_pow_beta * log(y_div_alpha); partials<2>(ops_partials) = beta_deriv; } } return ops_partials.build(logp); } template inline return_type_t loglogistic_lpdf( const T_y& y, const T_scale& alpha, const T_shape& beta) { return loglogistic_lpdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_lcdf.hpp0000644000176200001440000000411314645137106025235 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LCDF_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_LCDF_HPP #include #include #include 
#include #include #include #include #include #include namespace stan { namespace math { template return_type_t neg_binomial_2_lcdf( const T_n& n, const T_location& mu, const T_precision& phi) { using std::log; using T_n_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_phi_ref = ref_type_t; static const char* function = "neg_binomial_2_lcdf"; check_consistent_sizes(function, "Random variable", n, "Location parameter", mu, "Precision Parameter", phi); T_n_ref n_ref = n; T_mu_ref mu_ref = mu; T_phi_ref phi_ref = phi; check_positive_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Precision parameter", phi_ref); check_not_nan(function, "Random variable", n_ref); if (size_zero(n, mu, phi)) { return 0; } scalar_seq_view n_vec(n_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view phi_vec(phi_ref); size_t size_n = stan::math::size(n); size_t size_phi_mu = max_size(mu, phi); for (size_t i = 0; i < size_n; i++) { if (n_vec[i] < 0) { return LOG_ZERO; } } VectorBuilder, T_location, T_precision> phi_mu(size_phi_mu); for (size_t i = 0; i < size_phi_mu; i++) { phi_mu[i] = phi_vec[i] / (phi_vec[i] + mu_vec[i]); } VectorBuilder, T_n> np1(size_n); for (size_t i = 0; i < size_n; i++) { np1[i] = n_vec[i] + 1.0; } return beta_cdf_log(phi_mu.data(), phi_ref, np1.data()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_corr_rng.hpp0000644000176200001440000000236114645137106024057 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_CORR_RNG_HPP #define STAN_MATH_PRIM_PROB_LKJ_CORR_RNG_HPP #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a random correlation matrix (symmetric, positive * definite, unit diagonal) of the specified dimensionality drawn * from the LKJ distribution with the specified degrees of freedom * using the specified random number generator. * * @tparam RNG Random number generator type. 
* @param[in] K Number of rows and columns of generated matrix. * @param[in] eta Degrees of freedom for LKJ distribution. * @param[in, out] rng Random-number generator to use. * @return Random variate with specified distribution. * @throw std::domain_error If the shape parameter is not positive. */ template inline Eigen::MatrixXd lkj_corr_rng(size_t K, double eta, RNG& rng) { static const char* function = "lkj_corr_rng"; check_positive(function, "Shape parameter", eta); return multiply_lower_tri_self_transpose(lkj_corr_cholesky_rng(K, eta, rng)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_lcdf.hpp0000644000176200001440000001217214645137106023310 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_LCDF_HPP #define STAN_MATH_PRIM_PROB_BETA_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the beta log cumulative distribution function for the given * probability, success, and failure parameters. Any arguments other * than scalars must be containers of the same size. With non-scalar * arguments, the return is the sum of the log cdfs with scalars * broadcast as necessary. 
* * @tparam T_y type of y * @tparam T_scale_succ type of success parameter * @tparam T_scale_fail type of failure parameter * @param y (Sequence of) scalar(s) between zero and one * @param alpha (Sequence of) success parameter(s) * @param beta_param (Sequence of) failure parameter(s) * @return log probability or sum of log of probabilities * @throw std::domain_error if alpha or beta is negative * @throw std::domain_error if y is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template return_type_t beta_lcdf( const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta_param) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "beta_lcdf"; check_consistent_sizes(function, "Random variable", y, "First shape parameter", alpha, "Second shape parameter", beta_param); if (size_zero(y, alpha, beta_param)) { return 0; } T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta_param; check_positive_finite(function, "First shape parameter", alpha_ref); check_positive_finite(function, "Second shape parameter", beta_ref); check_bounded(function, "Random variable", value_of(y_ref), 0, 1); T_partials_return cdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t size_alpha = stan::math::size(alpha); size_t size_beta = stan::math::size(beta_param); size_t size_alpha_beta = max_size(alpha, beta_param); size_t N = max_size(y, alpha, beta_param); VectorBuilder::value, T_partials_return, T_scale_succ> digamma_alpha(size_alpha); VectorBuilder::value, T_partials_return, T_scale_fail> digamma_beta(size_beta); VectorBuilder::value, T_partials_return, T_scale_succ, T_scale_fail> digamma_sum(size_alpha_beta); if 
(!is_constant_all::value) { for (size_t i = 0; i < size_alpha; i++) { digamma_alpha[i] = digamma(alpha_vec.val(i)); } for (size_t i = 0; i < size_beta; i++) { digamma_beta[i] = digamma(beta_vec.val(i)); } for (size_t i = 0; i < size_alpha_beta; i++) { digamma_sum[i] = digamma(alpha_vec.val(i) + beta_vec.val(i)); } } for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return betafunc_dbl = beta(alpha_dbl, beta_dbl); const T_partials_return Pn = inc_beta(alpha_dbl, beta_dbl, y_dbl); const T_partials_return inv_Pn = is_constant_all::value ? 0 : inv(Pn); cdf_log += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += pow(1 - y_dbl, beta_dbl - 1) * pow(y_dbl, alpha_dbl - 1) * inv_Pn / betafunc_dbl; } T_partials_return g1 = 0; T_partials_return g2 = 0; if (!is_constant_all::value) { grad_reg_inc_beta(g1, g2, alpha_dbl, beta_dbl, y_dbl, digamma_alpha[n], digamma_beta[n], digamma_sum[n], betafunc_dbl); } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += g1 * inv_Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += g2 * inv_Pn; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/dirichlet_lpmf.hpp0000644000176200001440000000160114645137106024365 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DIRICHLET_LPMF_HPP #define STAN_MATH_PRIM_PROB_DIRICHLET_LPMF_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use dirichlet_lpdf */ template return_type_t dirichlet_lpmf(const T_prob& theta, const T_prior_size& alpha) { return dirichlet_lpdf(theta, alpha); } template return_type_t dirichlet_lpmf(const T_prob& theta, const T_prior_size& alpha) { return dirichlet_lpdf(theta, alpha); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/pareto_lpdf.hpp0000644000176200001440000000763514645137106023714 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_LPDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // Pareto(y|y_m, alpha) [y > y_m; y_m > 0; alpha > 0] template * = nullptr> return_type_t pareto_lpdf(const T_y& y, const T_scale& y_min, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_y_min_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; static const char* function = "pareto_lpdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", y_min, "Shape parameter", alpha); if (size_zero(y, y_min, alpha)) { return 0; } T_y_ref y_ref = y; T_y_min_ref y_min_ref = y_min; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) y_min_val = to_ref(as_value_column_array_or_scalar(y_min_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_not_nan(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", y_min_val); check_positive_finite(function, "Shape parameter", alpha_val); if (!include_summand::value) { return 0; } if (sum(promote_scalar(y_val < y_min_val))) { return LOG_ZERO; } const auto& log_y = to_ref_if::value>(log(y_val)); size_t N = max_size(y, y_min, alpha); T_partials_return logp(0); if (include_summand::value) { logp = sum(log(alpha_val)) * N / math::size(alpha); } if (include_summand::value) { logp -= sum(alpha_val * log_y + log_y) * N / max_size(alpha, y); } auto ops_partials = make_partials_propagator(y_ref, y_min_ref, alpha_ref); if (!is_constant_all::value) { const auto& inv_y = inv(y_val); 
edge<0>(ops_partials).partials_ = -(alpha_val * inv_y + inv_y) * N / max_size(alpha, y); } if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = alpha_val / y_min_val * N / max_size(alpha, y_min); } if (include_summand::value) { const auto& log_y_min = to_ref_if::value>(log(y_min_val)); logp += sum(alpha_val * log_y_min) * N / max_size(alpha, y_min); if (!is_constant_all::value) { partials<2>(ops_partials) = inv(alpha_val) + log_y_min - log_y; } } return ops_partials.build(logp); } template inline return_type_t pareto_lpdf(const T_y& y, const T_scale& y_min, const T_shape& alpha) { return pareto_lpdf(y, y_min, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_cov_lpdf.hpp0000644000176200001440000001032214645137106024034 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_COV_LPDF_HPP #define STAN_MATH_PRIM_PROB_LKJ_COV_LPDF_HPP #include #include #include #include #include namespace stan { namespace math { /** @deprecated */ // LKJ_cov(y|mu, sigma, eta) [ y covariance matrix (not correlation matrix) // mu vector, sigma > 0 vector, eta > 0 ] template * = nullptr, require_all_eigen_col_vector_t* = nullptr> return_type_t lkj_cov_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& eta) { static const char* function = "lkj_cov_lpdf"; check_size_match(function, "Rows of location parameter", mu.rows(), "columns of scale parameter", sigma.rows()); check_square(function, "random variable", y); check_size_match(function, "Rows of random variable", y.rows(), "rows of location parameter", mu.rows()); const auto& y_ref = to_ref(y); const auto& mu_ref = to_ref(mu); const auto& sigma_ref = to_ref(sigma); check_positive(function, "Shape parameter", eta); check_finite(function, "Location parameter", mu_ref); check_finite(function, "Scale parameter", sigma_ref); check_finite(function, "Covariance matrix", y_ref); return_type_t lp(0.0); const unsigned int K = y.rows(); const Eigen::Array, 
Eigen::Dynamic, 1> sds = y_ref.diagonal().array().sqrt(); for (unsigned int k = 0; k < K; k++) { lp += lognormal_lpdf(sds(k), mu_ref(k), sigma_ref(k)); } if (stan::is_constant_all::value && eta == 1.0) { // no need to rescale y into a correlation matrix lp += lkj_corr_lpdf(y_ref, eta); return lp; } Eigen::DiagonalMatrix, Eigen::Dynamic> D(K); D.diagonal() = sds.inverse(); lp += lkj_corr_lpdf(D * y_ref * D, eta); return lp; } /** @deprecated */ // LKJ_Cov(y|mu, sigma, eta) [ y covariance matrix (not correlation matrix) // mu scalar, sigma > 0 scalar, eta > 0 ] template * = nullptr, require_all_stan_scalar_t* = nullptr> return_type_t lkj_cov_lpdf(const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& eta) { static const char* function = "lkj_cov_lpdf"; check_positive(function, "Shape parameter", eta); check_finite(function, "Location parameter", mu); check_finite(function, "Scale parameter", sigma); const auto& y_ref = to_ref(y); check_finite(function, "Covariance matrix", y_ref); return_type_t lp(0.0); const unsigned int K = y.rows(); const Eigen::Array, Eigen::Dynamic, 1> sds = y_ref.diagonal().array().sqrt(); for (unsigned int k = 0; k < K; k++) { lp += lognormal_lpdf(sds(k), mu, sigma); } if (stan::is_constant_all::value && eta == 1.0) { // no need to rescale y into a correlation matrix lp += lkj_corr_lpdf(y_ref, eta); return lp; } Eigen::DiagonalMatrix, Eigen::Dynamic> D(K); D.diagonal() = sds.inverse(); lp += lkj_corr_lpdf(D * y_ref * D, eta); return lp; } /** @deprecated */ template inline return_type_t lkj_cov_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& eta) { return lkj_cov_lpdf(y, mu, sigma, eta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/logistic_rng.hpp0000644000176200001440000000443114645137106024067 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGISTIC_RNG_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_RNG_HPP #include #include #include #include #include #include 
#include namespace stan { namespace math { /** \ingroup prob_dists * Return a Logistic random variate for the given location and scale * using the specified random number generator. * * mu and sigma can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) scale parameter(s) * @param rng random number generator * @return (Sequence of) Logistic random variate(s) * @throw std::domain_error if mu is infinite or sigma is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type logistic_rng( const T_loc& mu, const T_scale& sigma, RNG& rng) { using boost::variate_generator; using boost::random::exponential_distribution; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "logistic_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma); T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(mu, sigma); VectorBuilder output(N); variate_generator > exp_rng( rng, exponential_distribution<>(1)); for (size_t n = 0; n < N; ++n) { output[n] = mu_vec[n] - sigma_vec[n] * std::log(exp_rng() / exp_rng()); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_ccdf_log.hpp0000644000176200001440000000111714645137106025014 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup 
prob_dists * @deprecated use binomial_lccdf */ template return_type_t binomial_ccdf_log(const T_n& n, const T_N& N, const T_prob& theta) { return binomial_lccdf(n, N, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/ordered_logistic_log.hpp0000644000176200001440000000433314645137106025567 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_LOG_HPP #define STAN_MATH_PRIM_PROB_ORDERED_LOGISTIC_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the (natural) log probability of the integer/s * given the vector of continuous location/s and * specified cutpoints in an ordered logistic model. * *

Typically the continuous location * will be the dot product of a vector of regression coefficients * and a vector of predictors for the outcome. * * @tparam propto True if calculating up to a proportion. * @tparam T_y y variable type (int or array of integers). * @tparam T_loc Location type (double or vector). * @tparam T_cut Cut-point type (vector or array of vectors). * @param y Integers * @param lambda Continuous location variables. * @param c Positive increasing cutpoints. * @return Log probability of outcome given location and * cutpoints. * @throw std::domain_error If the outcome is not between 1 and * the number of cutpoints plus 2; if the cutpoint vector is * empty; if the cutpoint vector contains a non-positive, * non-finite value; or if the cutpoint vector is not sorted in * ascending order. * @throw std::invalid_argument If array y and vector lambda * are different lengths. * @throw std::invalid_argument if array y and array of vectors * c are different lengths. * * @deprecated use ordered_logistic_lpmf */ template return_type_t ordered_logistic_log(const T_y& y, const T_loc& lambda, const T_cut& c) { return ordered_logistic_lpmf(y, lambda, c); } /** \ingroup multivar_dists * @deprecated use ordered_logistic_lpmf */ template return_type_t ordered_logistic_log(const T_y& y, const T_loc& lambda, const T_cut& c) { return ordered_logistic_lpmf(y, lambda, c); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_sufficient_log.hpp0000644000176200001440000000230214645137106025747 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_SUFFICIENT_LOG_HPP #define STAN_MATH_PRIM_PROB_NORMAL_SUFFICIENT_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use normal_lpdf */ template inline return_type_t normal_sufficient_log( const T_y& y_bar, const T_s& s_squared, const T_n& n_obs, const T_loc& mu, const T_scale& sigma) { return normal_sufficient_lpdf( y_bar, s_squared, n_obs, 
mu, sigma); } /** \ingroup prob_dists * @deprecated use normal_lpdf */ template inline return_type_t normal_sufficient_log( const T_y& y_bar, const T_s& s_squared, const T_n& n_obs, const T_loc& mu, const T_scale& sigma) { return normal_sufficient_lpdf( y_bar, s_squared, n_obs, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_log.hpp0000644000176200001440000000201514645137106024743 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use matrix_normal_lpdf */ template return_type_t multi_normal_log(const T_y& y, const T_loc& mu, const T_covar& Sigma) { return multi_normal_lpdf(y, mu, Sigma); } /** \ingroup multivar_dists * @deprecated use matrix_normal_lpdf */ template inline return_type_t multi_normal_log( const T_y& y, const T_loc& mu, const T_covar& Sigma) { return multi_normal_lpdf(y, mu, Sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_rng.hpp0000644000176200001440000000443114645137106023544 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_RNG_HPP #define STAN_MATH_PRIM_PROB_PARETO_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Pareto random variate for the given shape and scale * parameters using the specified random number generator. * * y_min and alpha can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_scale type of scale parameter * @tparam T_shape type of shape parameter * @tparam RNG type of random number generator * @param y_min (Sequence of) positive scale parameter(s) * @param alpha (Sequence of) positive shape parameter(s) * @param rng random number generator * @return (Sequence of) Pareto random variate(s) * @throw std::domain_error if y_min or alpha are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type pareto_rng( const T_scale& y_min, const T_shape& alpha, RNG& rng) { using boost::exponential_distribution; using boost::variate_generator; static const char* function = "pareto_rng"; check_consistent_sizes(function, "Scale Parameter", y_min, "Shape parameter", alpha); const auto& y_min_ref = to_ref(y_min); const auto& alpha_ref = to_ref(alpha); check_positive_finite(function, "Scale parameter", y_min_ref); check_positive_finite(function, "Shape parameter", alpha_ref); scalar_seq_view y_min_vec(y_min_ref); scalar_seq_view alpha_vec(alpha_ref); size_t N = max_size(y_min, alpha); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > exp_rng( rng, exponential_distribution<>(alpha_vec[n])); output[n] = y_min_vec[n] * std::exp(exp_rng()); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_lcdf.hpp0000644000176200001440000000434714645137106024074 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LCDF_HPP #define STAN_MATH_PRIM_PROB_POISSON_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t poisson_lcdf(const T_n& n, const T_rate& lambda) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_if_t::value, T_n>; using T_lambda_ref = ref_type_if_t::value, T_rate>; 
static const char* function = "poisson_lcdf"; check_consistent_sizes(function, "Random variable", n, "Rate parameter", lambda); T_n_ref n_ref = n; T_lambda_ref lambda_ref = lambda; decltype(auto) n_val = to_ref(as_value_column_array_or_scalar(n_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_nonnegative(function, "Rate parameter", lambda_val); if (size_zero(n, lambda)) { return 0; } auto ops_partials = make_partials_propagator(lambda_ref); if (sum(promote_scalar(n_val < 0))) { return ops_partials.build(negative_infinity()); } const auto& log_Pi = to_ref(log(gamma_q(n_val + 1.0, lambda_val))); T_partials_return P = sum(log_Pi); if (!is_constant_all::value) { partials<0>(ops_partials) = -exp(n_val * log(lambda_val) - lambda_val - lgamma(n_val + 1.0) - log_Pi); } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_ccdf_log.hpp0000644000176200001440000000126314645137106024667 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use weibull_lccdf */ template return_type_t weibull_ccdf_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return weibull_lccdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_rng.hpp0000644000176200001440000000446614645137106023725 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_RNG_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Weibull random variate for the given shape * and scale parameters using the specified random number generator. * * alpha and sigma can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_shape type of shape parameter * @tparam T_scale type of scale parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) positive shape parameter(s) * @param sigma (Sequence of) positive scale parameter(s) * @param rng random number generator * @return (Sequence of) Weibull random variate(s) * @throw std::domain_error if alpha is nonpositive or sigma is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type weibull_rng( const T_shape& alpha, const T_scale& sigma, RNG& rng) { using boost::variate_generator; using boost::random::weibull_distribution; using T_alpha_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "weibull_rng"; check_consistent_sizes(function, "Shape parameter", alpha, "Scale Parameter", sigma); T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(alpha, sigma); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > weibull_rng( rng, weibull_distribution<>(alpha_vec[n], sigma_vec[n])); output[n] = weibull_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/ordered_probit_lpmf.hpp0000644000176200001440000001020714645137106025423 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_ORDERED_PROBIT_LPMF_HPP #define STAN_MATH_PRIM_PROB_ORDERED_PROBIT_LPMF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Returns the (natural) log probability of the specified array * of integers given the vector of continuous locations and * array of specified cutpoints in 
an ordered probit model. * *

Typically the continuous location * will be the dot product of a vector of regression coefficients * and a vector of predictors for the outcome. * * @tparam propto True if calculating up to a proportion. * @tparam T_y Type of y variable - `int` or `std::vector`. * @tparam T_loc Location type - Eigen vector or scalar. * @tparam T_cut Cut-point type - Eigen vector or a std vector of Eigen vectors. * @param y integer or Array of integers * @param lambda Location. * @param c Positive increasing vectors of cutpoints. * @return Log probability of outcome given location and * cutpoints. * @throw std::domain_error If the outcome is not between 1 and * the number of cutpoints plus 2; if the cutpoint vector contains a * non-positive or non-finite value; or if the cutpoint vector is not sorted in * ascending order. * @throw std::invalid_argument If y and lambda are different * lengths; if the cutpoint vector is empty; if y and the array of cutpoints are * of different lengths. */ template return_type_t ordered_probit_lpmf(const T_y& y, const T_loc& lambda, const T_cut& c) { using std::exp; using std::log; using T_lambda_ref = ref_type_t; static const char* function = "ordered_probit"; check_nonzero_size(function, "Cut-points", c); int N = math::size(lambda); int C_l = size_mvt(c); vector_seq_view c_vec(c); int K = c_vec[0].size() + 1; check_consistent_sizes(function, "Integers", y, "Locations", lambda); if (C_l > 1) { check_size_match(function, "Length of location variables ", N, "Number of cutpoint vectors ", C_l); } check_bounded(function, "Random variable", y, 1, K); scalar_seq_view y_vec(y); check_nonzero_size(function, "First cutpoint set", c_vec[0]); for (int i = 0; i < size_mvt(c); ++i) { check_size_match(function, "One cutpoint set", K - 1, "First cutpoint set", c_vec[i].size()); check_ordered(function, "Cut-points", c_vec[i]); if (K > 2) { check_finite(function, "Final cut point", c_vec[i].coeff(K - 2)); } check_finite(function, "First cut point", c_vec[i].coeff(0)); 
} T_lambda_ref lambda_ref = lambda; check_finite(function, "Location parameter", lambda_ref); scalar_seq_view lambda_vec(lambda_ref); return_type_t logp_n(0.0); for (int i = 0; i < N; ++i) { int K = c_vec[i].size() + 1; if (y_vec[i] == 1) { logp_n += std_normal_lcdf(c_vec[i].coeff(0) - lambda_vec[i]); } else if (y_vec[i] == K) { logp_n += std_normal_lcdf(lambda_vec[i] - c_vec[i].coeff(K - 2)); } else { logp_n += log_diff_exp( std_normal_lcdf(c_vec[i].coeff(y_vec[i] - 1) - lambda_vec[i]), std_normal_lcdf(c_vec[i].coeff(y_vec[i] - 2) - lambda_vec[i])); } } return logp_n; } template return_type_t ordered_probit_lpmf(const T_y& y, const T_loc& lambda, const T_cut& c) { return ordered_probit_lpmf(y, lambda, c); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_cdf.hpp0000644000176200001440000001031614645137106024342 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_CDF_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the chi square cumulative distribution function for the given * variate and degrees of freedom. If given containers of matching sizes, * returns the product of probabilities. 
* * @tparam T_y type of scalar parameter * @tparam T_dof type of degrees of freedom parameter * @param y scalar parameter * @param nu degrees of freedom parameter * @return probability or product of probabilities * @throw std::domain_error if y is negative or nu is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t chi_square_cdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using std::exp; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; static const char* function = "chi_square_cdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; check_not_nan(function, "Random variable", y_ref); check_nonnegative(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); if (size_zero(y, nu)) { return 1.0; } T_partials_return cdf(1.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); size_t N = max_size(y, nu); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return alpha_dbl = nu_vec.val(i) * 0.5; gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return alpha_dbl = nu_vec.val(n) * 0.5; const 
T_partials_return beta_dbl = 0.5; const T_partials_return Pn = gamma_p(alpha_dbl, beta_dbl * y_dbl); cdf *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += beta_dbl * exp(-beta_dbl * y_dbl) * pow(beta_dbl * y_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= 0.5 * grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= cdf; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(nu); ++n) { partials<1>(ops_partials)[n] *= cdf; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lognormal_lcdf.hpp0000644000176200001440000000666714645137106024403 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_LCDF_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t lognormal_lcdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "lognormal_lcdf"; T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, 
mu, sigma)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); if (sum(promote_scalar(y_val == 0))) { return ops_partials.build(NEGATIVE_INFTY); } const auto& log_y = log(y_val); const auto& scaled_diff = to_ref_if::value>( (log_y - mu_val) / (sigma_val * SQRT_TWO)); const auto& erfc_calc = to_ref_if::value>( erfc(-scaled_diff)); size_t N = max_size(y, mu, sigma); T_partials_return cdf_log = N * LOG_HALF + sum(log(erfc_calc)); if (!is_constant_all::value) { const auto& exp_m_sq_diff = exp(-scaled_diff * scaled_diff); const auto& rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>( -SQRT_TWO_OVER_SQRT_PI * exp_m_sq_diff / (sigma_val * erfc_calc)); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv / y_val; } if (!is_constant_all::value) { partials<1>(ops_partials) = rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = rep_deriv * scaled_diff * SQRT_TWO; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_proportion_rng.hpp0000644000176200001440000000463714645137106025470 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_PROPORTION_RNG_HPP #define STAN_MATH_PRIM_PROB_BETA_PROPORTION_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Beta random variate specified probability, location, and * precision parameters: beta_proportion_rng(y | mu, kappa) = * beta_rng(y | mu * kappa, (1 - mu) * kappa). Any arguments other * than scalars must be containers of the same size. With non-scalar * arguments, the return is a container matching the size of the * arguments with scalars broadcast as necessary. 
* * @tparam T_loc Type of location parameter * @tparam T_prec Type of precision parameter * @tparam RNG type of random number generator * @param mu (Sequence of) location parameter(s) in (0, 1) * @param kappa (Sequence of) positive finite precision parameter(s) * @param rng random number generator * @return (Sequence of) beta random variate(s) * @throw std::domain_error if mu is outside of (0, 1) * @throw std::domain_error if kappa is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type beta_proportion_rng(const T_loc &mu, const T_prec &kappa, RNG &rng) { using T_mu_ref = ref_type_t; using T_kappa_ref = ref_type_t; static const char *function = "beta_proportion_rng"; check_consistent_sizes(function, "Location parameter", mu, "Precision parameter", kappa); T_mu_ref mu_ref = mu; T_kappa_ref kappa_ref = kappa; check_positive(function, "Location parameter", mu_ref); check_less(function, "Location parameter", mu_ref, 1.0); check_positive_finite(function, "Precision parameter", kappa_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view kappa_vec(kappa_ref); size_t N = max_size(mu, kappa); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { double alpha = mu_vec[n] * kappa_vec[n]; double beta = kappa_vec[n] - alpha; output[n] = beta_rng(alpha, beta, rng); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_gamma_lccdf.hpp0000644000176200001440000001033514645137106024475 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_GAMMA_LCCDF_HPP #define STAN_MATH_PRIM_PROB_INV_GAMMA_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t inv_gamma_lccdf(const T_y& y, const T_shape& alpha, const T_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref 
= ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; using std::exp; using std::log; using std::pow; static const char* function = "inv_gamma_lccdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Scale Parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", beta_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, alpha, beta)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(y, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_shape> gamma_vec(math::size(alpha)); VectorBuilder::value, T_partials_return, T_shape> digamma_vec(math::size(alpha)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(alpha); i++) { const T_partials_return alpha_dbl = alpha_vec.val(i); gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(negative_infinity()); } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return Pn = gamma_p(alpha_dbl, beta_dbl * y_inv_dbl); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= beta_dbl * 
y_inv_dbl * y_inv_dbl * exp(-beta_dbl * y_inv_dbl) * pow(beta_dbl * y_inv_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] -= grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_inv_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += y_inv_dbl * exp(-beta_dbl * y_inv_dbl) * pow(beta_dbl * y_inv_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_cdf.hpp0000644000176200001440000001034614645137106025221 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_CDF_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the inverse chi square cumulative distribution function for the * given variate and degrees of freedom. If given containers of matching * sizes, returns the product of probabilities. 
* * @tparam T_y type of scalar parameter * @tparam T_dof type of degrees of freedom parameter * @param y scalar parameter * @param nu degrees of freedom parameter * @return probability or product of probabilities * @throw std::domain_error if y is negative or nu is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template return_type_t inv_chi_square_cdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using std::exp; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; static const char* function = "inv_chi_square_cdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, nu)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); size_t N = max_size(y, nu); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return nu_dbl = nu_vec.val(i); gamma_vec[i] = tgamma(0.5 * nu_dbl); digamma_vec[i] = digamma(0.5 * nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return nu_dbl = nu_vec.val(n); const 
T_partials_return Pn = gamma_q(0.5 * nu_dbl, 0.5 * y_inv_dbl); P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += 0.5 * y_inv_dbl * y_inv_dbl * exp(-0.5 * y_inv_dbl) * pow(0.5 * y_inv_dbl, 0.5 * nu_dbl - 1) / tgamma(0.5 * nu_dbl) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += 0.5 * grad_reg_inc_gamma(0.5 * nu_dbl, 0.5 * y_inv_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(nu); ++n) { partials<1>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_lpdf.hpp0000644000176200001440000000766214645137106024750 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of an exponential density for y with the specified * inverse scale parameter. * Inverse scale parameter must be greater than 0. * y must be greater than or equal to 0. * \f{eqnarray*}{ y &\sim& \mbox{\sf{Expon}}(\beta) \\ \log (p (y \, |\, \beta) ) &=& \log \left( \beta \exp^{-\beta y} \right) \\ &=& \log (\beta) - \beta y \\ & & \mathrm{where} \; y > 0 \f} * * @tparam T_y type of scalar * @tparam T_inv_scale type of inverse scale * @param y A scalar variable. * @param beta Inverse scale parameter. * @throw std::domain_error if beta is not greater than 0. * @throw std::domain_error if y is not greater than or equal to 0. 
*/ template * = nullptr> return_type_t exponential_lpdf(const T_y& y, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using T_y_ref = ref_type_if_t::value, T_y>; using T_beta_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exponential_lpdf"; check_consistent_sizes(function, "Random variable", y, "Inverse scale parameter", beta); T_y_ref y_ref = y; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Inverse scale parameter", beta_val); if (size_zero(y, beta)) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, beta_ref); T_partials_return logp(0.0); if (include_summand::value) { logp = sum(log(beta_val)) * max_size(y, beta) / math::size(beta); } if (include_summand::value) { logp -= sum(beta_val * y_val); } if (!is_constant_all::value) { using beta_val_scalar = scalar_type_t; using beta_val_array = Eigen::Array; if (is_vector::value && !is_vector::value) { partials<0>(ops_partials) = T_partials_array::Constant( math::size(y), -forward_as(beta_val)); } else if (is_vector::value) { partials<0>(ops_partials) = -forward_as(beta_val); } else { forward_as>( partials<0>(ops_partials)) = -forward_as(beta_val); } } if (!is_constant_all::value) { partials<1>(ops_partials) = inv(beta_val) - y_val; } return ops_partials.build(logp); } template inline return_type_t exponential_lpdf( const T_y& y, const T_inv_scale& beta) { return exponential_lpdf(y, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_rng.hpp0000644000176200001440000000336114645137106024246 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_RNG_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_RNG_HPP #include #include #include #include #include 
#include namespace stan { namespace math { /** \ingroup prob_dists * Return a Bernoulli random variate with specified chance of success * parameter using the specified random number generator. * * theta can be a scalar or a one-dimensional container. * * @tparam T_theta type of chance of success parameter * @tparam RNG type of random number generator * @param theta (Sequence of) chance of success parameter(s) * @param rng random number generator * @return (Sequence of) Bernoulli random variate(s) * @throw std::domain_error if chance of success parameter is less than zero or * greater than one. */ template inline typename VectorBuilder::type bernoulli_rng( const T_theta& theta, RNG& rng) { using boost::bernoulli_distribution; using boost::variate_generator; static const char* function = "bernoulli_rng"; ref_type_t theta_ref = theta; check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); scalar_seq_view theta_vec(theta_ref); size_t N = stan::math::size(theta); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > bernoulli_rng( rng, bernoulli_distribution<>(theta_vec[n])); output[n] = bernoulli_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/wishart_rng.hpp0000644000176200001440000000225014645137106023730 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WISHART_RNG_HPP #define STAN_MATH_PRIM_PROB_WISHART_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { template inline Eigen::MatrixXd wishart_rng(double nu, const Eigen::MatrixXd& S, RNG& rng) { using Eigen::MatrixXd; static const char* function = "wishart_rng"; index_type_t k = S.rows(); check_square(function, "scale parameter", S); check_symmetric(function, "scale parameter", S); check_greater(function, "degrees of freedom > dims - 1", nu, k - 1); Eigen::LLT llt_of_S = S.llt(); check_pos_definite("wishart_rng", "scale parameter", llt_of_S); MatrixXd 
B = MatrixXd::Zero(k, k); for (int j = 0; j < k; ++j) { for (int i = 0; i < j; ++i) { B(i, j) = normal_rng(0, 1, rng); } B(j, j) = std::sqrt(chi_square_rng(nu - j, rng)); } return crossprod(B * llt_of_S.matrixU()); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_wishart_rng.hpp0000644000176200001440000000212214645137106024602 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_WISHART_RNG_HPP #define STAN_MATH_PRIM_PROB_INV_WISHART_RNG_HPP #include #include #include #include namespace stan { namespace math { template inline Eigen::MatrixXd inv_wishart_rng(double nu, const Eigen::MatrixXd& S, RNG& rng) { using Eigen::MatrixXd; static const char* function = "inv_wishart_rng"; index_type_t k = S.rows(); check_greater(function, "degrees of freedom > dims - 1", nu, k - 1); check_square(function, "scale parameter", S); check_symmetric(function, "scale parameter", S); MatrixXd S_inv = MatrixXd::Identity(k, k); Eigen::LDLT ldlt_of_S = S.ldlt(); check_pos_definite("inv_wishart_rng", "scale parameter", ldlt_of_S); S_inv = ldlt_of_S.solve(S_inv); MatrixXd asym = inverse_spd(wishart_rng(nu, S_inv, rng)); return 0.5 * (asym.transpose() + asym); // ensure symmetry } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/wishart_lpdf.hpp0000644000176200001440000000762214645137106024077 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WISHART_LPDF_HPP #define STAN_MATH_PRIM_PROB_WISHART_LPDF_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the Wishart density for the given W, degrees of freedom, * and scale matrix. * * The scale matrix, S, must be k x k, symmetric, and semi-positive definite. * Dimension, k, is implicit. 
* nu must be greater than k-1 * * \f{eqnarray*}{ W &\sim& \mbox{\sf{Wishart}}_{\nu} (S) \\ \log (p (W \, |\, \nu, S) ) &=& \log \left( \left(2^{\nu k/2} \pi^{k (k-1) /4} \prod_{i=1}^k{\Gamma (\frac{\nu + 1 - i}{2})} \right)^{-1} \times \left| S \right|^{-\nu/2} \left| W \right|^{(\nu - k - 1) / 2} \times \exp (-\frac{1}{2} \mbox{tr} (S^{-1} W)) \right) \\ &=& -\frac{\nu k}{2}\log(2) - \frac{k (k-1)}{4} \log(\pi) - \sum_{i=1}^{k}{\log (\Gamma (\frac{\nu+1-i}{2}))} -\frac{\nu}{2} \log(\det(S)) + \frac{\nu-k-1}{2}\log (\det(W)) - \frac{1}{2} \mbox{tr} (S^{-1}W) \f} * * @tparam T_y type of matrix * @tparam T_dof type of degrees of freedom * @tparam T_scale type of scale * @param W A scalar matrix * @param nu Degrees of freedom * @param S The scale matrix * @return The log of the Wishart density at W given nu and S. * @throw std::domain_error if nu is not greater than k-1 * @throw std::domain_error if S is not square, not symmetric, or not * semi-positive definite. */ template * = nullptr, require_all_matrix_t* = nullptr> return_type_t wishart_lpdf(const T_y& W, const T_dof& nu, const T_scale& S) { using Eigen::Dynamic; using Eigen::Lower; using Eigen::Matrix; using T_W_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_S_ref = ref_type_t; static const char* function = "wishart_lpdf"; Eigen::Index k = W.rows(); check_size_match(function, "Rows of random variable", W.rows(), "columns of scale parameter", S.rows()); T_W_ref W_ref = W; T_nu_ref nu_ref = nu; T_S_ref S_ref = S; check_greater(function, "Degrees of freedom parameter", nu_ref, k - 1); check_square(function, "random variable", W_ref); check_square(function, "scale parameter", S_ref); check_symmetric(function, "random variable", W_ref); check_symmetric(function, "scale parameter", S_ref); auto ldlt_W = make_ldlt_factor(W_ref); check_ldlt_factor(function, "LDLT_Factor of random variable", ldlt_W); auto ldlt_S = make_ldlt_factor(S_ref); check_ldlt_factor(function, "LDLT_Factor of scale parameter", ldlt_S); 
return_type_t lp(0.0); if (include_summand::value) { lp -= nu_ref * k * HALF_LOG_TWO; } if (include_summand::value) { lp -= lmgamma(k, 0.5 * nu_ref); } if (include_summand::value) { lp -= 0.5 * nu_ref * log_determinant_ldlt(ldlt_S); } if (include_summand::value) { lp -= 0.5 * trace(mdivide_left_ldlt(ldlt_S, W_ref)); } if (include_summand::value && nu_ref != (k + 1)) { lp += 0.5 * (nu_ref - k - 1.0) * log_determinant_ldlt(ldlt_W); } return lp; } template inline return_type_t wishart_lpdf(const T_y& W, const T_dof& nu, const T_scale& S) { return wishart_lpdf(W, nu, S); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_cdf.hpp0000644000176200001440000001117114645137106024540 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_CDF_HPP #define STAN_MATH_PRIM_PROB_SKEW_NORMAL_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t skew_normal_cdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; static const char* function = "skew_normal_cdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Shape paramter", alpha); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); 
check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", sigma_val); check_finite(function, "Shape parameter", alpha_val); if (size_zero(y, mu, sigma, alpha)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, alpha_ref); const auto& diff = to_ref((y_val - mu_val) / sigma_val); const auto& scaled_diff = to_ref_if::value>(diff / SQRT_TWO); const auto& erfc_m_scaled_diff = erfc(-scaled_diff); const auto& owens_t_diff_alpha = owens_t(diff, alpha_val); const auto& cdf_ = to_ref_if::value>( 0.5 * erfc_m_scaled_diff - 2 * owens_t_diff_alpha); T_partials_return cdf = prod(cdf_); if (!is_constant_all::value) { const auto& cdf_quot = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(cdf / cdf_); const auto& diff_square = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(square(diff)); if (!is_constant_all::value) { const auto& erf_alpha_scaled_diff = erf(alpha_val * scaled_diff); const auto& exp_m_scaled_diff_square = exp(-0.5 * diff_square); auto rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>((erf_alpha_scaled_diff + 1) * INV_SQRT_TWO_PI * cdf_quot / sigma_val * exp_m_scaled_diff_square); if (!is_constant_all::value) { partials<1>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = -rep_deriv * diff; } if (!is_constant_all::value) { partials<0>(ops_partials) = std::move(rep_deriv); } } if (!is_constant_all::value) { const auto& alpha_square = square(alpha_val); const auto& exp_tmp = exp(-0.5 * diff_square * (1.0 + alpha_square)); edge<3>(ops_partials).partials_ = -2.0 * exp_tmp / ((1 + alpha_square) * TWO_PI) * cdf_quot; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_ccdf_log.hpp0000644000176200001440000000120014645137106027073 0ustar 
liggesusers#ifndef STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use double_exponential_lccdf */ template return_type_t double_exponential_ccdf_log( const T_y& y, const T_loc& mu, const T_scale& sigma) { return double_exponential_lccdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/wiener_lpdf.hpp0000644000176200001440000002116314645137106023703 0ustar liggesusers// Original code from which Stan's code is derived: // Copyright (c) 2013, Joachim Vandekerckhove. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted // provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // * this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // * this list of conditions and the following disclaimer in the // * documentation and/or other materials provided with the distribution. // * Neither the name of the University of California, Irvine nor the names // * of its contributors may be used to endorse or promote products derived // * from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. #ifndef STAN_MATH_PRIM_PROB_WIENER_LPDF_HPP #define STAN_MATH_PRIM_PROB_WIENER_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of the first passage time density function for a (Wiener) * drift diffusion model for the given \f$y\f$, * boundary separation \f$\alpha\f$, nondecision time \f$\tau\f$, * relative bias \f$\beta\f$, and drift rate \f$\delta\f$. * \f$\alpha\f$ and \f$\tau\f$ must be greater than 0, and * \f$\beta\f$ must be between 0 and 1. \f$y\f$ should contain * reaction times in seconds (strictly positive) with * upper-boundary responses. * * @tparam T_y type of scalar * @tparam T_alpha type of alpha parameter * @tparam T_tau type of tau parameter * @tparam T_beta type of beta parameter * @tparam T_delta type of delta parameter * * @param y A scalar variate. * @param alpha The boundary separation. * @param tau The nondecision time. * @param beta The relative bias. * @param delta The drift rate. * @return The log of the Wiener first passage time density of * the specified arguments. 
*/ template return_type_t wiener_lpdf( const T_y& y, const T_alpha& alpha, const T_tau& tau, const T_beta& beta, const T_delta& delta) { using T_return_type = return_type_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_tau_ref = ref_type_t; using T_beta_ref = ref_type_t; using T_delta_ref = ref_type_t; using std::ceil; using std::exp; using std::floor; using std::log; using std::sin; using std::sqrt; static const char* function = "wiener_lpdf"; check_consistent_sizes(function, "Random variable", y, "Boundary separation", alpha, "A-priori bias", beta, "Nondecision time", tau, "Drift rate", delta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_tau_ref tau_ref = tau; T_beta_ref beta_ref = beta; T_delta_ref delta_ref = delta; check_positive(function, "Random variable", value_of(y_ref)); check_positive_finite(function, "Boundary separation", value_of(alpha_ref)); check_positive_finite(function, "Nondecision time", value_of(tau_ref)); check_bounded(function, "A-priori bias", value_of(beta_ref), 0, 1); check_finite(function, "Drift rate", value_of(delta_ref)); if (size_zero(y, alpha, beta, tau, delta)) { return 0; } T_return_type lp(0.0); size_t N = max_size(y, alpha, beta, tau, delta); if (!N) { return 0.0; } scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); scalar_seq_view tau_vec(tau_ref); scalar_seq_view delta_vec(delta_ref); size_t N_y_tau = max_size(y, tau); for (size_t i = 0; i < N_y_tau; ++i) { if (y_vec[i] <= tau_vec[i]) { std::stringstream msg; msg << ", but must be greater than nondecision time = " << tau_vec[i]; std::string msg_str(msg.str()); throw_domain_error(function, "Random variable", y_vec[i], " = ", msg_str.c_str()); } } if (!include_summand::value) { return 0; } static const double WIENER_ERR = 0.000001; static const double PI_TIMES_WIENER_ERR = pi() * WIENER_ERR; static const double LOG_PI_LOG_WIENER_ERR = LOG_PI + log(WIENER_ERR); static const double 
TWO_TIMES_SQRT_TWO_PI_TIMES_WIENER_ERR = 2.0 * SQRT_TWO_PI * WIENER_ERR; static const double LOG_TWO_OVER_TWO_PLUS_LOG_SQRT_PI = LOG_TWO / 2 + LOG_SQRT_PI; static const double SQUARE_PI_OVER_TWO = square(pi()) * 0.5; static const double TWO_TIMES_LOG_SQRT_PI = 2.0 * LOG_SQRT_PI; for (size_t i = 0; i < N; i++) { typename scalar_type::type one_minus_beta = 1.0 - beta_vec[i]; typename scalar_type::type alpha2 = square(alpha_vec[i]); T_return_type x = (y_vec[i] - tau_vec[i]) / alpha2; T_return_type kl, ks, tmp = 0; T_return_type k, K; T_return_type sqrt_x = sqrt(x); T_return_type log_x = log(x); T_return_type one_over_pi_times_sqrt_x = 1.0 / pi() * sqrt_x; // calculate number of terms needed for large t: // if error threshold is set low enough if (PI_TIMES_WIENER_ERR * x < 1) { // compute bound kl = sqrt(-2.0 * SQRT_PI * (LOG_PI_LOG_WIENER_ERR + log_x)) / sqrt_x; // ensure boundary conditions met kl = (kl > one_over_pi_times_sqrt_x) ? kl : one_over_pi_times_sqrt_x; } else { kl = one_over_pi_times_sqrt_x; // set to boundary condition } // calculate number of terms needed for small t: // if error threshold is set low enough T_return_type tmp_expr0 = TWO_TIMES_SQRT_TWO_PI_TIMES_WIENER_ERR * sqrt_x; if (tmp_expr0 < 1) { // compute bound ks = 2.0 + sqrt_x * sqrt(-2 * log(tmp_expr0)); // ensure boundary conditions are met T_return_type sqrt_x_plus_one = sqrt_x + 1.0; ks = (ks > sqrt_x_plus_one) ? ks : sqrt_x_plus_one; } else { // if error threshold was set too high ks = 2.0; // minimal kappa for that case } if (ks < kl) { // small t K = ceil(ks); // round to smallest integer meeting error T_return_type tmp_expr1 = (K - 1.0) / 2.0; T_return_type tmp_expr2 = ceil(tmp_expr1); for (k = -floor(tmp_expr1); k <= tmp_expr2; k++) { tmp += (one_minus_beta + 2.0 * k) * exp(-(square(one_minus_beta + 2.0 * k)) * 0.5 / x); } tmp = log(tmp) - LOG_TWO_OVER_TWO_PLUS_LOG_SQRT_PI - 1.5 * log_x; } else { // if large t is better... 
K = ceil(kl); // round to smallest integer meeting error for (k = 1; k <= K; ++k) { tmp += k * exp(-(square(k)) * (SQUARE_PI_OVER_TWO * x)) * sin(k * pi() * one_minus_beta); } tmp = log(tmp) + TWO_TIMES_LOG_SQRT_PI; } // convert to f(t|v,a,w) and return result lp += delta_vec[i] * alpha_vec[i] * one_minus_beta - square(delta_vec[i]) * x * alpha2 / 2.0 - log(alpha2) + tmp; } return lp; } template inline return_type_t wiener_lpdf( const T_y& y, const T_alpha& alpha, const T_tau& tau, const T_beta& beta, const T_delta& delta) { return wiener_lpdf(y, alpha, tau, beta, delta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_gp_cholesky_log.hpp0000644000176200001440000000410414645137106025763 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_GP_CHOLESKY_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTI_GP_CHOLESKY_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of a multivariate Gaussian Process for the given y, w, and * a Cholesky factor L of the kernel matrix Sigma. * Sigma = LL', a square, semi-positive definite matrix. * y is a dxN matrix, where each column is a different observation and each * row is a different output dimension. The Gaussian Process is assumed to * have a scaled kernel matrix with a different scale for each output dimension. * This distribution is equivalent to: * for (i in 1:d) row(y, i) ~ multi_normal(0, (1/w[i])*LL'). * * @deprecated use multi_gp_cholesky_lpdf * * @param y A dxN matrix * @param L The Cholesky decomposition of a kernel matrix * @param w A d-dimensional vector of positive inverse scale parameters for each * output. * @return The log of the multivariate GP density. * @throw std::domain_error if Sigma is not square, not symmetric, * or not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_covar Type of kernel. * @tparam T_w Type of weight. 
*/ template return_type_t multi_gp_cholesky_log(const T_y& y, const T_covar& L, const T_w& w) { return multi_gp_cholesky_lpdf(y, L, w); } /** \ingroup multivar_dists * @deprecated use multi_gp_cholesky_lpdf */ template inline return_type_t multi_gp_cholesky_log(const T_y& y, const T_covar& L, const T_w& w) { return multi_gp_cholesky_lpdf<>(y, L, w); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_log_rng.hpp0000644000176200001440000000355414645137106024612 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_LOG_RNG_HPP #define STAN_MATH_PRIM_PROB_POISSON_LOG_RNG_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Poisson random variate with specified log rate parameter * using the given random number generator. * * lambda can be a scalar or a one-dimensional container. * * @tparam T_rate type of log rate parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) log rate parameter(s) * @param rng random number generator * @return (Sequence of) Poisson random variate(s) * @throw std::domain_error if alpha is nonfinite */ template inline typename VectorBuilder::type poisson_log_rng( const T_rate& alpha, RNG& rng) { using boost::variate_generator; using boost::random::poisson_distribution; static const char* function = "poisson_log_rng"; static const double POISSON_MAX_LOG_RATE = 30 * LOG_TWO; const auto& alpha_ref = to_ref(alpha); check_finite(function, "Log rate parameter", alpha_ref); check_less(function, "Log rate parameter", alpha_ref, POISSON_MAX_LOG_RATE); scalar_seq_view alpha_vec(alpha_ref); size_t N = stan::math::size(alpha); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > poisson_rng( rng, poisson_distribution<>(std::exp(alpha_vec[n]))); output[n] = poisson_rng(); } return output.data(); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/binomial_cdf_log.hpp0000644000176200001440000000111014645137106024642 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use binomial_lcdf */ template return_type_t binomial_cdf_log(const T_n& n, const T_N& N, const T_prob& theta) { return binomial_lcdf(n, N, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/wishart_cholesky_lpdf.hpp0000644000176200001440000001002214645137106025764 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WISHART_CHOLESKY_LPDF_HPP #define STAN_MATH_PRIM_PROB_WISHART_CHOLESKY_LPDF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return the natural logarithm of the unnormalized * Wishart density of the specified lower-triangular * Cholesky factor variate, positive degrees of freedom, * and lower-triangular Cholesky factor of the scale matrix. * * The scale matrix, `L_S`, must be a lower Cholesky factor * dimension, k, is implicit * nu must be greater than k-1 * * The change of variables from the input positive-definite matrix to * the Cholesky factor is given in Theorem 2.1.9 in * Muirhead, R. J. (2005). * Aspects of Multivariate Statistical Theory. Wiley-Interscience. 
* @tparam T_y Cholesky factor matrix * @tparam T_dof scalar degrees of freedom * @tparam T_scale Cholesky factor matrix * @param L_Y lower triangular Cholesky factor of the inverse covariance matrix * @param nu scalar degrees of freedom * @param L_S lower triangular Cholesky factor of the scale matrix * @return natural logarithm of the Wishart density at L_Y given nu and L_S * @throw std::domain_error if nu is not greater than k-1 * @throw std::domain_error if L_S is not a valid Cholesky factor */ template * = nullptr, require_all_matrix_t* = nullptr> return_type_t wishart_cholesky_lpdf(const T_y& L_Y, const T_dof& nu, const T_scale& L_S) { using Eigen::Lower; using T_L_Y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_L_S_ref = ref_type_t; using T_return = return_type_t; static const char* function = "wishart_cholesky_lpdf"; Eigen::Index k = L_Y.rows(); check_size_match(function, "Rows of RSandom variable", L_Y.rows(), "columns of scale parameter", L_S.rows()); check_size_match(function, "Rows of random variable", L_Y.rows(), "columns of random variable", L_Y.cols()); check_size_match(function, "Rows of scale parameter", L_S.rows(), "columns of scale parameter", L_S.cols()); T_L_Y_ref L_Y_ref = L_Y; T_nu_ref nu_ref = nu; T_L_S_ref L_S_ref = L_S; check_greater(function, "Degrees of freedom parameter", nu_ref, k - 1); check_positive(function, "Cholesky Random variable", L_Y_ref.diagonal()); check_positive(function, "columns of Cholesky Random variable", L_Y_ref.cols()); check_positive(function, "Cholesky scale matrix", L_S_ref.diagonal()); check_positive(function, "columns of Cholesky scale matrix", L_S_ref.cols()); T_return lp(0.0); if (include_summand::value) { lp += k * LOG_TWO * (1 - 0.5 * nu_ref); lp += -lmgamma(k, 0.5 * nu_ref); } if (include_summand::value) { auto L_SinvL_Y = mdivide_left_tri(L_S_ref, L_Y_ref); T_return dot_LSinvLY(0.0); Eigen::Matrix linspaced_rv(k); T_return nu_minus_1 = nu_ref - 1; for (int i = 0; i < k; i++) { dot_LSinvLY += 
dot_self(L_SinvL_Y.row(i).head(i + 1)); linspaced_rv(i) = nu_minus_1 - i; } lp += -0.5 * dot_LSinvLY + dot_product(linspaced_rv, log(L_Y_ref.diagonal())) - nu_ref * sum(log(L_S_ref.diagonal())); } return lp; } template inline return_type_t wishart_cholesky_lpdf( const T_y& LW, const T_dof& nu, const T_scale& L_S) { return wishart_cholesky_lpdf(LW, nu, L_S); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/categorical_rng.hpp0000644000176200001440000000167014645137106024531 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CATEGORICAL_RNG_HPP #define STAN_MATH_PRIM_PROB_CATEGORICAL_RNG_HPP #include #include #include #include #include namespace stan { namespace math { template inline int categorical_rng( const Eigen::Matrix& theta, RNG& rng) { using boost::uniform_01; using boost::variate_generator; static const char* function = "categorical_rng"; check_simplex(function, "Probabilities parameter", theta); variate_generator > uniform01_rng(rng, uniform_01<>()); Eigen::VectorXd index(theta.rows()); index.setZero(); index = cumulative_sum(theta); double c = uniform01_rng(); int b = 0; while (c > index(b, 0)) { b++; } return b + 1; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/poisson_rng.hpp0000644000176200001440000000337514645137106023752 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_POISSON_RNG_HPP #define STAN_MATH_PRIM_PROB_POISSON_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Poisson random variate with specified rate parameter * using the given random number generator. * * lambda can be a scalar or a one-dimensional container. 
* * @tparam T_rate type of rate parameter * @tparam RNG type of random number generator * @param lambda (Sequence of) rate parameter(s) * @param rng random number generator * @return (Sequence of) Poisson random variate(s) * @throw std::domain_error if lambda is nonpositive */ template inline typename VectorBuilder::type poisson_rng( const T_rate& lambda, RNG& rng) { using boost::variate_generator; using boost::random::poisson_distribution; static const char* function = "poisson_rng"; const auto& lambda_ref = to_ref(lambda); check_positive(function, "Rate parameter", lambda_ref); check_less(function, "Rate parameter", lambda_ref, POISSON_MAX_RATE); scalar_seq_view lambda_vec(lambda_ref); size_t N = stan::math::size(lambda); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { variate_generator > poisson_rng( rng, poisson_distribution<>(lambda_vec[n])); output[n] = poisson_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_student_t_cholesky_lpdf.hpp0000644000176200001440000001264614645137106027544 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_CHOLESKY_LPDF_HPP #define STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_CHOLESKY_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the multivariate student t density for the given y, mu, * nu, and a Cholesky factor L of the scale matrix. * Sigma = LL', a square, semi-positive definite matrix. * * This version of the function is vectorized on y and mu. * * @param y scalar vector of random variates * @param nu scalar degrees of freedom * @param mu location vector * @param L cholesky decomposition of the scale matrix * of the multivariate student t distribution. * @return The log of the multivariate student t density. 
* @throw std::domain_error if LL' is not square, not symmetric, * or not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_dof Type of scalar. * @tparam T_loc Type of location. * @tparam T_scale Type of scale. * @return log probability of the multivariate student t distribution. */ template return_type_t multi_student_t_cholesky_lpdf( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& L) { using T_scale_elem = typename scalar_type::type; using lp_type = return_type_t; using Eigen::Matrix; using std::log; using std::vector; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_L_ref = ref_type_t; static const char* function = "multi_student_t_cholesky"; check_not_nan(function, "Degrees of freedom parameter", nu); check_positive(function, "Degrees of freedom parameter", nu); check_finite(function, "Degrees of freedom parameter", nu); size_t num_y = size_mvt(y); size_t num_mu = size_mvt(mu); check_consistent_sizes_mvt(function, "y", y, "mu", mu); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_L_ref L_ref = L; vector_seq_view y_vec(y_ref); vector_seq_view mu_vec(mu_ref); size_t size_vec = max_size_mvt(y_ref, mu_ref); if (size_vec == 0) { return lp_type(0); } for (size_t i = 1, size_mvt_y = num_y; i < size_mvt_y; i++) { check_size_match( function, "Size of one of the vectors of the random variable", y_vec[i].size(), "Size of another vector of the random variable", y_vec[i - 1].size()); } for (size_t i = 1, size_mvt_mu = num_mu; i < size_mvt_mu; i++) { check_size_match(function, "Size of one of the vectors " "of the location variable", mu_vec[i].size(), "Size of another vector of " "the location variable", mu_vec[i - 1].size()); } int num_dims = y_vec[0].size(); check_size_match(function, "Size of random variable", mu_vec[0].size(), "rows of scale parameter", L.rows()); check_size_match(function, "Size of random variable", num_dims, "size of location parameter", mu_vec[0].size()); check_size_match(function, "Size of random variable", 
num_dims, "rows of scale parameter", L.rows()); check_size_match(function, "Size of random variable", num_dims, "columns of scale parameter", L.cols()); if (unlikely(num_dims == 0)) { return lp_type(0); } for (size_t i = 0; i < size_vec; i++) { check_finite(function, "Location parameter", mu_vec[i]); check_not_nan(function, "Random variable", y_vec[i]); } check_cholesky_factor(function, "scale parameter", L_ref); lp_type lp(0); if (include_summand::value) { lp += lgamma(0.5 * (nu + num_dims)) * size_vec; lp += -lgamma(0.5 * nu) * size_vec; lp += -(0.5 * num_dims) * log(nu) * size_vec; } if (include_summand::value) { lp += -(0.5 * num_dims) * LOG_PI * size_vec; } if (include_summand::value) { lp += -sum(stan::math::log(L_ref.diagonal())) * size_vec; } if (include_summand::value) { lp_type sum_lp_vec(0.0); for (size_t i = 0; i < size_vec; i++) { const auto& y_col = as_column_vector_or_scalar(y_vec[i]); const auto& mu_col = as_column_vector_or_scalar(mu_vec[i]); sum_lp_vec += log1p( dot_self(mdivide_left_tri(L_ref, y_col - mu_col)) / nu); } lp += -0.5 * (nu + num_dims) * sum_lp_vec; } return lp; } template inline return_type_t multi_student_t_cholesky_lpdf( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& L) { return multi_student_t_cholesky_lpdf(y, nu, mu, L); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/double_exponential_rng.hpp0000644000176200001440000000467314645137106026142 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_DOUBLE_EXPONENTIAL_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a double exponential random variate with the given location * and scale using the specified random number generator. * * mu and sigma can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. 
* * @tparam T_loc Type of location parameter * @tparam T_scale Type of scale parameter * @tparam RNG class of random number generator * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) scale parameter(s) * @param rng random number generator * @return (Sequence of) double exponential random variate(s) * @throw std::domain_error if mu is infinite or sigma is nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type double_exponential_rng(const T_loc& mu, const T_scale& sigma, RNG& rng) { using boost::variate_generator; using boost::random::uniform_real_distribution; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "double_exponential_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma); T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(mu, sigma); VectorBuilder output(N); variate_generator > z_rng( rng, uniform_real_distribution<>(-1.0, 1.0)); for (size_t n = 0; n < N; ++n) { double z = z_rng(); output[n] = mu_vec[n] - ((z > 0) ? 1.0 : -1.0) * sigma_vec[n] * std::log(std::abs(z)); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/beta_rng.hpp0000644000176200001440000000715614645137106023174 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BETA_RNG_HPP #define STAN_MATH_PRIM_PROB_BETA_RNG_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a Beta random variate with the supplied success and failure * parameters using the given random number generator. 
* * alpha and beta can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_shape1 type of success parameter * @tparam T_shape2 type of failure parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) positive finite success parameter(s) * @param beta (Sequence of) positive finite failure parameter(s) * @param rng random number generator * @return (Sequence of) beta random variate(s) * @throw std::domain_error if alpha or beta are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type beta_rng( const T_shape1 &alpha, const T_shape2 &beta, RNG &rng) { using boost::variate_generator; using boost::random::gamma_distribution; using boost::random::uniform_real_distribution; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char *function = "beta_rng"; check_consistent_sizes(function, "First shape parameter", alpha, "Second shape Parameter", beta); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "First shape parameter", alpha_ref); check_positive_finite(function, "Second shape parameter", beta_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(alpha, beta); VectorBuilder output(N); variate_generator> uniform_rng( rng, uniform_real_distribution<>(0.0, 1.0)); for (size_t n = 0; n < N; ++n) { // If alpha and beta are large, trust the usual ratio of gammas // method for generating beta random variables. 
If any parameter // is small, work in log space and use Marsaglia and Tsang's trick if (alpha_vec[n] > 1.0 && beta_vec[n] > 1.0) { variate_generator> rng_gamma_alpha( rng, gamma_distribution<>(alpha_vec[n], 1.0)); variate_generator> rng_gamma_beta( rng, gamma_distribution<>(beta_vec[n], 1.0)); double a = rng_gamma_alpha(); double b = rng_gamma_beta(); output[n] = a / (a + b); } else { variate_generator> rng_gamma_alpha( rng, gamma_distribution<>(alpha_vec[n] + 1, 1.0)); variate_generator> rng_gamma_beta( rng, gamma_distribution<>(beta_vec[n] + 1, 1.0)); double log_a = std::log(uniform_rng()) / alpha_vec[n] + std::log(rng_gamma_alpha()); double log_b = std::log(uniform_rng()) / beta_vec[n] + std::log(rng_gamma_beta()); double log_sum = log_sum_exp(log_a, log_b); output[n] = std::exp(log_a - log_sum); } } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_lpdf.hpp0000644000176200001440000001135414645137106024056 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_LPDF_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the Weibull log probability density for the given * location and scale. Given containers of matching sizes, returns the * log sum of probability densities. 
* * @tparam T_y type of real parameter * @tparam T_shape type of shape parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param alpha shape parameter * @param sigma scale parameter * @return log probability density or log sum of probability densities * @throw std::domain_error if y is negative, alpha or sigma are nonpositive */ template * = nullptr> return_type_t weibull_lpdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using std::pow; static const char* function = "weibull_lpdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Scale parameter", sigma); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_finite(function, "Random variable", y_val); check_positive_finite(function, "Shape parameter", alpha_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, alpha, sigma)) { return 0; } if (!include_summand::value) { return 0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); if (sum(promote_scalar(y_val < 0))) { return LOG_ZERO; } const auto& log_y = to_ref_if::value>(log(y_val)); const auto& log_sigma = to_ref_if::value>( log(sigma_val)); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_div_sigma_pow_alpha = to_ref_if::value>( pow(y_val * inv_sigma, alpha_val)); size_t N = max_size(y, alpha, sigma); T_partials_return logp = -sum(y_div_sigma_pow_alpha); if (include_summand::value) { logp += sum(log(alpha_val)) * N / math::size(alpha); } if 
(include_summand::value) { logp += sum((alpha_val - 1.0) * log_y) * N / max_size(alpha, y); } if (include_summand::value) { logp -= sum(alpha_val * log_sigma) * N / max_size(alpha, sigma); } if (!is_constant_all::value) { edge<0>(ops_partials).partials_ = (alpha_val * (1 - y_div_sigma_pow_alpha) - 1.0) / y_val; } if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = inv(alpha_val) + (1.0 - y_div_sigma_pow_alpha) * (log_y - log_sigma); } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = alpha_val * inv_sigma * (y_div_sigma_pow_alpha - 1.0); } return ops_partials.build(logp); } template inline return_type_t weibull_lpdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return weibull_lpdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_corr_lpdf.hpp0000644000176200001440000000523014645137106024214 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_CORR_LPDF_HPP #define STAN_MATH_PRIM_PROB_LKJ_CORR_LPDF_HPP #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t do_lkj_constant(const T_shape& eta, const unsigned int& K) { // Lewandowski, Kurowicka, and Joe (2009) theorem 5 return_type_t constant; const int Km1 = K - 1; using stan::math::lgamma; if (eta == 1.0) { // C++ integer division is appropriate in this block Eigen::VectorXd denominator(Km1 / 2); for (int k = 1; k <= denominator.rows(); k++) { denominator(k - 1) = lgamma(2.0 * k); } constant = -denominator.sum(); if ((K % 2) == 1) { constant -= 0.25 * (K * K - 1) * LOG_PI - 0.25 * (Km1 * Km1) * LOG_TWO - Km1 * lgamma(0.5 * (K + 1)); } else { constant -= 0.25 * K * (K - 2) * LOG_PI + 0.25 * (3 * K * K - 4 * K) * LOG_TWO + K * lgamma(0.5 * K) - Km1 * lgamma(static_cast(K)); } } else { constant = Km1 * lgamma(eta + 0.5 * Km1); for (int k = 1; k <= Km1; k++) { constant -= 0.5 * k * LOG_PI + lgamma(eta + 0.5 * (Km1 - k)); } } return constant; } // 
LKJ_Corr(y|eta) [ y correlation matrix (not covariance matrix) // eta > 0; eta == 1 <-> uniform] template return_type_t lkj_corr_lpdf(const T_y& y, const T_shape& eta) { static const char* function = "lkj_corr_lpdf"; return_type_t lp(0.0); const auto& y_ref = to_ref(y); check_positive(function, "Shape parameter", eta); check_corr_matrix(function, "Correlation matrix", y_ref); const unsigned int K = y.rows(); if (K == 0) { return 0.0; } if (include_summand::value) { lp += do_lkj_constant(eta, K); } if (eta == 1.0 && is_constant_all>::value) { return lp; } if (!include_summand::value) { return lp; } value_type_t sum_values = sum(log(y_ref.ldlt().vectorD())); lp += (eta - 1.0) * sum_values; return lp; } template inline return_type_t lkj_corr_lpdf(const T_y& y, const T_shape& eta) { return lkj_corr_lpdf(y, eta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lognormal_log.hpp0000644000176200001440000000203414645137106024234 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_LOG_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use lognormal_lpdf */ template return_type_t lognormal_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return lognormal_lpdf(y, mu, sigma); } /** \ingroup prob_dists * @deprecated use lognormal_lpdf */ template inline return_type_t lognormal_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { return lognormal_lpdf(y, mu, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_lpdf.hpp0000644000176200001440000001251514645137106027324 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LPDF_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace 
math { /** \ingroup prob_dists * Returns the skew double exponential log probability density function. Given * containers of matching sizes, returns the log sum of densities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. * @tparam T_skewness type of skewness parameter. * @param y real parameter * @param mu location parameter * @param sigma scale parameter * @param tau skewness parameter * @return log probability or log sum of probabilities * @throw std::domain_error if mu is infinite or sigma is nonpositive or tau is * not bound between 0.0 and 1.0 * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t skew_double_exponential_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_tau_ref = ref_type_if_t::value, T_skewness>; static const char* function = "skew_double_exponential_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Shape parameter", sigma, "Skewness parameter", tau); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_tau_ref tau_ref = tau; if (size_zero(y, mu, sigma, tau)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, tau_ref); decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) tau_val = to_ref(as_value_column_array_or_scalar(tau_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale 
parameter", sigma_val); check_bounded(function, "Skewness parameter", tau_val, 0.0, 1.0); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_m_mu = to_ref_if::value>(y_val - mu_val); const auto& diff_sign = sign(y_m_mu); const auto& diff_sign_smaller_0 = step(-diff_sign); const auto& abs_diff_y_mu = fabs(y_m_mu); const auto& abs_diff_y_mu_over_sigma = abs_diff_y_mu * inv_sigma; const auto& expo = to_ref_if::value>( (diff_sign_smaller_0 + diff_sign * tau_val) * abs_diff_y_mu_over_sigma); size_t N = max_size(y, mu, sigma, tau); T_partials_return logp = -2.0 * sum(expo); if (include_summand::value) { logp += N * LOG_TWO; } if (include_summand::value) { logp -= sum(log(sigma_val)) * N / math::size(sigma); } if (include_summand::value) { logp += sum(log(tau_val) + log1m(tau_val)) * N / math::size(tau); } if (!is_constant_all::value) { const auto& deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( 2.0 * (diff_sign_smaller_0 + diff_sign * tau_val) * diff_sign * inv_sigma); if (!is_constant_all::value) { partials<0>(ops_partials) = -deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = deriv; } } if (!is_constant_all::value) { partials<2>(ops_partials) = -inv_sigma + 2.0 * expo * inv_sigma; } if (!is_constant_all::value) { edge<3>(ops_partials).partials_ = inv(tau_val) - inv(1.0 - tau_val) + (-1.0 * diff_sign) * 2.0 * abs_diff_y_mu_over_sigma; } return ops_partials.build(logp); } template return_type_t skew_double_exponential_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { return skew_double_exponential_lpdf(y, mu, sigma, tau); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_ccdf_log.hpp0000644000176200001440000000122714645137106025545 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_SKEW_NORMAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup 
prob_dists * @deprecated use skew_normal_lccdf */ template return_type_t skew_normal_ccdf_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { return skew_normal_lccdf(y, mu, sigma, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_lcdf.hpp0000644000176200001440000000667014645137106024405 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_LCDF_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_LCDF_HPP #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Calculates the log of the cumulative distribution function of the von Mises * distribution: * * \f$VonMisesLCDF(x, \mu, \kappa) = \log ( \frac{1}{2\pi I_0(\kappa)} * \int_{-\pi}^x\f$ \f$e^{\kappa cos(t - \mu)} dt )\f$ * * where * * \f$x \in [-\pi, \pi]\f$, \f$\mu \in \mathbb{R}\f$, * and \f$\kappa \in \mathbb{R}^+\f$. * * @param x Variate on the interval \f$[-pi, pi]\f$ * @param mu The mean of the distribution * @param k The inverse scale of the distriubtion * @return The log of the von Mises cdf evaluated at the specified arguments * @tparam T_x Type of x * @tparam T_mu Type of mean parameter * @tparam T_k Type of inverse scale parameter */ template inline return_type_t von_mises_lcdf(const T_x& x, const T_mu& mu, const T_k& k) { using internal::von_mises_cdf_centered; using std::log; const double pi = stan::math::pi(); using T_return = return_type_t; using T_x_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_k_ref = ref_type_t; static char const* function = "von_mises_lcdf"; check_consistent_sizes(function, "Random variable", x, "Location parameter", mu, "Scale parameter", k); T_x_ref x_ref = x; T_mu_ref mu_ref = mu; T_k_ref k_ref = k; check_bounded(function, "Random variable", value_of(x_ref), -pi, pi); check_finite(function, "Location parameter", value_of(mu_ref)); check_positive(function, "Scale parameter", value_of(k_ref)); if (size_zero(x, mu, k)) { return 0.0; } T_return res(0.0); 
scalar_seq_view x_vec(x_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view k_vec(k_ref); size_t N = max_size(x, mu, k); for (size_t n = 0; n < N; ++n) { auto x_n = x_vec[n]; auto mu_n = mu_vec[n]; auto k_n = k_vec[n]; if (x_n == -pi) { res += NEGATIVE_INFTY; } else if (x_n == pi) { res += 0.0; } else { // shift x so that mean is 0 T_return x2 = x_n - mu_n; // x2 is on an interval (2*n*pi, (2*n + 1)*pi), move it to (-pi, pi) x2 += pi; const auto x_floor = floor(x2 / TWO_PI); const auto x_modded = x2 - x_floor * TWO_PI; x2 = x_modded - pi; // mu is on an interval (2*n*pi, (2*n + 1)*pi), move it to (-pi, pi) T_return mu2 = mu_n + pi; const auto mu_floor = floor(mu2 / TWO_PI); const auto mu_modded = mu2 - mu_floor * TWO_PI; mu2 = mu_modded - pi; // find cut T_return cut, bound_val; if (mu2 < 0) { cut = mu2 + pi; bound_val = -pi - mu2; } if (mu2 >= 0) { cut = mu2 - pi; bound_val = pi - mu2; } T_return f_bound_val = von_mises_cdf_centered(bound_val, k_n); T_return cdf_n; if (x_n <= cut) { cdf_n = von_mises_cdf_centered(x2, k_n) - f_bound_val; } else { cdf_n = von_mises_cdf_centered(x2, k_n) + 1 - f_bound_val; } if (cdf_n < 0.0) cdf_n = 0.0; res += log(cdf_n); } } return res; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_cdf_log.hpp0000644000176200001440000000122114645137106025374 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_SKEW_NORMAL_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use skew_normal_lcdf */ template return_type_t skew_normal_cdf_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { return skew_normal_lcdf(y, mu, sigma, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/frechet_log.hpp0000644000176200001440000000212714645137106023665 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_LOG_HPP #define 
STAN_MATH_PRIM_PROB_FRECHET_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use frechet_lpdf */ template return_type_t frechet_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return frechet_lpdf(y, alpha, sigma); } /** \ingroup prob_dists * @deprecated use frechet_lpdf */ template inline return_type_t frechet_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return frechet_lpdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_log.hpp0000644000176200001440000000301414645137106024364 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_LOG_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a chi-squared density for y with the specified * degrees of freedom parameter. * The degrees of freedom parameter must be greater than 0. * y must be greater than or equal to 0. * \f{eqnarray*}{ y &\sim& \chi^2_\nu \\ \log (p (y \, |\, \nu)) &=& \log \left( \frac{2^{-\nu / 2}}{\Gamma (\nu / 2)} y^{\nu / 2 - 1} \exp^{- y / 2} \right) \\ &=& - \frac{\nu}{2} \log(2) - \log (\Gamma (\nu / 2)) + (\frac{\nu}{2} - 1) \log(y) - \frac{y}{2} \\ & & \mathrm{ where } \; y \ge 0 \f} * * @deprecated use chi_square_lpdf * @param y A scalar variable. * @param nu Degrees of freedom. * @throw std::domain_error if nu is not greater than or equal to 0 * @throw std::domain_error if y is not greater than or equal to 0. * @tparam T_y Type of scalar. * @tparam T_dof Type of degrees of freedom. 
*/ template return_type_t chi_square_log(const T_y& y, const T_dof& nu) { return chi_square_lpdf(y, nu); } /** \ingroup prob_dists * @deprecated use chi_square_lpdf */ template inline return_type_t chi_square_log(const T_y& y, const T_dof& nu) { return chi_square_lpdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/von_mises_rng.hpp0000644000176200001440000000735714645137106024266 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_VON_MISES_RNG_HPP #define STAN_MATH_PRIM_PROB_VON_MISES_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a von Mises random variate for the given location and concentration * using the specified random number generator. * * mu and kappa can each be a scalar or a vector. Any non-scalar * inputs must be the same length. * * The algorithm used in von_mises_rng is a modified version of the * algorithm in: * * Efficient Simulation of the von Mises Distribution * D. J. Best and N. I. Fisher * Journal of the Royal Statistical Society. Series C (Applied Statistics), * Vol. 28, No. 2 (1979), pp. 152-157 * * For kappa < 1.4e-8, this reduces to a uniform distribution. 
* * @tparam T_loc type of location parameter * @tparam T_conc type of scale (concentration) parameter * @tparam RNG type of random number generator * * @param mu (Sequence of) location parameter(s) * @param kappa (Sequence of) non-negative scale (concentration) parameter(s) * @param rng random number generator * @return (Sequence of) von Mises random variate(s) * @throw std::domain_error if mu is infinite or kappa is negative * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type von_mises_rng( const T_loc& mu, const T_conc& kappa, RNG& rng) { using boost::variate_generator; using boost::random::uniform_real_distribution; using T_mu_ref = ref_type_t; using T_kappa_ref = ref_type_t; static const char* function = "von_mises_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale parameter", kappa); T_mu_ref mu_ref = mu; T_kappa_ref kappa_ref = kappa; check_finite(function, "Location parameter", mu_ref); check_nonnegative(function, "Scale parameter", kappa_ref); check_finite(function, "Scale parameter", kappa_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view kappa_vec(kappa_ref); size_t N = max_size(mu, kappa_ref); VectorBuilder output(N); variate_generator > uniform_rng( rng, uniform_real_distribution<>(0.0, 1.0)); for (size_t n = 0; n < N; ++n) { // for kappa sufficiently close to zero, it reduces to a // circular uniform distribution centered at mu if (kappa_vec[n] < 1.4e-8) { output[n] = (uniform_rng() - 0.5) * TWO_PI + std::fmod(std::fmod(mu_vec[n], TWO_PI) + TWO_PI, TWO_PI); continue; } double r = 1 + std::pow((1 + 4 * kappa_vec[n] * kappa_vec[n]), 0.5); double rho = 0.5 * (r - std::pow(2 * r, 0.5)) / kappa_vec[n]; double s = 0.5 * (1 + rho * rho) / rho; bool done = false; double W; while (!done) { double Z = std::cos(pi() * uniform_rng()); W = (1 + s * Z) / (s + Z); double Y = kappa_vec[n] * (s - W); double U2 = uniform_rng(); done = Y * (2 - Y) - U2 > 0; if (!done) { 
done = std::log(Y / U2) + 1 - Y >= 0; } } double U3 = uniform_rng() - 0.5; double sign = ((U3 >= 0) - (U3 <= 0)); // it's really an fmod() with a positivity constraint output[n] = sign * std::acos(W) + std::fmod(std::fmod(mu_vec[n], TWO_PI) + TWO_PI, TWO_PI); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_rng.hpp0000644000176200001440000000633714645137106025125 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_RNG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a negative binomial random variate with the specified location and * precision parameters using the given random number generator. * * mu and phi can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_loc type of location parameter * @tparam T_prec type of precision parameter * @tparam RNG type of random number generator * @param mu (Sequence of) positive location parameter(s) * @param phi (Sequence of) positive precision parameter(s) * @param rng random number generator * @return (Sequence of) negative binomial random variate(s) * @throw std::domain_error if mu or phi are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type neg_binomial_2_rng(const T_loc& mu, const T_prec& phi, RNG& rng) { using boost::gamma_distribution; using boost::variate_generator; using boost::random::poisson_distribution; using T_mu_ref = ref_type_t; using T_phi_ref = ref_type_t; static const char* function = "neg_binomial_2_rng"; check_consistent_sizes(function, "Location parameter", mu, "Precision parameter", phi); T_mu_ref mu_ref = mu; T_phi_ref phi_ref = phi; check_positive_finite(function, "Location parameter", mu_ref); 
check_positive_finite(function, "Precision parameter", phi_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view phi_vec(phi_ref); size_t N = max_size(mu, phi); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { double mu_div_phi = static_cast(mu_vec[n]) / phi_vec[n]; // gamma_rng params must be positive and finite check_positive_finite(function, "Location parameter divided by the " "precision parameter", mu_div_phi); double rng_from_gamma = variate_generator >( rng, gamma_distribution<>(phi_vec[n], mu_div_phi))(); // same as the constraints for poisson_rng check_less(function, "Random number that came from gamma distribution", rng_from_gamma, POISSON_MAX_RATE); check_not_nan(function, "Random number that came from gamma distribution", rng_from_gamma); check_nonnegative(function, "Random number that came from gamma distribution", rng_from_gamma); output[n] = variate_generator >( rng, poisson_distribution<>(rng_from_gamma))(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_chi_square_cdf_log.hpp0000644000176200001440000000111514645137106026054 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_INV_CHI_SQUARE_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use inv_chi_square_lcdf */ template return_type_t inv_chi_square_cdf_log(const T_y& y, const T_dof& nu) { return inv_chi_square_lcdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_cholesky_lpdf.hpp0000644000176200001440000002561514645137106027023 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_CHOLESKY_LPDF_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_CHOLESKY_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * 
The log of the multivariate normal density for the given y, mu, and * a Cholesky factor L of the variance matrix. * Sigma = LL', a square, semi-positive definite matrix. * * This version of the function is vectorized on y and mu. * * Analytic expressions taken from * http://qwone.com/~jason/writing/multivariateNormal.pdf * written by Jason D. M. Rennie. * * @param y A scalar vector * @param mu The mean vector of the multivariate normal distribution. * @param L The Cholesky decomposition of a variance matrix * of the multivariate normal distribution * @return The log of the multivariate normal density. * @throw std::domain_error if LL' is not square, not symmetric, * or not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_loc Type of location. * @tparam T_covar Type of scale. */ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< T_y, T_loc, T_covar>* = nullptr> return_type_t multi_normal_cholesky_lpdf( const T_y& y, const T_loc& mu, const T_covar& L) { static const char* function = "multi_normal_cholesky_lpdf"; using T_covar_elem = typename scalar_type::type; using T_return = return_type_t; using T_partials_return = partials_return_t; using matrix_partials_t = Eigen::Matrix; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_L_ref = ref_type_t; check_consistent_sizes_mvt(function, "y", y, "mu", mu); size_t number_of_y = size_mvt(y); size_t number_of_mu = size_mvt(mu); if (number_of_y == 0 || number_of_mu == 0) { return 0; } T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_L_ref L_ref = L; vector_seq_view y_vec(y_ref); vector_seq_view mu_vec(mu_ref); const size_t size_vec = max_size_mvt(y, mu); const int size_y = y_vec[0].size(); const int size_mu = mu_vec[0].size(); // check size consistency of all random variables y for (size_t i = 1, size_mvt_y = size_mvt(y); i < size_mvt_y; i++) { check_size_match(function, "Size of one of the vectors of " "the random variable", y_vec[i].size(), "Size of the first vector of 
the " "random variable", size_y); } // check size consistency of all means mu for (size_t i = 1, size_mvt_mu = size_mvt(mu); i < size_mvt_mu; i++) { check_size_match(function, "Size of one of the vectors of " "the location variable", mu_vec[i].size(), "Size of the first vector of the " "location variable", size_mu); } check_size_match(function, "Size of random variable", size_y, "size of location parameter", size_mu); check_size_match(function, "Size of random variable", size_y, "rows of covariance parameter", L.rows()); check_size_match(function, "Size of random variable", size_y, "columns of covariance parameter", L.cols()); for (size_t i = 0; i < size_vec; i++) { check_finite(function, "Location parameter", mu_vec[i]); check_not_nan(function, "Random variable", y_vec[i]); } if (unlikely(size_y == 0)) { return T_return(0); } auto ops_partials = make_partials_propagator(y_ref, mu_ref, L_ref); T_partials_return logp(0); if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * size_y * size_vec; } if (include_summand::value) { Eigen::Matrix y_val_minus_mu_val(size_y, size_vec); for (size_t i = 0; i < size_vec; i++) { decltype(auto) y_val = as_value_column_vector_or_scalar(y_vec[i]); decltype(auto) mu_val = as_value_column_vector_or_scalar(mu_vec[i]); y_val_minus_mu_val.col(i) = y_val - mu_val; } matrix_partials_t half; matrix_partials_t scaled_diff; // If the covariance is not autodiff, we can avoid computing a matrix // inverse if (is_constant::value) { matrix_partials_t L_val = value_of(L_ref); half = mdivide_left_tri(L_val, y_val_minus_mu_val) .transpose(); scaled_diff = mdivide_right_tri(half, L_val).transpose(); if (include_summand::value) { logp -= sum(log(L_val.diagonal())) * size_vec; } } else { matrix_partials_t inv_L_val = mdivide_left_tri(value_of(L_ref)); half = (inv_L_val.template triangularView() * y_val_minus_mu_val) .transpose(); scaled_diff = (half * inv_L_val.template triangularView()) .transpose(); logp += sum(log(inv_L_val.diagonal())) * 
size_vec; partials<2>(ops_partials) -= size_vec * inv_L_val.transpose(); for (size_t i = 0; i < size_vec; i++) { partials_vec<2>(ops_partials)[i] += scaled_diff.col(i) * half.row(i); } } logp -= 0.5 * sum(columns_dot_self(half)); for (size_t i = 0; i < size_vec; i++) { if (!is_constant_all::value) { partials_vec<0>(ops_partials)[i] -= scaled_diff.col(i); } if (!is_constant_all::value) { partials_vec<1>(ops_partials)[i] += scaled_diff.col(i); } } } return ops_partials.build(logp); } /** \ingroup multivar_dists * The log of the multivariate normal density for the given y, mu, and * a Cholesky factor L of the variance matrix. * Sigma = LL', a square, semi-positive definite matrix. * * Analytic expressions taken from * http://qwone.com/~jason/writing/multivariateNormal.pdf * written by Jason D. M. Rennie. * * @param y A scalar vector * @param mu The mean vector of the multivariate normal distribution. * @param L The Cholesky decomposition of a variance matrix * of the multivariate normal distribution * @return The log of the multivariate normal density. * @throw std::domain_error if LL' is not square, not symmetric, * or not semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_loc Type of location. * @tparam T_covar Type of scale. 
*/ template * = nullptr, require_all_not_nonscalar_prim_or_rev_kernel_expression_t< T_y, T_loc, T_covar>* = nullptr> return_type_t multi_normal_cholesky_lpdf( const T_y& y, const T_loc& mu, const T_covar& L) { static const char* function = "multi_normal_cholesky_lpdf"; using T_covar_elem = typename scalar_type::type; using T_return = return_type_t; using T_partials_return = partials_return_t; using matrix_partials_t = Eigen::Matrix; using vector_partials_t = Eigen::Matrix; using row_vector_partials_t = Eigen::Matrix; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_L_ref = ref_type_t; T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_L_ref L_ref = L; decltype(auto) y_val = as_value_column_vector_or_scalar(y_ref); decltype(auto) mu_val = as_value_column_vector_or_scalar(mu_ref); const int size_y = y_ref.size(); const int size_mu = mu_ref.size(); check_size_match(function, "Size of random variable", size_y, "size of location parameter", size_mu); check_size_match(function, "Size of random variable", size_y, "rows of covariance parameter", L.rows()); check_size_match(function, "Size of random variable", size_y, "columns of covariance parameter", L.cols()); check_finite(function, "Location parameter", mu_val); check_not_nan(function, "Random variable", y_val); if (unlikely(size_y == 0)) { return T_return(0); } auto ops_partials = make_partials_propagator(y_ref, mu_ref, L_ref); T_partials_return logp(0); if (include_summand::value) { logp += NEG_LOG_SQRT_TWO_PI * size_y; } if (include_summand::value) { row_vector_partials_t half; vector_partials_t scaled_diff; vector_partials_t y_val_minus_mu_val = y_val - mu_val; // If the covariance is not autodiff, we can avoid computing a matrix // inverse if (is_constant::value) { matrix_partials_t L_val = value_of(L_ref); half = mdivide_left_tri(L_val, y_val_minus_mu_val) .transpose(); scaled_diff = mdivide_right_tri(half, L_val).transpose(); if (include_summand::value) { logp -= sum(log(L_val.diagonal())); } } else { 
matrix_partials_t inv_L_val = mdivide_left_tri(value_of(L_ref)); half = (inv_L_val.template triangularView() * y_val_minus_mu_val.template cast()) .transpose(); scaled_diff = (half * inv_L_val.template triangularView()) .transpose(); logp += sum(log(inv_L_val.diagonal())); edge<2>(ops_partials).partials_ += scaled_diff * half - inv_L_val.transpose(); } logp -= 0.5 * sum(dot_self(half)); if (!is_constant_all::value) { partials<0>(ops_partials) -= scaled_diff; } if (!is_constant_all::value) { partials<1>(ops_partials) += scaled_diff; } } return ops_partials.build(logp); } template inline return_type_t multi_normal_cholesky_lpdf( const T_y& y, const T_loc& mu, const T_covar& L) { return multi_normal_cholesky_lpdf(y, mu, L); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_wishart_log.hpp0000644000176200001440000000331614645137106024603 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_WISHART_LOG_HPP #define STAN_MATH_PRIM_PROB_INV_WISHART_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * The log of the Inverse-Wishart density for the given W, degrees * of freedom, and scale matrix. * * The scale matrix, S, must be k x k, symmetric, and semi-positive * definite. * * @deprecated use inverse_wishart_lpdf * * @param W A scalar matrix * @param nu Degrees of freedom * @param S The scale matrix * @return The log of the Inverse-Wishart density at W given nu and S. * @throw std::domain_error if nu is not greater than k-1 * @throw std::domain_error if S is not square, not symmetric, or not * semi-positive definite. * @tparam T_y Type of scalar. * @tparam T_dof Type of degrees of freedom. * @tparam T_scale Type of scale. 
*/ template return_type_t inv_wishart_log(const T_y& W, const T_dof& nu, const T_scale& S) { return inv_wishart_lpdf(W, nu, S); } /** \ingroup multivar_dists * @deprecated use inverse_wishart_lpdf */ template inline return_type_t inv_wishart_log(const T_y& W, const T_dof& nu, const T_scale& S) { return inv_wishart_lpdf<>(W, nu, S); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_rng.hpp0000644000176200001440000000462014645137106023334 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_RNG_HPP #define STAN_MATH_PRIM_PROB_GAMMA_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a gamma random variate for the given shape and inverse * scale parameters using the specified random number generator. * * alpha and beta can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_shape type of shape parameter * @tparam T_inv type of inverse scale parameter * @tparam RNG type of random number generator * @param alpha (Sequence of) positive shape parameter(s) * @param beta (Sequence of) positive inverse scale parameter(s) * @param rng random number generator * @return (Sequence of) gamma random variate(s) * @throw std::domain_error if alpha or beta are nonpositive * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type gamma_rng( const T_shape& alpha, const T_inv& beta, RNG& rng) { using boost::gamma_distribution; using boost::variate_generator; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "gamma_rng"; check_consistent_sizes(function, "Shape parameter", alpha, "Inverse scale Parameter", beta); T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", 
beta_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(alpha, beta); VectorBuilder output(N); for (size_t n = 0; n < N; ++n) { // Convert rate (inverse scale) argument to scale for boost variate_generator > gamma_rng( rng, gamma_distribution<>(alpha_vec[n], 1 / static_cast(beta_vec[n]))); output[n] = gamma_rng(); } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/chi_square_lpdf.hpp0000644000176200001440000000750514645137106024541 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CHI_SQUARE_LPDF_HPP #define STAN_MATH_PRIM_PROB_CHI_SQUARE_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a chi-squared density for y with the specified * degrees of freedom parameter. * The degrees of freedom parameter must be greater than 0. * y must be greater than or equal to 0. * \f{eqnarray*}{ y &\sim& \chi^2_\nu \\ \log (p (y \, |\, \nu)) &=& \log \left( \frac{2^{-\nu / 2}}{\Gamma (\nu / 2)} y^{\nu / 2 - 1} \exp^{- y / 2} \right) \\ &=& - \frac{\nu}{2} \log(2) - \log (\Gamma (\nu / 2)) + (\frac{\nu}{2} - 1) \log(y) - \frac{y}{2} \\ & & \mathrm{ where } \; y \ge 0 \f} * * @tparam T_y type of scalar * @tparam T_dof type of degrees of freedom * @param y A scalar variable. * @param nu Degrees of freedom. * @throw std::domain_error if nu is not greater than or equal to 0 * @throw std::domain_error if y is not greater than or equal to 0. 
*/ template * = nullptr> return_type_t chi_square_lpdf(const T_y& y, const T_dof& nu) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using std::log; static const char* function = "chi_square_lpdf"; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) nu_val = to_ref(as_value_column_array_or_scalar(nu_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Degrees of freedom parameter", nu_val); if (size_zero(y, nu)) { return 0; } if (!include_summand::value) { return 0; } size_t N = max_size(y, nu); const auto& log_y = to_ref_if::value>(log(y_val)); const auto& half_nu = to_ref(0.5 * nu_val); T_partials_return logp(0); if (include_summand::value) { logp -= sum(nu_val * HALF_LOG_TWO + lgamma(half_nu)) * N / math::size(nu); } logp += sum((half_nu - 1.0) * log_y); if (include_summand::value) { logp -= 0.5 * sum(y_val) * N / math::size(y); } auto ops_partials = make_partials_propagator(y_ref, nu_ref); if (!is_constant_all::value) { partials<0>(ops_partials) = (half_nu - 1.0) / y_val - 0.5; } if (!is_constant_all::value) { if (is_vector::value) { partials<1>(ops_partials) = forward_as( (log_y - digamma(half_nu)) * 0.5 - HALF_LOG_TWO); } else { partials<1>(ops_partials)[0] = sum(log_y - digamma(half_nu)) * 0.5 - HALF_LOG_TWO * N; } } return ops_partials.build(logp); } template inline return_type_t chi_square_lpdf(const T_y& y, const T_dof& nu) { return chi_square_lpdf(y, nu); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exponential_cdf.hpp0000644000176200001440000000574614645137106024560 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXPONENTIAL_CDF_HPP #define STAN_MATH_PRIM_PROB_EXPONENTIAL_CDF_HPP #include #include #include 
#include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Calculates the exponential cumulative distribution function for * the given y and beta. * * Inverse scale parameter must be greater than 0. * y must be greater than or equal to 0. * * @tparam T_y type of scalar * @tparam T_inv_scale type of inverse scale * @param y A scalar variable. * @param beta Inverse scale parameter. */ template * = nullptr> return_type_t exponential_cdf(const T_y& y, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using T_partials_array = Eigen::Array; using T_y_ref = ref_type_if_t::value, T_y>; using T_beta_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exponential_cdf"; T_y_ref y_ref = y; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Inverse scale parameter", beta_val); if (size_zero(y, beta)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, beta_ref); constexpr bool any_derivatives = !is_constant_all::value; const auto& exp_val = to_ref_if(exp(-beta_val * y_val)); const auto& one_m_exp = to_ref_if(1 - exp_val); T_partials_return cdf(1.0); if (is_vector::value || is_vector::value) { cdf = forward_as(one_m_exp).prod(); } else { cdf = forward_as(one_m_exp); } if (any_derivatives) { const auto& rep_deriv = to_ref_if<( !is_constant_all::value && !is_constant_all::value)>( exp_val / one_m_exp * cdf); if (!is_constant_all::value) { partials<0>(ops_partials) = beta_val * rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = y_val * rep_deriv; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/discrete_range_cdf_log.hpp0000644000176200001440000000106114645137106026033 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_CDF_LOG_HPP #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use discrete_range_lcdf */ template double discrete_range_cdf_log(const T_y& y, const T_lower& lower, const T_upper& upper) { return discrete_range_lcdf(y, lower, upper); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/dirichlet_rng.hpp0000644000176200001440000000510614645137106024221 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DIRICHLET_RNG_HPP #define STAN_MATH_PRIM_PROB_DIRICHLET_RNG_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a draw from a Dirichlet distribution with specified * parameters and pseudo-random number generator. * * For prior counts greater than zero, the usual algorithm that * draws gamma variates and normalizes is used. * * For prior counts less than zero (i.e., parameters with value * less than one), a log-scale version of the following algorithm * is used to deal with underflow: * *

* G. Marsaglia and W. Tsang. A simple method for generating gamma * variables. ACM Transactions on Mathematical Software. * 26(3):363--372, 2000. *
* * @tparam RNG type of pseudo-random number generator * @param alpha Prior count (plus 1) parameter for Dirichlet. * @param rng Pseudo-random number generator. */ template inline Eigen::VectorXd dirichlet_rng( const Eigen::Matrix& alpha, RNG& rng) { using boost::gamma_distribution; using boost::variate_generator; using boost::random::uniform_real_distribution; using Eigen::VectorXd; using std::exp; using std::log; // separate algorithm if any parameter is less than 1 if (alpha.minCoeff() < 1) { variate_generator > uniform_rng( rng, uniform_real_distribution<>(0.0, 1.0)); VectorXd log_y(alpha.size()); for (int i = 0; i < alpha.size(); ++i) { variate_generator > gamma_rng( rng, gamma_distribution<>(alpha(i) + 1, 1)); double log_u = log(uniform_rng()); log_y(i) = log(gamma_rng()) + log_u / alpha(i); } double log_sum_y = log_sum_exp(log_y); VectorXd theta(alpha.size()); for (int i = 0; i < alpha.size(); ++i) { theta(i) = exp(log_y(i) - log_sum_y); } return theta; } // standard normalized gamma algorithm Eigen::VectorXd y(alpha.rows()); for (int i = 0; i < alpha.rows(); i++) { variate_generator > gamma_rng( rng, gamma_distribution<>(alpha(i, 0), 1e-7)); y(i) = gamma_rng(); } return y / y.sum(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_lcdf.hpp0000644000176200001440000000735714645137106023470 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_LCDF_HPP #define STAN_MATH_PRIM_PROB_GAMMA_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t gamma_lcdf(const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "gamma_lcdf"; 
check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Inverse scale parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, alpha, beta)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(y, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(negative_infinity()); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(0.0); } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return log_y_dbl = log(y_dbl); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return log_beta_dbl = log(beta_dbl); const T_partials_return beta_y_dbl = beta_dbl * y_dbl; const T_partials_return Pn = gamma_p(alpha_dbl, beta_y_dbl); const T_partials_return log_Pn = log(Pn); P += log_Pn; if (!is_constant_all::value) { const T_partials_return d_num = (-beta_y_dbl) + (alpha_dbl - 1) * (log_beta_dbl + log_y_dbl); const T_partials_return d_den = lgamma(alpha_dbl) + log_Pn; const T_partials_return d = exp(d_num - d_den); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += beta_dbl * d; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += y_dbl * d; } } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += 
grad_reg_lower_inc_gamma(alpha_dbl, beta_y_dbl) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/pareto_lcdf.hpp0000644000176200001440000000754014645137106023672 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_PARETO_LCDF_HPP #define STAN_MATH_PRIM_PROB_PARETO_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t pareto_lcdf(const T_y& y, const T_scale& y_min, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_y_min_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; using std::isinf; static const char* function = "pareto_lcdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", y_min, "Shape parameter", alpha); if (size_zero(y, y_min, alpha)) { return 0; } T_y_ref y_ref = y; T_y_min_ref y_min_ref = y_min; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) y_min_val = to_ref(as_value_column_array_or_scalar(y_min_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_nonnegative(function, "Random variable", y_val); check_positive_finite(function, "Scale parameter", y_min_val); check_positive_finite(function, "Shape parameter", alpha_val); auto ops_partials = make_partials_propagator(y_ref, y_min_ref, alpha_ref); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero if (sum(promote_scalar(y_val < y_min_val))) { return ops_partials.build(negative_infinity()); } if (sum(promote_scalar(isinf(y_val)))) { return ops_partials.build(0.0); } const auto& log_quot = to_ref_if::value>( log(y_min_val / y_val)); const auto& exp_prod = to_ref_if::value>( 
exp(alpha_val * log_quot)); T_partials_return P = sum(log(1 - exp_prod)); if (!is_constant_all::value) { const auto& common_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( exp_prod / (1 - exp_prod)); if (!is_constant_all::value) { const auto& y_min_inv = inv(y_min_val); auto common_deriv2 = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( -alpha_val * y_min_inv * common_deriv); if (!is_constant_all::value) { partials<0>(ops_partials) = -common_deriv2 * exp(log_quot); } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(common_deriv2); } } if (!is_constant_all::value) { partials<2>(ops_partials) = -common_deriv * log_quot; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/student_t_lcdf.hpp0000644000176200001440000001425514645137106024412 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STUDENT_T_LCDF_HPP #define STAN_MATH_PRIM_PROB_STUDENT_T_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t student_t_lcdf(const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; using std::exp; using std::log; using std::pow; static const char* function = "student_t_lcdf"; T_y_ref y_ref = y; T_nu_ref nu_ref = nu; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, nu, mu, sigma)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, 
nu_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, nu, mu, sigma); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == NEGATIVE_INFTY) { return ops_partials.build(negative_infinity()); } } T_partials_return digammaHalf = 0; VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digammaNu_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digammaNuPlusHalf_vec(math::size(nu)); if (!is_constant_all::value) { digammaHalf = digamma(0.5); for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return nu_dbl = nu_vec.val(i); digammaNu_vec[i] = digamma(0.5 * nu_dbl); digammaNuPlusHalf_vec[i] = digamma(0.5 + 0.5 * nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return sigma_inv = 1.0 / sigma_vec.val(n); const T_partials_return t = (y_vec.val(n) - mu_vec.val(n)) * sigma_inv; const T_partials_return nu_dbl = nu_vec.val(n); const T_partials_return q = nu_dbl / (t * t); const T_partials_return r = 1.0 / (1.0 + q); const T_partials_return J = 2 * r * r * q / t; const T_partials_return betaNuHalf = beta(0.5, 0.5 * nu_dbl); T_partials_return zJacobian = t > 0 ? -0.5 : 0.5; if (q < 2) { T_partials_return z = inc_beta(0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r); const T_partials_return Pn = t > 0 ? 
1.0 - 0.5 * z : 0.5 * z; const T_partials_return d_ibeta = pow(r, -0.5) * pow(1.0 - r, 0.5 * nu_dbl - 1) / betaNuHalf; P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += -zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, 0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r, digammaNu_vec[n], digammaHalf, digammaNuPlusHalf_vec[n], betaNuHalf); partials<1>(ops_partials)[n] += zJacobian * (d_ibeta * (r / t) * (r / t) + 0.5 * g1) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { partials<3>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv * t / Pn; } } else { T_partials_return z = 1.0 - inc_beta((T_partials_return)0.5, 0.5 * nu_dbl, r); zJacobian *= -1; const T_partials_return Pn = t > 0 ? 1.0 - 0.5 * z : 0.5 * z; T_partials_return d_ibeta = pow(1.0 - r, 0.5 * nu_dbl - 1) * pow(r, -0.5) / betaNuHalf; P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; grad_reg_inc_beta(g1, g2, (T_partials_return)0.5, 0.5 * nu_dbl, r, digammaHalf, digammaNu_vec[n], digammaNuPlusHalf_vec[n], betaNuHalf); partials<1>(ops_partials)[n] += zJacobian * (-d_ibeta * (r / t) * (r / t) + 0.5 * g2) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -zJacobian * d_ibeta * J * sigma_inv / Pn; } if (!is_constant_all::value) { partials<3>(ops_partials)[n] += -zJacobian * d_ibeta * J * sigma_inv * t / Pn; } } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_normal_lpdf.hpp0000644000176200001440000001222614645137106024733 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_NORMAL_LPDF_HPP #define 
STAN_MATH_PRIM_PROB_SKEW_NORMAL_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t skew_normal_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_alpha_ref = ref_type_if_t::value, T_shape>; static const char* function = "skew_normal_lpdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Shape paramter", alpha); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_alpha_ref alpha_ref = alpha; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_finite(function, "Shape parameter", alpha_val); check_positive(function, "Scale parameter", sigma_val); if (size_zero(y, mu, sigma, alpha)) { return 0.0; } if (!include_summand::value) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, alpha_ref); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& y_minus_mu_over_sigma = to_ref_if::value>( (y_val - mu_val) * inv_sigma); const auto& log_erfc_alpha_z = to_ref_if::value>( log(erfc(-alpha_val * y_minus_mu_over_sigma * INV_SQRT_TWO))); size_t N = max_size(y, mu, sigma, alpha); T_partials_return logp = sum(log_erfc_alpha_z); if (include_summand::value) { logp -= HALF_LOG_TWO_PI * N; } if 
(include_summand::value) { logp -= sum(log(sigma_val)) * N / math::size(sigma); } if (include_summand::value) { logp -= sum(square(y_minus_mu_over_sigma)) * 0.5 * N / max_size(y, mu, sigma); } if (!is_constant_all::value) { const auto& sq = square(alpha_val * y_minus_mu_over_sigma * INV_SQRT_TWO); const auto& ex = exp(-sq - log_erfc_alpha_z); auto deriv_logerf = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(SQRT_TWO_OVER_SQRT_PI * ex); if (!is_constant_all::value) { auto deriv_y_loc = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( (y_minus_mu_over_sigma - deriv_logerf * alpha_val) * inv_sigma); if (!is_constant_all::value) { partials<0>(ops_partials) = -deriv_y_loc; } if (!is_constant_all::value) { partials<1>(ops_partials) = std::move(deriv_y_loc); } } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = ((y_minus_mu_over_sigma - deriv_logerf * alpha_val) * y_minus_mu_over_sigma - 1) * inv_sigma; } if (!is_constant_all::value) { partials<3>(ops_partials) = deriv_logerf * y_minus_mu_over_sigma; } } return ops_partials.build(logp); } template inline return_type_t skew_normal_lpdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { return skew_normal_lpdf(y, mu, sigma, alpha); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/bernoulli_lcdf.hpp0000644000176200001440000000531114645137106024365 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BERNOULLI_LCDF_HPP #define STAN_MATH_PRIM_PROB_BERNOULLI_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CDF of the Bernoulli distribution. If containers are * supplied, returns the log sum of the probabilities. 
* * @tparam T_n type of integer parameter * @tparam T_prob type of chance of success parameter * @param n integer parameter * @param theta chance of success parameter * @return log probability or log sum of probabilities * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch. */ template * = nullptr> return_type_t bernoulli_lcdf(const T_n& n, const T_prob& theta) { using T_partials_return = partials_return_t; using T_theta_ref = ref_type_t; using std::log; static const char* function = "bernoulli_lcdf"; check_consistent_sizes(function, "Random variable", n, "Probability parameter", theta); T_theta_ref theta_ref = theta; check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); if (size_zero(n, theta)) { return 0.0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n); scalar_seq_view theta_vec(theta_ref); size_t max_size_seq_view = max_size(n, theta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(NEGATIVE_INFTY); } } for (size_t i = 0; i < max_size_seq_view; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_vec.val(i) >= 1) { continue; } const T_partials_return Pi = 1 - theta_vec.val(i); P += log(Pi); if (!is_constant_all::value) { partials<0>(ops_partials)[i] -= inv(Pi); } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/discrete_range_lpmf.hpp0000644000176200001440000000634414645137106025405 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LPMF_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_LPMF_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { 
namespace math { /** \ingroup prob_dists * Return the log PMF of a discrete range for the given y, lower and upper * bound (all integers). * \f{eqnarray*}{ y &\sim& \mbox{\sf{discrete\_range}}(lower, upper) \\ \log(p (y \, |\, lower, upper)) &=& \log \left( \frac{1}{upper - lower + 1} \right) \\ &=& -\log (upper - lower + 1) \f} * * `lower` and `upper` can each be a scalar or a one-dimensional container. * Any container arguments must be the same size. * * @tparam T_y type of scalar, either int or std::vector * @tparam T_lower type of lower bound, either int or std::vector * @tparam T_upper type of upper bound, either int or std::vector * * @param y integer random variable * @param lower integer lower bound * @param upper integer upper bound * @return Log probability. If containers are supplied, returns the log sum * of the probabilities. * @throw std::domain_error if upper is smaller than lower. * @throw std::invalid_argument if non-scalar arguments are of different * sizes. */ template double discrete_range_lpmf(const T_y& y, const T_lower& lower, const T_upper& upper) { using std::log; static const char* function = "discrete_range_lpmf"; check_not_nan(function, "Random variable", y); check_consistent_sizes(function, "Lower bound parameter", lower, "Upper bound parameter", upper); check_greater_or_equal(function, "Upper bound parameter", upper, lower); if (size_zero(y, lower, upper)) { return 0.0; } if (!include_summand::value) { return 0.0; } scalar_seq_view y_vec(y); scalar_seq_view lower_vec(lower); scalar_seq_view upper_vec(upper); size_t size_lower_upper = max_size(lower, upper); size_t N = max_size(y, lower, upper); for (size_t n = 0; n < N; ++n) { const double y_dbl = y_vec.val(n); if (y_dbl < lower_vec.val(n) || y_dbl > upper_vec.val(n)) { return LOG_ZERO; } } VectorBuilder log_upper_minus_lower( size_lower_upper); for (size_t i = 0; i < size_lower_upper; i++) { const double lower_dbl = lower_vec.val(i); const double upper_dbl = upper_vec.val(i); 
log_upper_minus_lower[i] = log(upper_dbl - lower_dbl + 1); } double logp(0.0); for (size_t n = 0; n < N; n++) { logp -= log_upper_minus_lower[n]; } return logp; } template inline double discrete_range_lpmf(const T_y& y, const T_lower& lower, const T_upper& upper) { return discrete_range_lpmf(y, lower, upper); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gumbel_lccdf.hpp0000644000176200001440000000741614645137106024020 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GUMBEL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_GUMBEL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the Gumbel log complementary cumulative distribution for the * given location and scale. Given containers of matching sizes, returns * the log sum of probabilities. * * @tparam T_y type of real parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param mu location parameter * @param beta scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if y is nan, mu is infinite, or beta is nonpositive * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t gumbel_lccdf(const T_y& y, const T_loc& mu, const T_scale& beta) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_beta_ref = ref_type_if_t::value, T_scale>; static const char* function = "gumbel_lccdf"; T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random 
variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive(function, "Scale parameter", beta_val); if (size_zero(y, mu, beta)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, beta_ref); const auto& scaled_diff = to_ref_if::value>((y_val - mu_val) / beta_val); const auto& exp_m_scaled_diff = to_ref_if::value>( exp(-scaled_diff)); const auto& cdf_log_n_tmp = exp(-exp_m_scaled_diff); const auto& ccdf_n = to_ref_if::value>( 1.0 - cdf_log_n_tmp); T_partials_return ccdf_log = sum(log(ccdf_n)); if (!is_constant_all::value) { const auto& rep_deriv_tmp = exp(-scaled_diff - exp_m_scaled_diff); const auto& rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>(rep_deriv_tmp / (beta_val * ccdf_n)); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = rep_deriv * scaled_diff; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/exp_mod_normal_lccdf.hpp0000644000176200001440000001325214645137106025543 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_EXP_MOD_NORMAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t exp_mod_normal_lccdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_inv_scale& lambda) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; using T_lambda_ref = ref_type_if_t::value, T_inv_scale>; static const char* function = "exp_mod_normal_lccdf"; 
check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Inv_scale paramter", lambda); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_lambda_ref lambda_ref = lambda; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); decltype(auto) lambda_val = to_ref(as_value_column_array_or_scalar(lambda_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); check_positive_finite(function, "Inv_scale parameter", lambda_val); if (size_zero(y, mu, sigma, lambda)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, lambda_ref); scalar_seq_view y_vec(y_val); for (size_t n = 0, size_y = stan::math::size(y); n < size_y; n++) { if (is_inf(y_vec[n])) { return ops_partials.build(y_vec[n] > 0 ? 
negative_infinity() : 0); } } const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& diff = to_ref(y_val - mu_val); const auto& v = to_ref(lambda_val * sigma_val); const auto& scaled_diff = to_ref(diff * INV_SQRT_TWO * inv_sigma); const auto& scaled_diff_diff = to_ref_if::value>( scaled_diff - v * INV_SQRT_TWO); const auto& erf_calc = to_ref(0.5 * (1 + erf(scaled_diff_diff))); const auto& exp_term = to_ref_if::value>( exp(0.5 * square(v) - lambda_val * diff)); const auto& ccdf_n = to_ref(0.5 - 0.5 * erf(scaled_diff) + exp_term * erf_calc); T_partials_return ccdf_log = sum(log(ccdf_n)); if (!is_constant_all::value) { const auto& exp_term_2 = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( exp(-square(scaled_diff_diff))); if (!is_constant_all::value) { constexpr bool need_deriv_refs = !is_constant_all::value && !is_constant_all::value; const auto& deriv_1 = to_ref_if(lambda_val * exp_term * erf_calc); const auto& deriv_2 = to_ref_if( INV_SQRT_TWO_PI * exp_term * exp_term_2 * inv_sigma); const auto& sq_scaled_diff = square(scaled_diff); const auto& exp_m_sq_scaled_diff = exp(-sq_scaled_diff); const auto& deriv_3 = to_ref_if( INV_SQRT_TWO_PI * exp_m_sq_scaled_diff * inv_sigma); if (!is_constant_all::value) { const auto& deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( (deriv_1 - deriv_2 + deriv_3) / ccdf_n); if (!is_constant_all::value) { partials<0>(ops_partials) = -deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = deriv; } } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = ((deriv_1 - deriv_2) * v + (deriv_3 - deriv_2) * scaled_diff * SQRT_TWO) / ccdf_n; } } if (!is_constant_all::value) { edge<3>(ops_partials).partials_ = exp_term * ((v * sigma_val - diff) * erf_calc - INV_SQRT_TWO_PI * sigma_val * exp_term_2) / ccdf_n; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif 
StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_log.hpp0000644000176200001440000000365614645137106026567 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LOG_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a scaled inverse chi-squared density for y with the * specified degrees of freedom parameter and scale parameter. * \f{eqnarray*}{ y &\sim& \mbox{\sf{Inv-}}\chi^2(\nu, s^2) \\ \log (p (y \, |\, \nu, s)) &=& \log \left( \frac{(\nu / 2)^{\nu / 2}}{\Gamma (\nu / 2)} s^\nu y^{- (\nu / 2 + 1)} \exp^{-\nu s^2 / (2y)} \right) \\ &=& \frac{\nu}{2} \log(\frac{\nu}{2}) - \log (\Gamma (\nu / 2)) + \nu \log(s) - (\frac{\nu}{2} + 1) \log(y) - \frac{\nu s^2}{2y} \\ & & \mathrm{ where } \; y > 0 \f} * * @deprecated use scaled_inv_chi_square_lpdf * * @param y A scalar variable. * @param nu Degrees of freedom. * @param s Scale parameter. * @throw std::domain_error if nu is not greater than 0 * @throw std::domain_error if s is not greater than 0. * @throw std::domain_error if y is not greater than 0. * @tparam T_y Type of scalar. * @tparam T_dof Type of degrees of freedom. 
*/ template return_type_t scaled_inv_chi_square_log(const T_y& y, const T_dof& nu, const T_scale& s) { return scaled_inv_chi_square_lpdf(y, nu, s); } /** \ingroup prob_dists * @deprecated use scaled_inv_chi_square_lpdf */ template inline return_type_t scaled_inv_chi_square_log( const T_y& y, const T_dof& nu, const T_scale& s) { return scaled_inv_chi_square_lpdf(y, nu, s); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_lccdf.hpp0000644000176200001440000000646014645137106024017 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_LCCDF_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the cauchy log complementary cumulative distribution function * for the given location, and scale. If given containers of matching sizes * returns the log sum of probabilities. * * @tparam T_y type of real parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param mu location parameter * @param sigma scale parameter * @return log probability or log sum of probabilities * @throw std::domain_error if sigma is nonpositive or y, mu are nan * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t cauchy_lccdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::atan; using std::log; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "cauchy_lccdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale Parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); 
check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return ccdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_inv_dbl = 1.0 / sigma_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return z = (y_dbl - mu_dbl) * sigma_inv_dbl; const T_partials_return Pn = 0.5 - atan(z) / pi(); ccdf_log += log(Pn); const T_partials_return rep_deriv = 1.0 / (Pn * pi() * (z * z * sigma_dbl + sigma_dbl)); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += rep_deriv * z; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/std_normal_log.hpp0000644000176200001440000000210014645137106024376 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_STD_NORMAL_LOG_HPP #define STAN_MATH_PRIM_PROB_STD_NORMAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * The log of a standard normal density for the specified scalar(s). * y can be either a scalar or a vector. * *

The result log probability is defined to be the sum of the * log probabilities for each observation. * * @deprecated use std_normal_lpdf * * @tparam T_y Underlying type of scalar in sequence. * @param y (Sequence of) scalar(s). * @return The log of the product of the densities. * @throw std::domain_error if any scalar is nan. */ template return_type_t std_normal_log(const T_y& y) { return std_normal_lpdf(y); } /** \ingroup prob_dists * @deprecated use std_normal_lpdf */ template inline return_type_t std_normal_log(const T_y& y) { return std_normal_lpdf(y); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/logistic_lcdf.hpp0000644000176200001440000000644114645137106024214 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGISTIC_LCDF_HPP #define STAN_MATH_PRIM_PROB_LOGISTIC_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t logistic_lcdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "logistic_lcdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); // Explicit return for extreme values // The gradients are technically 
ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == NEGATIVE_INFTY) { return ops_partials.build(NEGATIVE_INFTY); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return sigma_inv_vec = 1.0 / sigma_vec.val(n); const T_partials_return Pn = 1.0 / (1.0 + exp(-(y_dbl - mu_dbl) * sigma_inv_vec)); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] += exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += -exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -(y_dbl - mu_dbl) * sigma_inv_vec * exp(logistic_log(y_dbl, mu_dbl, sigma_dbl)) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/normal_lccdf.hpp0000644000176200001440000000662014645137106024031 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NORMAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_NORMAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> inline return_type_t normal_lccdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::exp; using std::log; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "normal_lccdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; 
check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 0; } T_partials_return ccdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = max_size(y, mu, sigma); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return scaled_diff = (y_dbl - mu_dbl) / (sigma_dbl * SQRT_TWO); T_partials_return one_m_erf; if (scaled_diff < -37.5 * INV_SQRT_TWO) { one_m_erf = 2.0; } else if (scaled_diff < -5.0 * INV_SQRT_TWO) { one_m_erf = 2.0 - erfc(-scaled_diff); } else if (scaled_diff > 8.25 * INV_SQRT_TWO) { one_m_erf = 0.0; } else { one_m_erf = 1.0 - erf(scaled_diff); } ccdf_log += LOG_HALF + log(one_m_erf); if (!is_constant_all::value) { const T_partials_return rep_deriv_div_sigma = scaled_diff > 8.25 * INV_SQRT_TWO ? 
INFTY : SQRT_TWO_OVER_SQRT_PI * exp(-scaled_diff * scaled_diff) / one_m_erf / sigma_dbl; if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= rep_deriv_div_sigma; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += rep_deriv_div_sigma; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += rep_deriv_div_sigma * scaled_diff * SQRT_TWO; } } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_normal_cholesky_rng.hpp0000644000176200001440000000504214645137106026654 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_NORMAL_CHOLESKY_RNG_HPP #define STAN_MATH_PRIM_PROB_MULTI_NORMAL_CHOLESKY_RNG_HPP #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a multivariate normal random variate with the given location and * Cholesky factorization of the covariance using the specified random number * generator. * * mu can be either an Eigen::VectorXd, an Eigen::RowVectorXd, or a std::vector * of either of those types. 
* * @tparam T_loc Type of location parameter * @tparam RNG Type of pseudo-random number generator * @param mu (Sequence of) location parameter(s) * @param L Lower Cholesky factor of target covariance matrix * @param rng random number generator * @throw std::invalid_argument if the length of (each) mu is not equal to the * number of rows and columns in L */ template inline typename StdVectorBuilder::type multi_normal_cholesky_rng( const T_loc& mu, const Eigen::Matrix& L, RNG& rng) { using boost::normal_distribution; using boost::variate_generator; static const char* function = "multi_normal_cholesky_rng"; vector_seq_view mu_vec(mu); size_t size_mu = mu_vec[0].size(); size_t N = size_mvt(mu); for (size_t i = 1; i < N; i++) { check_size_match(function, "Size of one of the vectors of " "the location variable", mu_vec[i].size(), "Size of the first vector of the " "location variable", size_mu); } for (size_t i = 0; i < N; i++) { check_finite(function, "Location parameter", mu_vec[i]); } const auto& L_ref = to_ref(L); StdVectorBuilder output(N); variate_generator > std_normal_rng( rng, normal_distribution<>(0, 1)); for (size_t n = 0; n < N; ++n) { Eigen::VectorXd z(L.cols()); for (int i = 0; i < L.cols(); i++) { z(i) = std_normal_rng(); } output[n] = as_column_vector_or_scalar(mu_vec[n]) + L_ref * z; } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_lccdf.hpp0000644000176200001440000001014514645137106023620 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_LCCDF_HPP #define STAN_MATH_PRIM_PROB_GAMMA_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template return_type_t gamma_lccdf(const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { using T_partials_return = partials_return_t; using std::exp; using std::log; using std::pow; using T_y_ref = 
ref_type_t; using T_alpha_ref = ref_type_t; using T_beta_ref = ref_type_t; static const char* function = "gamma_lccdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Inverse scale parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Inverse scale parameter", beta_ref); check_nonnegative(function, "Random variable", y_ref); if (size_zero(y, alpha, beta)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view alpha_vec(alpha_ref); scalar_seq_view beta_vec(beta_ref); size_t N = max_size(y, alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_shape> gamma_vec(math::size(alpha)); VectorBuilder::value, T_partials_return, T_shape> digamma_vec(math::size(alpha)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(alpha); i++) { const T_partials_return alpha_dbl = alpha_vec.val(i); gamma_vec[i] = tgamma(alpha_dbl); digamma_vec[i] = digamma(alpha_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { return ops_partials.build(negative_infinity()); } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return beta_dbl = beta_vec.val(n); const T_partials_return Pn = gamma_q(alpha_dbl, beta_dbl * y_dbl); P += log(Pn); if (!is_constant_all::value) { partials<0>(ops_partials)[n] -= beta_dbl * exp(-beta_dbl * y_dbl) * pow(beta_dbl * y_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } if 
(!is_constant_all::value) { partials<1>(ops_partials)[n] += grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_dbl, gamma_vec[n], digamma_vec[n]) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= y_dbl * exp(-beta_dbl * y_dbl) * pow(beta_dbl * y_dbl, alpha_dbl - 1) / tgamma(alpha_dbl) / Pn; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_lccdf.hpp0000644000176200001440000000727314645137106024340 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the log CCDF for the binomial distribution evaluated at the * specified success, population size, and chance of success. If given * containers of matching lengths, returns the log sum of probabilities. * * @tparam T_n type of successes parameter * @tparam T_N type of population size parameter * @tparam theta type of chance of success parameter * @param n successes parameter * @param N population size parameter * @param theta chance of success parameter * @return log probability or log sum of probabilities * @throw std::domain_error if N is negative * @throw std::domain_error if theta is not a valid probability * @throw std::invalid_argument if container sizes mismatch */ template return_type_t binomial_lccdf(const T_n& n, const T_N& N, const T_prob& theta) { using T_partials_return = partials_return_t; using T_n_ref = ref_type_t; using T_N_ref = ref_type_t; using T_theta_ref = ref_type_t; using std::exp; using std::log; using std::pow; static const char* function = "binomial_lccdf"; check_consistent_sizes(function, "Successes variable", n, "Population size parameter", N, "Probability parameter", theta); T_n_ref n_ref = n; T_N_ref N_ref = N; T_theta_ref theta_ref = theta; 
check_nonnegative(function, "Population size parameter", N_ref); check_bounded(function, "Probability parameter", value_of(theta_ref), 0.0, 1.0); if (size_zero(n, N, theta)) { return 0; } T_partials_return P(0.0); auto ops_partials = make_partials_propagator(theta_ref); scalar_seq_view n_vec(n_ref); scalar_seq_view N_vec(N_ref); scalar_seq_view theta_vec(theta_ref); size_t max_size_seq_view = max_size(n, N, theta); // Explicit return for extreme values // The gradients are technically ill-defined, // but treated as negative infinity for (size_t i = 0; i < stan::math::size(n); i++) { if (n_vec.val(i) < 0) { return ops_partials.build(0.0); } } for (size_t i = 0; i < max_size_seq_view; i++) { const T_partials_return n_dbl = n_vec.val(i); const T_partials_return N_dbl = N_vec.val(i); // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (n_dbl >= N_dbl) { return ops_partials.build(NEGATIVE_INFTY); } const T_partials_return theta_dbl = theta_vec.val(i); const T_partials_return Pi = 1.0 - inc_beta(N_dbl - n_dbl, n_dbl + 1, 1 - theta_dbl); P += log(Pi); if (!is_constant_all::value) { const T_partials_return denom = beta(N_dbl - n_dbl, n_dbl + 1) * Pi; partials<0>(ops_partials)[i] += pow(theta_dbl, n_dbl) * pow(1 - theta_dbl, N_dbl - n_dbl - 1) / denom; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lkj_cov_log.hpp0000644000176200001440000000176414645137106023702 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LKJ_COV_LOG_HPP #define STAN_MATH_PRIM_PROB_LKJ_COV_LOG_HPP #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * @deprecated use lkj_cov_lpdf */ template inline return_type_t lkj_cov_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& eta) { return lkj_cov_lpdf(y, mu, sigma, eta); } /** \ingroup multivar_dists * @deprecated use lkj_cov_lpdf */ template inline return_type_t 
lkj_cov_log( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& eta) { return lkj_cov_lpdf<>(y, mu, sigma, eta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/cauchy_cdf.hpp0000644000176200001440000001004014645137106023465 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_CAUCHY_CDF_HPP #define STAN_MATH_PRIM_PROB_CAUCHY_CDF_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the cauchy cumulative distribution function for the given * location, and scale. If given containers of matching sizes * returns the product of probabilities. * * @tparam T_y type of real parameter * @tparam T_loc type of location parameter * @tparam T_scale type of scale parameter * @param y real parameter * @param mu location parameter * @param sigma scale parameter * @return probability or product of probabilities * @throw std::domain_error if sigma is nonpositive or y, mu are nan * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t cauchy_cdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using std::atan; using T_y_ref = ref_type_t; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "cauchy_cdf"; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale Parameter", sigma); T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; check_not_nan(function, "Random variable", y_ref); check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y, mu, sigma)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); size_t N = 
max_size(y, mu, sigma); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == NEGATIVE_INFTY) { return ops_partials.build(0.0); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return mu_dbl = mu_vec.val(n); const T_partials_return sigma_inv_dbl = 1.0 / sigma_vec.val(n); const T_partials_return z = (y_dbl - mu_dbl) * sigma_inv_dbl; const T_partials_return Pn = atan(z) / pi() + 0.5; P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += sigma_inv_dbl / (pi() * (1.0 + z * z) * Pn); } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += -sigma_inv_dbl / (pi() * (1.0 + z * z) * Pn); } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -z * sigma_inv_dbl / (pi() * (1.0 + z * z) * Pn); } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(mu); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(sigma); ++n) { partials<2>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/frechet_lcdf.hpp0000644000176200001440000000536014645137106024016 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_LCDF_HPP #define STAN_MATH_PRIM_PROB_FRECHET_LCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t frechet_lcdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using 
T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "frechet_lcdf"; T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; using std::exp; using std::pow; check_positive(function, "Random variable", y_ref); check_positive_finite(function, "Shape parameter", alpha_ref); check_positive_finite(function, "Scale parameter", sigma_ref); if (size_zero(y_ref, alpha_ref, sigma_ref)) { return 0.0; } T_partials_return cdf_log(0.0); auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view sigma_vec(sigma_ref); scalar_seq_view alpha_vec(alpha_ref); size_t N = max_size(y_ref, sigma_ref, alpha_ref); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = y_vec.val(n); const T_partials_return sigma_dbl = sigma_vec.val(n); const T_partials_return alpha_dbl = alpha_vec.val(n); const T_partials_return pow_n = pow(sigma_dbl / y_dbl, alpha_dbl); cdf_log -= pow_n; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += pow_n * alpha_dbl / y_dbl; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += pow_n * log(y_dbl / sigma_dbl); } if (!is_constant_all::value) { partials<2>(ops_partials)[n] -= pow_n * alpha_dbl / sigma_dbl; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_rng.hpp0000644000176200001440000000610114645137106027157 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_RNG_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_RNG_HPP #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return a skew double exponential random variate with the given location * scale and skewness using the specified random number generator. 
* * mu, sigma and tau can each be a scalar or a one-dimensional container. Any * non-scalar inputs must be the same size. * * @tparam T_loc Type of location parameter * @tparam T_scale Type of scale parameter * @tparam T_skewness Type of skewness parameter * @tparam RNG class of random number generator * @param mu (Sequence of) location parameter(s) * @param sigma (Sequence of) scale parameter(s) * @param tau (Sequence of) skewness parameter(s) * @param rng random number generator * @return (Sequence of) double exponential random variate(s) * @throw std::domain_error if mu is infinite or sigma is nonpositive or tau is * not bound between 0.0 and 1.0 * @throw std::invalid_argument if non-scalar arguments are of different * sizes */ template inline typename VectorBuilder::type skew_double_exponential_rng(const T_loc& mu, const T_scale& sigma, const T_skewness& tau, RNG& rng) { using boost::variate_generator; using boost::random::uniform_real_distribution; using T_mu_ref = ref_type_t; using T_sigma_ref = ref_type_t; using T_tau_ref = ref_type_t; static const char* function = "skew_double_exponential_rng"; check_consistent_sizes(function, "Location parameter", mu, "Scale Parameter", sigma, "Skewness Parameter", tau); T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; T_tau_ref tau_ref = tau; check_finite(function, "Location parameter", mu_ref); check_positive_finite(function, "Scale parameter", sigma_ref); check_bounded(function, "Skewness parameter", tau_ref, 0.0, 1.0); scalar_seq_view mu_vec(mu_ref); scalar_seq_view sigma_vec(sigma_ref); scalar_seq_view tau_vec(tau_ref); size_t N = max_size(mu, sigma, tau); VectorBuilder output(N); variate_generator > z_rng( rng, uniform_real_distribution<>(0.0, 1.0)); for (size_t n = 0; n < N; ++n) { double z = z_rng(); if (z < tau_vec[n]) { output[n] = log(z / tau_vec[n]) * sigma_vec[n] / (2.0 * (1.0 - tau_vec[n])) + mu_vec[n]; } else { output[n] = log((1.0 - z) / (1.0 - tau_vec[n])) * (-sigma_vec[n]) / (2.0 * tau_vec[n]) + 
mu_vec[n]; } } return output.data(); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_2_ccdf_log.hpp0000644000176200001440000000120314645137106026062 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_2_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use neg_binomial_2_lccdf */ template return_type_t neg_binomial_2_ccdf_log( const T_n& n, const T_location& mu, const T_precision& phi) { return neg_binomial_2_lccdf(n, mu, phi); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/scaled_inv_chi_square_cdf.hpp0000644000176200001440000001232514645137106026533 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_CDF_HPP #define STAN_MATH_PRIM_PROB_SCALED_INV_CHI_SQUARE_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * The CDF of a scaled inverse chi-squared density for y with the * specified degrees of freedom parameter and scale parameter. * * @tparam T_y type of scalar * @tparam T_dof type of degrees of freedom * @param y A scalar variable. * @param nu Degrees of freedom. * @param s Scale parameter. * @throw std::domain_error if nu is not greater than 0 * @throw std::domain_error if s is not greater than 0. * @throw std::domain_error if y is not greater than 0. 
*/ template return_type_t scaled_inv_chi_square_cdf(const T_y& y, const T_dof& nu, const T_scale& s) { using T_partials_return = partials_return_t; using std::exp; using std::pow; using T_y_ref = ref_type_t; using T_nu_ref = ref_type_t; using T_s_ref = ref_type_t; static const char* function = "scaled_inv_chi_square_cdf"; check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu, "Scale parameter", s); T_y_ref y_ref = y; T_nu_ref nu_ref = nu; T_s_ref s_ref = s; check_nonnegative(function, "Random variable", y_ref); check_positive_finite(function, "Degrees of freedom parameter", nu_ref); check_positive_finite(function, "Scale parameter", s_ref); if (size_zero(y, nu, s)) { return 1.0; } T_partials_return P(1.0); auto ops_partials = make_partials_propagator(y_ref, nu_ref, s_ref); scalar_seq_view y_vec(y_ref); scalar_seq_view nu_vec(nu_ref); scalar_seq_view s_vec(s_ref); size_t N = max_size(y, nu, s); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::math::size(y); i++) { if (y_vec.val(i) == 0) { return ops_partials.build(0.0); } } VectorBuilder::value, T_partials_return, T_dof> gamma_vec(math::size(nu)); VectorBuilder::value, T_partials_return, T_dof> digamma_vec(math::size(nu)); if (!is_constant_all::value) { for (size_t i = 0; i < stan::math::size(nu); i++) { const T_partials_return half_nu_dbl = 0.5 * nu_vec.val(i); gamma_vec[i] = tgamma(half_nu_dbl); digamma_vec[i] = digamma(half_nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (y_vec.val(n) == INFTY) { continue; } const T_partials_return y_dbl = y_vec.val(n); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return half_nu_dbl = 0.5 * nu_vec.val(n); const T_partials_return s_dbl = s_vec.val(n); const T_partials_return half_s2_overx_dbl = 0.5 * s_dbl * s_dbl * y_inv_dbl; const 
T_partials_return half_nu_s2_overx_dbl = 2.0 * half_nu_dbl * half_s2_overx_dbl; const T_partials_return Pn = gamma_q(half_nu_dbl, half_nu_s2_overx_dbl); const T_partials_return gamma_p_deriv = exp(-half_nu_s2_overx_dbl) * pow(half_nu_s2_overx_dbl, half_nu_dbl - 1) / tgamma(half_nu_dbl); P *= Pn; if (!is_constant_all::value) { partials<0>(ops_partials)[n] += half_nu_s2_overx_dbl * y_inv_dbl * gamma_p_deriv / Pn; } if (!is_constant_all::value) { partials<1>(ops_partials)[n] += (0.5 * grad_reg_inc_gamma(half_nu_dbl, half_nu_s2_overx_dbl, gamma_vec[n], digamma_vec[n]) - half_s2_overx_dbl * gamma_p_deriv) / Pn; } if (!is_constant_all::value) { partials<2>(ops_partials)[n] += -2.0 * half_nu_dbl * s_dbl * y_inv_dbl * gamma_p_deriv / Pn; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(y); ++n) { partials<0>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(nu); ++n) { partials<1>(ops_partials)[n] *= P; } } if (!is_constant_all::value) { for (size_t n = 0; n < stan::math::size(s); ++n) { partials<2>(ops_partials)[n] *= P; } } return ops_partials.build(P); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multinomial_logit_rng.hpp0000644000176200001440000000323114645137106025777 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTINOMIAL_LOGIT_RNG_HPP #define STAN_MATH_PRIM_PROB_MULTINOMIAL_LOGIT_RNG_HPP #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a draw from a Multinomial distribution given a * a vector of unnormalized log probabilities and a pseudo-random * number generator. * * @tparam RNG Type of pseudo-random number generator. * @param beta Vector of unnormalized log probabilities. * @param N Total count * @param rng Pseudo-random number generator. 
* @return Multinomial random variate */ template * = nullptr> inline std::vector multinomial_logit_rng(const T_beta& beta, int N, RNG& rng) { static const char* function = "multinomial_logit_rng"; const auto& beta_ref = to_ref(beta); check_finite(function, "Log-probabilities parameter", beta_ref); check_positive(function, "number of trials variables", N); plain_type_t theta = softmax(beta_ref); std::vector result(theta.size(), 0); double mass_left = 1.0; int n_left = N; for (int k = 0; n_left > 0 && k < theta.size(); ++k) { double p = theta.coeff(k) / mass_left; if (p > 1.0) { p = 1.0; } result[k] = binomial_rng(n_left, p, rng); n_left -= result[k]; mass_left -= theta.coeff(k); } return result; } // namespace math } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/skew_double_exponential_lccdf.hpp0000644000176200001440000001246014645137106027451 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_SKEW_DOUBLE_EXPONENTIAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Returns the skew double exponential log complementary cumulative density * function. Given containers of matching sizes, returns the log sum of * probabilities. * * @tparam T_y type of real parameter. * @tparam T_loc type of location parameter. * @tparam T_scale type of scale parameter. * @tparam T_skewness type of skewness parameter. 
* @param y real parameter * @param mu location parameter * @param sigma scale parameter * @param tau skewness parameter * @return log probability or log sum of probabilities * @throw std::domain_error if mu is infinite or sigma is nonpositive or tau is * not bound between 0.0 and 1.0 * @throw std::invalid_argument if container sizes mismatch */ template * = nullptr> return_type_t skew_double_exponential_lccdf( const T_y& y, const T_loc& mu, const T_scale& sigma, const T_skewness& tau) { static const char* function = "skew_double_exponential_lccdf"; using T_partials_return = partials_return_t; check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Shape parameter", sigma, "Skewness parameter", tau); auto&& y_ref = to_ref(y); auto&& mu_ref = to_ref(mu); auto&& sigma_ref = to_ref(sigma); auto&& tau_ref = to_ref(tau); auto&& y_val = as_value_array_or_scalar(y_ref); auto&& mu_val = as_value_array_or_scalar(mu_ref); auto&& sigma_val = as_value_array_or_scalar(sigma_ref); auto&& tau_val = as_value_array_or_scalar(tau_ref); check_not_nan(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); check_bounded(function, "Skewness parameter", tau_val, 0.0, 1.0); if (size_zero(y, mu, sigma, tau)) { return 0.0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref, tau_ref); scalar_seq_view> y_vec(y_val); scalar_seq_view> mu_vec(mu_val); scalar_seq_view> sigma_vec(sigma_val); scalar_seq_view> tau_vec(tau_val); const auto N = max_size(y, mu, sigma, tau); auto inv_sigma_val = to_ref(inv(sigma_val)); scalar_seq_view inv_sigma(inv_sigma_val); T_partials_return cdf_log(0.0); for (int i = 0; i < N; ++i) { const T_partials_return y_dbl = y_vec[i]; const T_partials_return mu_dbl = mu_vec[i]; const T_partials_return sigma_dbl = sigma_vec[i]; const T_partials_return tau_dbl = tau_vec[i]; const T_partials_return y_m_mu = y_dbl - mu_dbl; const 
T_partials_return diff_sign = sign(y_m_mu); const T_partials_return diff_sign_smaller_0 = step(-diff_sign); const T_partials_return abs_diff_y_mu = fabs(y_m_mu); const T_partials_return abs_diff_y_mu_over_sigma = abs_diff_y_mu * inv_sigma[i]; const T_partials_return expo = (diff_sign_smaller_0 + diff_sign * tau_dbl) * abs_diff_y_mu_over_sigma; const T_partials_return inv_exp_2_expo_tau = inv(exp(2.0 * (tau_dbl - 1.0) * y_m_mu * inv_sigma[i]) - tau_dbl); const T_partials_return tau_m1_tau = (tau_dbl - 1.0) * tau_dbl; const T_partials_return rep_deriv = y_dbl < mu_dbl ? 2.0 * tau_m1_tau * inv_sigma[i] * inv_exp_2_expo_tau : -2.0 * inv_sigma[i] * tau_dbl; const T_partials_return sig_deriv = y_dbl < mu_dbl ? -2.0 * tau_m1_tau * y_m_mu * inv_sigma[i] * inv_sigma[i] * inv_exp_2_expo_tau : 2.0 * inv_sigma[i] * expo; const T_partials_return skew_deriv = y_dbl < mu_dbl ? -(sigma_dbl + 2.0 * tau_dbl * y_m_mu * diff_sign) * inv_sigma[i] * inv_exp_2_expo_tau : 1.0 / (tau_dbl - 1.0) - 2.0 * inv_sigma[i] * y_m_mu; if (y_dbl <= mu_dbl) { cdf_log += log1m(tau_dbl * exp(-2.0 * expo)); } else { cdf_log += log(1 - tau_dbl) - 2.0 * expo; } if (!is_constant_all::value) { partials<0>(ops_partials)[i] += rep_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials)[i] -= rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials)[i] += sig_deriv; } if (!is_constant_all::value) { partials<3>(ops_partials)[i] += skew_deriv; } } return ops_partials.build(cdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_cdf.hpp0000644000176200001440000000725314645137106023704 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_CDF_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t uniform_cdf(const T_y& y, const T_low& alpha, const T_high& beta) { using 
T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_alpha_ref = ref_type_if_t::value, T_low>; using T_beta_ref = ref_type_if_t::value, T_high>; static const char* function = "uniform_cdf"; check_consistent_sizes(function, "Random variable", y, "Lower bound parameter", alpha, "Upper bound parameter", beta); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_beta_ref beta_ref = beta; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) beta_val = to_ref(as_value_column_array_or_scalar(beta_ref)); check_not_nan(function, "Random variable", y_val); check_finite(function, "Lower bound parameter", alpha_val); check_finite(function, "Upper bound parameter", beta_val); check_greater(function, "Upper bound parameter", beta_val, alpha_val); if (size_zero(y, alpha, beta)) { return 1.0; } if (sum(promote_scalar(y_val < alpha_val)) || sum(promote_scalar(beta_val < y_val))) { return 0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, beta_ref); const auto& b_minus_a = to_ref_if::value>(beta_val - alpha_val); const auto& cdf_n = to_ref_if::value>( (y_val - alpha_val) / b_minus_a); T_partials_return cdf = prod(cdf_n); if (!is_constant_all::value) { const auto& rep_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(cdf / b_minus_a); if (!is_constant_all::value) { auto deriv_y = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>(rep_deriv / cdf_n); if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = (y_val - beta_val) * deriv_y / b_minus_a; } if (!is_constant_all::value) { partials<0>(ops_partials) = std::move(deriv_y); } } if (!is_constant_all::value) { if (is_vector::value && !is_vector::value && !is_vector::value) { edge<2>(ops_partials).partials_ = -rep_deriv * max_size(y, alpha, beta) / max_size(alpha, beta); } else { partials<2>(ops_partials) = -rep_deriv; } } } 
return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/multi_student_t_log.hpp0000644000176200001440000000242314645137106025467 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_LOG_HPP #define STAN_MATH_PRIM_PROB_MULTI_STUDENT_T_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return the log of the multivariate Student t distribution * at the specified arguments. * * @deprecated use multi_student_t_lpdf * * @tparam propto Carry out calculations up to a proportion */ template return_type_t multi_student_t_log( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& Sigma) { return multi_student_t_lpdf(y, nu, mu, Sigma); } /** \ingroup multivar_dists * @deprecated use multi_student_t_lpdf */ template inline return_type_t multi_student_t_log( const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& Sigma) { return multi_student_t_lpdf(y, nu, mu, Sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/binomial_log.hpp0000644000176200001440000000161714645137106024042 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_BINOMIAL_LOG_HPP #define STAN_MATH_PRIM_PROB_BINOMIAL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use binomial_lpmf */ template return_type_t binomial_log(const T_n& n, const T_N& N, const T_prob& theta) { return binomial_lpmf(n, N, theta); } /** \ingroup prob_dists * @deprecated use binomial_lpmf */ template inline return_type_t binomial_log(const T_n& n, const T_N& N, const T_prob& theta) { return binomial_lpmf(n, N, theta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/gamma_cdf_log.hpp0000644000176200001440000000111014645137106024132 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_GAMMA_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_GAMMA_CDF_LOG_HPP #include #include namespace stan { 
namespace math { /** \ingroup prob_dists * @deprecated use gamma_lcdf */ template return_type_t gamma_cdf_log( const T_y& y, const T_shape& alpha, const T_inv_scale& beta) { return gamma_lcdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/frechet_lpdf.hpp0000644000176200001440000001032414645137106024027 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_FRECHET_LPDF_HPP #define STAN_MATH_PRIM_PROB_FRECHET_LPDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { // Frechet(y|alpha, sigma) [y > 0; alpha > 0; sigma > 0] // FIXME: document template * = nullptr> return_type_t frechet_lpdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_t; using T_alpha_ref = ref_type_t; using T_sigma_ref = ref_type_t; static const char* function = "frechet_lpdf"; check_consistent_sizes(function, "Random variable", y, "Shape parameter", alpha, "Scale parameter", sigma); T_y_ref y_ref = y; T_alpha_ref alpha_ref = alpha; T_sigma_ref sigma_ref = sigma; using std::pow; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) alpha_val = to_ref(as_value_column_array_or_scalar(alpha_ref)); decltype(auto) sigma_val = to_ref(as_value_column_array_or_scalar(sigma_ref)); check_positive(function, "Random variable", y_val); check_positive_finite(function, "Shape parameter", alpha_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, alpha, sigma)) { return 0; } if (!include_summand::value) { return 0; } auto ops_partials = make_partials_propagator(y_ref, alpha_ref, sigma_ref); const auto& log_y = to_ref_if<(include_summand::value && include_summand::value)>( log(y_val)); const auto& sigma_div_y_pow_alpha = to_ref_if::value>( pow(sigma_val / y_val, 
alpha_val)); size_t N = max_size(y, alpha, sigma); T_partials_return logp = -sum(sigma_div_y_pow_alpha); if (include_summand::value) { logp += sum(log(alpha_val)) * N / math::size(alpha); } if (include_summand::value) { logp -= sum((alpha_val + 1.0) * log_y) * N / max_size(y, alpha); } if (include_summand::value) { const auto& log_sigma = to_ref_if::value>(log(sigma_val)); logp += sum(alpha_val * log_sigma) * N / max_size(alpha, sigma); if (!is_constant_all::value) { edge<1>(ops_partials).partials_ = inv(alpha_val) + (1 - sigma_div_y_pow_alpha) * (log_sigma - log_y); } } if (!is_constant_all::value) { edge<0>(ops_partials).partials_ = (alpha_val * sigma_div_y_pow_alpha - (alpha_val + 1)) / y_val; } if (!is_constant_all::value) { edge<2>(ops_partials).partials_ = alpha_val / sigma_val * (1 - sigma_div_y_pow_alpha); } return ops_partials.build(logp); } template inline return_type_t frechet_lpdf(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return frechet_lpdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/uniform_cdf_log.hpp0000644000176200001440000000122714645137106024540 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_UNIFORM_CDF_LOG_HPP #define STAN_MATH_PRIM_PROB_UNIFORM_CDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use uniform_lcdf */ template return_type_t uniform_cdf_log(const T_y& y, const T_low& alpha, const T_high& beta) { return uniform_lcdf(y, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/weibull_log.hpp0000644000176200001440000000212714645137106023710 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_WEIBULL_LOG_HPP #define STAN_MATH_PRIM_PROB_WEIBULL_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use weibull_lpdf */ template return_type_t weibull_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return 
weibull_lpdf(y, alpha, sigma); } /** \ingroup prob_dists * @deprecated use weibull_lpdf */ template inline return_type_t weibull_log(const T_y& y, const T_shape& alpha, const T_scale& sigma) { return weibull_lpdf(y, alpha, sigma); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/discrete_range_cdf.hpp0000644000176200001440000000464014645137106025200 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_DISCRETE_RANGE_CDF_HPP #define STAN_MATH_PRIM_PROB_DISCRETE_RANGE_CDF_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace math { /** \ingroup prob_dists * Return the CDF of a discrete range distribution for the given y, * lower and upper bounds (all integers). * * `y`, `lower` and `upper` can each be a scalar or a one-dimensional container. * Any container arguments must be the same size. * * @tparam T_y type of scalar, either int or std::vector * @tparam T_lower type of lower bound, either int or std::vector * @tparam T_upper type of upper bound, either int or std::vector * * @param y integer random variable * @param lower integer lower bound * @param upper integer upper bound * @return The CDF evaluated at the specified arguments. If containers are * supplied, returns the product of the CDFs. * @throw std::domain_error if upper is smaller than lower. * @throw std::invalid_argument if non-scalar arguments are of different * sizes. 
*/ template double discrete_range_cdf(const T_y& y, const T_lower& lower, const T_upper& upper) { static const char* function = "discrete_range_cdf"; check_consistent_sizes(function, "Lower bound parameter", lower, "Upper bound parameter", upper); check_greater_or_equal(function, "Upper bound parameter", upper, lower); if (size_zero(y, lower, upper)) { return 1; } scalar_seq_view y_vec(y); scalar_seq_view lower_vec(lower); scalar_seq_view upper_vec(upper); size_t N = max_size(y, lower, upper); for (size_t n = 0; n < N; ++n) { const int y_dbl = y_vec[n]; if (y_dbl < lower_vec[n]) { return 0; } } double cdf(1.0); for (size_t n = 0; n < N; n++) { const double y_dbl = y_vec[n]; if (y_dbl <= upper_vec[n]) { const double lower_dbl = lower_vec[n]; const double upper_dbl = upper_vec[n]; cdf *= (y_dbl - lower_dbl + 1) / (upper_dbl - lower_dbl + 1); } } return cdf; } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/inv_wishart_cholesky_rng.hpp0000644000176200001440000000356414645137106026516 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_INV_WISHART_CHOLESKY_RNG_HPP #define STAN_MATH_PRIM_PROB_INV_WISHART_CHOLESKY_RNG_HPP #include #include #include #include #include namespace stan { namespace math { /** \ingroup multivar_dists * Return a random Cholesky factor of a covariance matrix * of the specified dimensionality drawn * from the inverse Wishart distribution with the specified degrees of freedom * using the specified random number generator. 
* * @tparam RNG Random number generator type * @param[in] nu scalar degrees of freedom * @param[in] L_S lower Cholesky factor of the scale matrix * @param[in, out] rng random-number generator * @return random lower Cholesky factor drawn from the given inverse Wishart * distribution * @throw std::domain_error if the scale matrix is not a Cholesky factor * @throw std::domain_error if the degrees of freedom is greater than k - 1 * where k is the dimension of L */ template inline Eigen::MatrixXd inv_wishart_cholesky_rng(double nu, const Eigen::MatrixXd& L_S, RNG& rng) { using Eigen::MatrixXd; static const char* function = "inv_wishart_cholesky_rng"; index_type_t k = L_S.rows(); check_square(function, "Cholesky Scale matrix", L_S); check_greater(function, "degrees of freedom > dims - 1", nu, k - 1); check_positive(function, "Cholesky Scale matrix", L_S.diagonal()); check_positive(function, "columns of Cholesky Scale matrix", L_S.cols()); MatrixXd L_Sinv = mdivide_left_tri(L_S); return mdivide_left_tri(wishart_cholesky_rng(nu, L_Sinv, rng)); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/rayleigh_cdf.hpp0000644000176200001440000000523714645137106024031 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_RAYLEIGH_CDF_HPP #define STAN_MATH_PRIM_PROB_RAYLEIGH_CDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t rayleigh_cdf(const T_y& y, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "rayleigh_cdf"; check_consistent_sizes(function, "Random variable", y, "Scale parameter", sigma); T_y_ref y_ref = y; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) sigma_val = 
to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_positive(function, "Scale parameter", sigma_val); if (size_zero(y, sigma)) { return 1.0; } auto ops_partials = make_partials_propagator(y_ref, sigma_ref); const auto& inv_sigma = to_ref_if::value>(inv(sigma_val)); const auto& inv_sigma_square = to_ref_if::value>(square(inv_sigma)); const auto& exp_val = to_ref_if::value>( exp(-0.5 * square(y_val) * inv_sigma_square)); T_partials_return cdf = prod(1 - exp_val); if (!is_constant_all::value) { const auto& common_deriv = to_ref_if<(!is_constant_all::value && !is_constant_all::value)>( y_val * inv_sigma_square * exp_val / (1.0 - exp_val) * cdf); if (!is_constant_all::value) { partials<0>(ops_partials) = common_deriv; } if (!is_constant_all::value) { partials<1>(ops_partials) = -y_val * inv_sigma * common_deriv; } } return ops_partials.build(cdf); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/lognormal_lccdf.hpp0000644000176200001440000000674514645137106024543 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_LOGNORMAL_LCCDF_HPP #define STAN_MATH_PRIM_PROB_LOGNORMAL_LCCDF_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace stan { namespace math { template * = nullptr> return_type_t lognormal_lccdf(const T_y& y, const T_loc& mu, const T_scale& sigma) { using T_partials_return = partials_return_t; using T_y_ref = ref_type_if_t::value, T_y>; using T_mu_ref = ref_type_if_t::value, T_loc>; using T_sigma_ref = ref_type_if_t::value, T_scale>; static const char* function = "lognormal_lccdf"; T_y_ref y_ref = y; T_mu_ref mu_ref = mu; T_sigma_ref sigma_ref = sigma; decltype(auto) y_val = to_ref(as_value_column_array_or_scalar(y_ref)); decltype(auto) mu_val = to_ref(as_value_column_array_or_scalar(mu_ref)); decltype(auto) sigma_val = 
to_ref(as_value_column_array_or_scalar(sigma_ref)); check_nonnegative(function, "Random variable", y_val); check_finite(function, "Location parameter", mu_val); check_positive_finite(function, "Scale parameter", sigma_val); if (size_zero(y, mu, sigma)) { return 0; } auto ops_partials = make_partials_propagator(y_ref, mu_ref, sigma_ref); if (sum(promote_scalar(y_val == 0))) { return ops_partials.build(0.0); } const auto& log_y = log(y_val); const auto& scaled_diff = to_ref_if::value>( (log_y - mu_val) / (sigma_val * SQRT_TWO)); const auto& erfc_calc = to_ref_if::value>( erfc(scaled_diff)); size_t N = max_size(y, mu, sigma); T_partials_return ccdf_log = N * LOG_HALF + sum(log(erfc_calc)); if (!is_constant_all::value) { const auto& exp_m_sq_diff = exp(-scaled_diff * scaled_diff); const auto& rep_deriv = to_ref_if::value + !is_constant_all::value + !is_constant_all::value >= 2>( SQRT_TWO_OVER_SQRT_PI * exp_m_sq_diff / (sigma_val * erfc_calc)); if (!is_constant_all::value) { partials<0>(ops_partials) = -rep_deriv / y_val; } if (!is_constant_all::value) { partials<1>(ops_partials) = rep_deriv; } if (!is_constant_all::value) { partials<2>(ops_partials) = rep_deriv * scaled_diff * SQRT_TWO; } } return ops_partials.build(ccdf_log); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/prob/neg_binomial_ccdf_log.hpp0000644000176200001440000000116314645137106025646 0ustar liggesusers#ifndef STAN_MATH_PRIM_PROB_NEG_BINOMIAL_CCDF_LOG_HPP #define STAN_MATH_PRIM_PROB_NEG_BINOMIAL_CCDF_LOG_HPP #include #include namespace stan { namespace math { /** \ingroup prob_dists * @deprecated use neg_binomial_lccdf */ template return_type_t neg_binomial_ccdf_log( const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { return neg_binomial_lccdf(n, alpha, beta); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/fun.hpp0000644000176200001440000004234714645137106021242 0ustar liggesusers#ifndef 
STAN_MATH_PRIM_FUN_HPP #define STAN_MATH_PRIM_FUN_HPP #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif StanHeaders/inst/include/stan/math/prim/meta/0000755000176200001440000000000014645161272020656 5ustar liggesusersStanHeaders/inst/include/stan/math/prim/meta/contains_fvar.hpp0000644000176200001440000000106514645137106024224 0ustar liggesusers#ifndef STAN_MATH_PRIM_META_CONTAINS_FVAR_HPP #define STAN_MATH_PRIM_META_CONTAINS_FVAR_HPP #include #include #include namespace stan { /** \ingroup type_trait * Extends std::true_type when instantiated with at least 1 * template parameter that is a fvar. 
Extends std::false_type * otherwise. * @tparam T Types to test */ template using contains_fvar = math::disjunction>...>; } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/meta/is_dense_dynamic.hpp0000644000176200001440000000315514645137106024667 0ustar liggesusers#ifndef STAN_MATH_PRIM_META_IS_DENSE_DYNAMIC_HPP #define STAN_MATH_PRIM_META_IS_DENSE_DYNAMIC_HPP #include #include #include #include #include #include #include #include #include namespace stan { namespace internal { template struct is_dense_dynamic_impl : std::false_type {}; template struct is_dense_dynamic_impl>>> : std::true_type {}; template struct is_dense_dynamic_impl>> : bool_constant>::value> {}; } // namespace internal /** * Checks whether type T is derived from Eigen::DenseBase and has dynamic rows * and columns or is a `var_value<>` whose inner type satisfies the conditions * above. If true this will have a static member function named value with * a type of true, else value is false. * @tparam T Type to check * @ingroup type_trait */ template using is_dense_dynamic = internal::is_dense_dynamic_impl>; STAN_ADD_REQUIRE_UNARY(dense_dynamic, is_dense_dynamic, require_eigens_types); STAN_ADD_REQUIRE_CONTAINER(dense_dynamic, is_dense_dynamic, require_eigens_types); } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/meta/ref_type.hpp0000644000176200001440000000421714645137106023207 0ustar liggesusers#ifndef STAN_MATH_PRIM_META_REF_TYPE_HPP #define STAN_MATH_PRIM_META_REF_TYPE_HPP #include #include #include #include #include #include namespace stan { /** * If the condition is true determines appropriate type for assigning expression * of given type to, to evaluate expensive expressions, but not make a copy if T * involves no calculations. This works similarly as `Eigen::Ref`. It also * handles rvalue references, so it can be used with perfect forwarding. If the * condition is false the expression will never be evaluated. 
* * Warning: if a variable of this type could be assigned a rvalue, make sure * template parameter `T` is of correct reference type (rvalue). * @tparam T type to determine reference for */ template struct ref_type_if { using type = std::conditional_t::value, std::remove_reference_t, const T&>; }; template struct ref_type_if< Condition, T, require_all_t, bool_constant::value>>> { using T_plain = plain_type_t; using T_optionally_ref = std::conditional_t::value, std::remove_reference_t, const T&>; using T_dec = std::decay_t().derived())>; using type = std::conditional_t< Eigen::internal::traits>>:: template match::MatchAtCompileTime || !Condition, T_optionally_ref, T_plain>; }; template struct ref_type_if> { using type = typename ref_type_if::Base>::type; }; template using ref_type_t = typename ref_type_if::type; template using ref_type_if_t = typename ref_type_if::type; } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/meta/holder.hpp0000644000176200001440000003303514645137106022647 0ustar liggesusers#ifndef STAN_MATH_PRIM_FUN_HOLDER_HPP #define STAN_MATH_PRIM_FUN_HOLDER_HPP #include #include #include #include #include #include /** * \defgroup eigen_expressions Eigen expressions */ /** * \ingroup eigen_expressions * \defgroup returning_expressions Returning expressions */ /** * \ingroup returning_expressions * * Operations in Eigen Expressions hold their arguments either by value or by * reference. Which one is chosen depends on type of the argument. Other * operations are held by value. "Heavy" objects that can hold data themselves, * such as `Eigen::Matrix` or `Eigen::Ref` are instead held by reference. THis * is the only criterion - holding rvalue arguments by value is not supported, * so we can not use perfect forwarding. * * When returning an expression from function we have to be careful that any * arguments in this expression that are held by reference do not go out of * scope. 
For instance, consider the function: * * ``` * template * auto f(const T& x){ * const Eigen::Ref& x_ref = x; * return x_ref.cwiseProduct(x_ref); * } * ``` * And the code calling it: * ``` * Eigen::MatrixXd test_mat(2,2); * test_mat << 5, 5, 5, 5; * VectorXd X = f(test_mat.diagonal()); * ``` * This function will return back a `CwiseBinaryOp` Eigen expression, which is * then evaluated out of the function scope when assigned to `X`. The expression * references `x_ref`, which was created withing function and destroyed when * the function returned. The returned expression is evaluated after the * function returned, so its evaluation references a matrix that was already * deleted. In other words the returned expression contains a dangling * reference. * * So a function returning an expression referencing local matrices or * matrices that were rvalue reference arguments to the function will not work. * * A workarount to this issue is allocating and constructing or moving such * objects to heap. `Holder` object is a no-op operation that can also take * pointers to such objects and release them when it goes out of scope. It can * be created either by directly supplying pointers to such objects to `holder` * function or by forwarding function arguments and moving local variables to * `make_holder`, which will move any rvalues to heap first. 
*/ // This was implenmented following the tutorial on edding new expressions to // Eigen: https://eigen.tuxfamily.org/dox/TopicNewExpressionType.html namespace stan { namespace math { template class Holder; } // namespace math } // namespace stan namespace Eigen { namespace internal { template struct traits> { typedef typename ArgType::StorageKind StorageKind; typedef typename traits::XprKind XprKind; typedef typename ArgType::StorageIndex StorageIndex; typedef typename ArgType::Scalar Scalar; enum { // Possible flags are documented here: // https://eigen.tuxfamily.org/dox/group__flags.html Flags = (ArgType::Flags & (RowMajorBit | LvalueBit | LinearAccessBit | DirectAccessBit | PacketAccessBit | NoPreferredStorageOrderBit)) | NestByRefBit, RowsAtCompileTime = ArgType::RowsAtCompileTime, ColsAtCompileTime = ArgType::ColsAtCompileTime, MaxRowsAtCompileTime = ArgType::MaxRowsAtCompileTime, MaxColsAtCompileTime = ArgType::MaxColsAtCompileTime, InnerStrideAtCompileTime = ArgType::InnerStrideAtCompileTime, OuterStrideAtCompileTime = ArgType::OuterStrideAtCompileTime }; }; } // namespace internal } // namespace Eigen namespace stan { namespace math { /** * A no-op Eigen operation. This object also owns pointers to dynamically * allocated objects used in its argument expression. When this object is * destructed, those objects are deleted. * @tparam Derived derived type * @tparam T type of the argument */ template class Holder : public Eigen::internal::dense_xpr_base>::type { public: typedef typename Eigen::internal::ref_selector>::type Nested; typename Eigen::internal::ref_selector::non_const_type m_arg; std::tuple...> m_unique_ptrs; explicit Holder(ArgType&& arg, Ptrs*... pointers) : m_arg(arg), m_unique_ptrs(std::unique_ptr(pointers)...) 
{} // we need to explicitely default copy and move constructors as we are // defining copy and move assignment operators Holder(const Holder&) = default; Holder(Holder&&) = default; // all these functions just call the same on the argument Eigen::Index rows() const { return m_arg.rows(); } Eigen::Index cols() const { return m_arg.cols(); } Eigen::Index innerStride() const { return m_arg.innerStride(); } Eigen::Index outerStride() const { return m_arg.outerStride(); } auto* data() { return m_arg.data(); } /** * Assignment operator assigns expresssions. * @param other expression to assign to this * @return *this */ template * = nullptr> inline Holder& operator=(const T& other) { m_arg = other; return *this; } // copy and move assignment operators need to be separately overloaded, // otherwise defaults will be used. inline Holder& operator=( const Holder& other) { m_arg = other; return *this; } inline Holder& operator=(Holder&& other) { m_arg = std::move(other.m_arg); return *this; } }; } // namespace math } // namespace stan namespace Eigen { namespace internal { template struct evaluator> : evaluator_base> { typedef stan::math::Holder XprType; typedef typename remove_all::type ArgTypeNestedCleaned; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::Scalar Scalar; enum { CoeffReadCost = evaluator::CoeffReadCost, // Possible flags are documented here: // https://eigen.tuxfamily.org/dox/group__flags.html Flags = evaluator::Flags, Alignment = evaluator::Alignment, }; evaluator m_argImpl; explicit evaluator(const XprType& xpr) : m_argImpl(xpr.m_arg) {} // all these functions just call the same on the argument EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(row, col); } EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(row, col); } EIGEN_STRONG_INLINE Scalar& 
coeffRef(Index index) { return m_argImpl.coeffRef(index); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet(row, col); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet(index); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { return m_argImpl.template writePacket(row, col, x); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { return m_argImpl.template writePacket(index, x); } }; } // namespace internal } // namespace Eigen namespace stan { namespace math { /** * Constructs a no-op operation that also holds pointer to some other * expressions, allocated on heap. When the object is destructed those * expressions will be deleted. * @tparam T type of argument expression * @tparam Ptrs types of pointers * @param a argument expression * @param ptrs pointers to objects the constructed object will own. * @return holder operation */ template = 1>* = nullptr> Holder holder(T&& arg, Ptrs*... pointers) { return Holder(std::forward(arg), pointers...); } // trivial case with no pointers constructs no holder object template T holder(T&& arg) { return std::forward(arg); } namespace internal { // the function holder_handle_element is also used in holder_cl /** * Handles single element (moving rvalue non-expressions to heap) for * construction of `holder` or `holder_cl` from a functor. For lvalues or * expressions just sets the `res` pointer. * @tparam T type of the element * @param a element to handle * @param res resulting pointer to element * @return tuple of pointers allocated on heap (empty). 
*/ template auto holder_handle_element(T& a, T*& res) { res = &a; return std::make_tuple(); } template >::Flags & Eigen::NestByRefBit)>* = nullptr> auto holder_handle_element(T&& a, std::remove_reference_t*& res) { res = &a; return std::make_tuple(); } /** * Handles single element (moving rvalue non-expressions to heap) for * construction of `holder` or `holder_cl` from a functor. Rvalue non-expression * is moved to heap and the pointer to heap memory is assigned to res and * returned in a tuple. * @tparam T type of the element * @param a element to handle * @param res resulting pointer to element * @return tuple of pointers allocated on heap (containing single pointer). */ template >* = nullptr, std::enable_if_t< static_cast(Eigen::internal::traits>::Flags& Eigen::NestByRefBit)>* = nullptr> auto holder_handle_element(T&& a, T*& res) { res = new T(std::move(a)); return std::make_tuple(res); } template >* = nullptr, require_not_eigen_t* = nullptr> auto holder_handle_element(T&& a, T*& res) { res = new T(std::move(a)); return std::make_tuple(res); } /** * Second step in implementation of construction `holder` from a functor. * Constructs holder object form given expression and tuple of pointers. * @tparam T type of the result expression * @tparam Is index sequence for `ptrs` * @tparam Args types of pointes to heap * @param expr result expression * @param ptrs pointers to heap that need to be released when the expression is * destructed * @return `holder` referencing given expression */ template auto make_holder_impl_construct_object(T&& expr, std::index_sequence, const std::tuple& ptrs) { return holder(std::forward(expr), std::get(ptrs)...); } /** * Implementation of construction `holder` from a functor. 
* @tparam F type of the functor * @tparam Is index sequence for `args` * @tparam Args types of the arguments * @param func functor * @param args arguments for the functor * @return `holder` referencing expression constructed by given functor */ template auto make_holder_impl(const F& func, std::index_sequence, Args&&... args) { std::tuple*...> res; auto ptrs = std::tuple_cat( holder_handle_element(std::forward(args), std::get(res))...); return make_holder_impl_construct_object( func(*std::get(res)...), std::make_index_sequence::value>(), ptrs); } } // namespace internal /** * Constructs an expression from given arguments using given functor. * This is similar to calling the functor with given arguments. Except that any * rvalue argument will be moved to heap first. The arguments moved to heap are * deleted once the expression is destructed. * * @tparam F type of the functor * @tparam Args types of the arguments * @param func the functor * @param args arguments for the functor * @return `holder` referencing expression constructed by given functor */ template ()(std::declval()...))>* = nullptr> auto make_holder(const F& func, Args&&... args) { return internal::make_holder_impl(func, std::make_index_sequence(), std::forward(args)...); } /** * Calls given function with given arguments. No `holder` is necessary if the * function is not returning Eigen expression. * * @tparam F type of the functor * @tparam Args types of the arguments * @param func the functor * @param args arguments for the functor * @return `holder` referencing expression constructed by given functor */ template ()(std::declval()...))>* = nullptr> auto make_holder(const F& func, Args&&... 
args) { return func(std::forward(args)...); } } // namespace math } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/meta/is_eigen.hpp0000644000176200001440000000574314645137106023161 0ustar liggesusers#ifndef STAN_MATH_PRIM_META_IS_EIGEN_HPP #define STAN_MATH_PRIM_META_IS_EIGEN_HPP #include #include #include #include #include #include namespace stan { /** * Check if type derives from `EigenBase` * @tparam T Type to check if it is derived from `EigenBase` * @tparam Enable used for SFINAE deduction. * @ingroup type_trait **/ template struct is_eigen : bool_constant::value> {}; /** * Template metaprogram defining the base scalar type of * values stored in an Eigen matrix. * * @tparam T type to check. * @ingroup type_trait */ template struct scalar_type::value>> { using type = scalar_type_t::Scalar>; }; /** * Template metaprogram defining the type of values stored in an * Eigen matrix, vector, or row vector. * * @tparam T type to check * @ingroup type_trait */ template struct value_type::value>> { using type = typename std::decay_t::Scalar; }; STAN_ADD_REQUIRE_UNARY(eigen, is_eigen, require_eigens_types); STAN_ADD_REQUIRE_CONTAINER(eigen, is_eigen, require_eigens_types); /** * Check if a type is derived from `Eigen::ArrayBase` * @tparam T type to check * @ingroup type_trait */ template struct is_eigen_array : bool_constant::value> {}; STAN_ADD_REQUIRE_UNARY(eigen_array, is_eigen_array, require_eigens_types); STAN_ADD_REQUIRE_CONTAINER(eigen_array, is_eigen_array, require_eigens_types); /** * Check if a type is derived from `Eigen::MatrixBase` or `Eigen::ArrayBase` * @tparam T type to check. 
* @ingroup type_trait */ template using is_eigen_matrix_or_array = math::disjunction, is_eigen_array>; STAN_ADD_REQUIRE_UNARY(eigen_matrix_or_array, is_eigen_matrix_or_array, require_eigens_types); STAN_ADD_REQUIRE_CONTAINER(eigen_matrix_or_array, is_eigen_matrix_or_array, require_eigens_types); namespace internal { template struct is_eigen_contiguous_map_impl : std::false_type {}; template struct is_eigen_contiguous_map_impl>> : std::true_type {}; } // namespace internal /** * Check if a type is an `Eigen::Map` with contiguous stride * @ingroup type_trait */ template struct is_eigen_contiguous_map : internal::is_eigen_contiguous_map_impl> {}; STAN_ADD_REQUIRE_UNARY(eigen_contiguous_map, is_eigen_contiguous_map, require_eigens_types); STAN_ADD_REQUIRE_CONTAINER(eigen_contiguous_map, is_eigen_contiguous_map, require_eigens_types); } // namespace stan #endif StanHeaders/inst/include/stan/math/prim/meta/require_helpers.hpp0000644000176200001440000006567214645137106024604 0ustar liggesusers#ifndef STAN_MATH_PRIM_META_REQUIRE_HELPERS_HPP #define STAN_MATH_PRIM_META_REQUIRE_HELPERS_HPP #include #include #include #include #include #include namespace stan { /** * If condition is true, template is enabled * @ingroup type_traits */ template using require_t = std::enable_if_t; /** * If condition is false, template is disabled * @ingroup type_traits */ template using require_not_t = std::enable_if_t; /** * If all conditions are true, template is enabled * Returns a type void if all conditions are true and otherwise fails. * @ingroup type_traits */ template using require_all_t = std::enable_if_t::value>; /** * If any condition is true, template is enabled. * * Returns a type void if any of the conditions are true and otherwise fails. * @ingroup type_traits */ template using require_any_t = std::enable_if_t::value>; /** * If all conditions are false, template is enabled. * * Returns a type void if all of the conditions are false. 
* @ingroup type_traits */ template using require_all_not_t = std::enable_if_t::value>; /** * If any condition is false, template is enabled. * * Returns a type void if any of the conditions are false. * @ingroup type_traits */ template using require_any_not_t = std::enable_if_t::value>; /** \ingroup macro_helpers * Adds Unary require aliases. * @param check_type The name of the type to check, used to define * `require__t`. * @param checker A struct that returns holds a boolean `value` * @param doxygen_group The doxygen group to add this requires to. */ #define STAN_ADD_REQUIRE_UNARY(check_type, checker, doxygen_group) \ /*! \ingroup doxygen_group */ \ /*! \defgroup check_type##_types check_type */ \ /*! \addtogroup check_type##_types */ \ /*! @{ */ \ /*! \brief Require type satisfies checker */ \ template \ using require_##check_type##_t = require_t>>; \ \ /*! \brief Require type does not satisfy checker */ \ template \ using require_not_##check_type##_t \ = require_not_t>>; \ \ /*! \brief Require all of the types satisfy checker */ \ template \ using require_all_##check_type##_t \ = require_all_t>...>; \ \ /*! \brief Require any of the types satisfy checker */ \ template \ using require_any_##check_type##_t \ = require_any_t>...>; \ \ /*! \brief Require none of the types satisfy checker */ \ template \ using require_all_not_##check_type##_t \ = require_all_not_t>...>; \ \ /*! \brief Require at least one of the types do not satisfy checker */ \ template \ using require_any_not_##check_type##_t \ = require_any_not_t>...>; \ /*! @} */ /** \ingroup macro_helpers * Adds unary require aliases that check the `value_type`. * @param check_type The name of the type to check, used to define * `require__t`. * @param checker A struct that returns holds a boolean `value` * @param doxygen_group The doxygen group to add this requires to. */ #define STAN_ADD_REQUIRE_UNARY_INNER(check_type, checker, doxygen_group) \ /*! \ingroup doxygen_group */ \ /*! 
\addtogroup check_type##_types */ \ /*! @{ */ \ /*! \brief Require value type satisfies checker */ \ template \ using require_vt_##check_type \ = require_t>>>; \ \ /*! \brief Require value type does not satisfy checker */ \ template \ using require_not_vt_##check_type \ = require_not_t>>>; \ \ /*! \brief Require all of the value types satisfy checker */ \ template \ using require_all_vt_##check_type \ = require_all_t>>...>; \ \ /*! \brief Require any of the value types satisfy checker */ \ template \ using require_any_vt_##check_type \ = require_any_t>>...>; \ \ /*! \brief Require none of the value types satisfy checker */ \ template \ using require_all_not_vt_##check_type \ = require_all_not_t>>...>; \ \ /*! \brief Require at least one of the value types do not satisfy checker */ \ template \ using require_any_not_vt_##check_type \ = require_any_not_t>>...>; \ \ /*! \brief Require scalar type satisfies checker */ \ template \ using require_st_##check_type \ = require_t>>>; \ \ /*! \brief Require scalar type does not satisfy checker */ \ template \ using require_not_st_##check_type \ = require_not_t>>>; \ \ /*! \brief Require all of the scalar types satisfy checker */ \ template \ using require_all_st_##check_type \ = require_all_t>>...>; \ \ /*! \brief Require any of the scalar types satisfy checker */ \ template \ using require_any_st_##check_type \ = require_any_t>>...>; \ \ /*! \brief Require none of the scalar types satisfy checker */ \ template \ using require_all_not_st_##check_type \ = require_all_not_t>>...>; \ \ /*! \brief Any of the scalar types do not satisfy checker */ \ template \ using require_any_not_st_##check_type \ = require_any_not_t>>...>; \ /*! @} */ /** \ingroup macro_helpers * Adds binary require aliases. * @param check_type The name of the type to check, used to define * `require__t`. * @param checker A struct that returns holds a boolean `value` * @param doxygen_group The doxygen group to add this requires to. 
*/ #define STAN_ADD_REQUIRE_BINARY(check_type, checker, doxygen_group) \ /*! \ingroup doxygen_group */ \ /*! \defgroup check_type##_types check_type */ \ /*! \addtogroup check_type##_types */ \ /*! @{ */ \ /*! \brief Require types `T` and `S` satisfies checker */ \ template \ using require_##check_type##_t \ = require_t, std::decay_t>>; \ \ /*! \brief Require types `T` and `S` does not satisfy checker */ \ template \ using require_not_##check_type##_t \ = require_not_t, std::decay_t>>; \ \ /*! \brief Require `T` and all of the `Types` satisfy checker */ \ template \ using require_all_##check_type##_t \ = require_all_t, std::decay_t>...>; \ \ /*! \brief Require any of the `Types` and `T` satisfy checker */ \ template \ using require_any_##check_type##_t \ = require_any_t, std::decay_t>...>; \ \ /*! \brief Require none of the `Types` and `T` satisfy checker */ \ template \ using require_all_not_##check_type##_t \ = require_all_not_t, std::decay_t>...>; \ \ /*! \brief Any one of the `Types` and `T` do not satisfy */ \ template \ using require_any_not_##check_type##_t \ = require_any_not_t, std::decay_t>...>; \ /*! @} */ /** \ingroup macro_helpers * Adds binary require aliases that check the `scalar_type`. * @param check_type The name of the type to check, used to define * `require__t`. * @param checker A struct that returns holds a boolean `value` * @param doxygen_group The doxygen group to add this requires to. */ #define STAN_ADD_REQUIRE_BINARY_INNER(check_type, checker, doxygen_group) \ /*! \ingroup doxygen_group */ \ /*! \addtogroup check_type##_types */ \ /*! @{ */ \ /*! \brief Require that value types of `T` and `S` satisfies checker */ \ template \ using require_st_##check_type \ = require_t>, \ scalar_type_t>>>; \ \ /*! \brief Require scalar types of `T` and `S` does not satisfy checker */ \ template \ using require_not_st_##check_type \ = require_not_t>, \ scalar_type_t>>>; \ \ /*! 
\brief All scalar types of `T` and all of the `Types` satisfy checker */ \ template \ using require_all_st_##check_type \ = require_all_t>, \ scalar_type_t>>...>; \ \ /*! \brief Any of the scalar types of `Types` and `T` satisfy checker */ \ template \ using require_any_st_##check_type \ = require_any_t>, \ scalar_type_t>>...>; \ \ /*! \brief None of the scalar types of `Types` and `T` satisfy checker */ \ template \ using require_all_not_st_##check_type \ = require_all_not_t>, \ scalar_type_t>>...>; \ \ /*! \brief Any of the scalar types `Types` and `T` do not satisfy checker */ \ template \ using require_any_not_st_##check_type \ = require_any_not_t>, \ scalar_type_t>>...>; \ \ /*! \brief Value types of `T` and `S` satisfies checker */ \ template \ using require_vt_##check_type = require_t< \ checker>, value_type_t>>>; \ \ /*! \brief Value types of `T` and `S` does not satisfy checker */ \ template \ using require_not_vt_##check_type = require_not_t< \ checker>, value_type_t>>>; \ \ /*! \brief Value types of `T` and all of the `Types` satisfy checker */ \ template \ using require_all_vt_##check_type \ = require_all_t>, \ value_type_t>>...>; \ \ /*! \brief Any of the value types of `Types` and `T` satisfy checker */ \ template \ using require_any_vt_##check_type \ = require_any_t>, \ value_type_t>>...>; \ \ /*! \brief None of the value types of `Types` and `T` satisfy checker */ \ template \ using require_all_not_vt_##check_type \ = require_all_not_t>, \ value_type_t>>...>; \ \ /*! \brief Any of the value types `Types` and `T` do not satisfy checker */ \ template \ using require_any_not_vt_##check_type \ = require_any_not_t>, \ value_type_t>>...>; \ /*! @} */ /** * Used as the base for checking whether a type is a container with * an underlying scalar type * * @tparam ContainerCheck Templated struct or alias that wraps a static constant * scalar called type. Used to check the container satisfies a particular type * check. 
* @tparam ValueCheck Templated struct or alias that returns a containers * inner type. * @tparam CheckType Templated struct or alias that wraps a static constant * scalar called type. Used to check the container's underlying type satisfies a * particular type check. * */ template